        unsigned int bytes_written;
        struct cifs_sb_info *cifs_sb;
        int done = 0;
-       pgoff_t end = -1;
+       pgoff_t end;
        pgoff_t index;
-       int is_range = 0;
+       int range_whole = 0;
        struct kvec iov[32];
        int len;
        int n_iov = 0;
        xid = GetXid();
 
        pagevec_init(&pvec, 0);
-       if (wbc->sync_mode == WB_SYNC_NONE)
+       if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
-       else {
-               index = 0;
-               scanned = 1;
-       }
-       if (wbc->start || wbc->end) {
-               index = wbc->start >> PAGE_CACHE_SHIFT;
-               end = wbc->end >> PAGE_CACHE_SHIFT;
-               is_range = 1;
+               end = -1;
+       } else {
+               index = wbc->range_start >> PAGE_CACHE_SHIFT;
+               end = wbc->range_end >> PAGE_CACHE_SHIFT;
+               if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+                       range_whole = 1;
                scanned = 1;
        }
 retry:
                                break;
                        }
 
-                       if (unlikely(is_range) && (page->index > end)) {
+                       if (!wbc->range_cyclic && page->index > end) {
                                done = 1;
                                unlock_page(page);
                                break;
                index = 0;
                goto retry;
        }
-       if (!is_range)
+       if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;
 
        FreeXid(xid);
 
 {
        struct writeback_control wbc = {
                .sync_mode      = wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
+               .range_start    = 0,
+               .range_end      = LLONG_MAX,
        };
        unsigned long nr_dirty = read_page_state(nr_dirty);
        unsigned long nr_unstable = read_page_state(nr_unstable);
        struct writeback_control wbc = {
                .nr_to_write = LONG_MAX,
                .sync_mode = WB_SYNC_ALL,
+               .range_start = 0,
+               .range_end = LLONG_MAX,
        };
 
        if (!mapping_cap_writeback_dirty(inode->i_mapping))
 
        struct pagevec pvec;
        int nr_pages;
        pgoff_t index;
-       pgoff_t end = -1;               /* Inclusive */
+       pgoff_t end;            /* Inclusive */
        int scanned = 0;
-       int is_range = 0;
+       int range_whole = 0;
 
        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                wbc->encountered_congestion = 1;
                writepage = mapping->a_ops->writepage;
 
        pagevec_init(&pvec, 0);
-       if (wbc->sync_mode == WB_SYNC_NONE) {
+       if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
+               end = -1;
        } else {
-               index = 0;                        /* whole-file sweep */
-               scanned = 1;
-       }
-       if (wbc->start || wbc->end) {
-               index = wbc->start >> PAGE_CACHE_SHIFT;
-               end = wbc->end >> PAGE_CACHE_SHIFT;
-               is_range = 1;
+               index = wbc->range_start >> PAGE_CACHE_SHIFT;
+               end = wbc->range_end >> PAGE_CACHE_SHIFT;
+               if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+                       range_whole = 1;
                scanned = 1;
        }
 retry:
                                continue;
                        }
 
-                       if (unlikely(is_range) && page->index > end) {
+                       if (!wbc->range_cyclic && page->index > end) {
                                done = 1;
                                unlock_page(page);
                                continue;
                index = 0;
                goto retry;
        }
-       if (!is_range)
+       if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;
        if (bio)
                mpage_bio_submit(WRITE, bio);
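Both cifs_writepages() above and mpage_writepages() here now derive their page-index window from the new wbc fields instead of inferring it from sync_mode. A minimal annotated sketch of that shared pattern (illustrative only, not part of the patch; the actual page-writing loop is elided):

        pgoff_t index, end;             /* page-cache indices; end is inclusive */
        int range_whole = 0;

        if (wbc->range_cyclic) {
                index = mapping->writeback_index;       /* resume where the last sweep stopped */
                end = -1;                               /* pgoff_t is unsigned, so this means "no upper bound" */
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;                /* the caller asked for the whole file */
        }
        /* ... write back pages index..end, honouring wbc->nr_to_write ... */
        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;       /* remember the resume point for the next cyclic pass */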
 
        }
 
        if (nbytes == 0)
-               endbyte = -1;
+               endbyte = LLONG_MAX;
        else
                endbyte--;              /* inclusive */
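sys_sync_file_range() keeps the same convention, only spelled with the new constant: nbytes == 0 means "from offset to the end of the file", otherwise the end byte is made inclusive. A worked example, assuming endbyte was computed as offset + nbytes earlier in the function:

        loff_t offset = 4096, nbytes = 8192;
        loff_t endbyte = offset + nbytes;       /* 12288 */

        if (nbytes == 0)
                endbyte = LLONG_MAX;            /* sync everything from offset onwards */
        else
                endbyte--;                      /* 12287: last byte, inclusive */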
 
 
 #define LONG_MAX       ((long)(~0UL>>1))
 #define LONG_MIN       (-LONG_MAX - 1)
 #define ULONG_MAX      (~0UL)
+#define LLONG_MAX      ((long long)(~0ULL>>1))
+#define LLONG_MIN      (-LLONG_MAX - 1)
+#define ULLONG_MAX     (~0ULL)
 
 #define STACK_MAGIC    0xdeadbeef
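The long long limits are new and mirror the existing LONG_* definitions; LLONG_MAX is the value the writeback paths now use to mean "no upper bound". A quick userspace check (not kernel code) of what the new macros expand to:

        #include <stdio.h>

        #define LLONG_MAX       ((long long)(~0ULL>>1))
        #define LLONG_MIN       (-LLONG_MAX - 1)
        #define ULLONG_MAX      (~0ULL)

        int main(void)
        {
                /* prints: 9223372036854775807 -9223372036854775808 18446744073709551615 */
                printf("%lld %lld %llu\n", LLONG_MAX, LLONG_MIN, ULLONG_MAX);
                return 0;
        }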
 
 
         * a hint that the filesystem need only write out the pages inside that
         * byterange.  The byte at `end' is included in the writeout request.
         */
-       loff_t start;
-       loff_t end;
+       loff_t range_start;
+       loff_t range_end;
 
        unsigned nonblocking:1;         /* Don't get stuck on request queues */
        unsigned encountered_congestion:1; /* An output: a queue is full */
        unsigned for_kupdate:1;         /* A kupdate writeback */
        unsigned for_reclaim:1;         /* Invoked from the page allocator */
        unsigned for_writepages:1;      /* This is a writepages() call */
+       unsigned range_cyclic:1;        /* range_start is cyclic */
 };
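With the renamed fields and the new bit, a writeback_control describes either an explicit byte range (range_end is inclusive, with LLONG_MAX meaning end of file) or a cyclic sweep that resumes from mapping->writeback_index. A sketch of the two initializations used elsewhere in this patch (values illustrative):

        /* Integrity-style writeback: cover the whole file explicitly. */
        struct writeback_control sync_wbc = {
                .sync_mode      = WB_SYNC_ALL,
                .nr_to_write    = LONG_MAX,
                .range_start    = 0,
                .range_end      = LLONG_MAX,
        };

        /* Background/kupdate-style writeback: cycle through files across calls. */
        struct writeback_control bg_wbc = {
                .sync_mode      = WB_SYNC_NONE,
                .nr_to_write    = 1024,         /* arbitrary batch size for the sketch */
                .range_cyclic   = 1,
        };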
 
 /*
 
        struct writeback_control wbc = {
                .sync_mode = sync_mode,
                .nr_to_write = mapping->nrpages * 2,
-               .start = start,
-               .end = end,
+               .range_start = start,
+               .range_end = end,
        };
 
        if (!mapping_cap_writeback_dirty(mapping))
 static inline int __filemap_fdatawrite(struct address_space *mapping,
        int sync_mode)
 {
-       return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode);
+       return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
 }
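The old call passed (0, 0), which doubled as the "whole file" sentinel and so could not express a real range covering only the first byte; passing an explicit 0..LLONG_MAX removes that ambiguity. As a usage sketch:

        __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, WB_SYNC_ALL);        /* whole file */
        __filemap_fdatawrite_range(mapping, 0, 4095, WB_SYNC_ALL);             /* first 4 KiB only */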
 
 int filemap_fdatawrite(struct address_space *mapping)
 
                        .sync_mode      = WB_SYNC_NONE,
                        .older_than_this = NULL,
                        .nr_to_write    = write_chunk,
+                       .range_cyclic   = 1,
                };
 
                get_dirty_limits(&wbs, &background_thresh,
                .older_than_this = NULL,
                .nr_to_write    = 0,
                .nonblocking    = 1,
+               .range_cyclic   = 1,
        };
 
        for ( ; ; ) {
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .for_kupdate    = 1,
+               .range_cyclic   = 1,
        };
 
        sync_supers();
 
                struct writeback_control wbc = {
                        .sync_mode = WB_SYNC_NONE,
                        .nr_to_write = SWAP_CLUSTER_MAX,
+                       .range_start = 0,
+                       .range_end = LLONG_MAX,
                        .nonblocking = 1,
                        .for_reclaim = 1,
                };