mm: increase usage of folio_next_index() helper
Author:     Sidhartha Kumar <sidhartha.kumar@oracle.com>
AuthorDate: Tue, 27 Jun 2023 17:43:49 +0000 (10:43 -0700)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Fri, 18 Aug 2023 17:12:00 +0000 (10:12 -0700)
Simplify the open-coded pattern 'folio->index + folio_nr_pages(folio)' by
using the existing helper folio_next_index().
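
For reference, folio_next_index() simply wraps the expression being replaced,
so every hunk below is a mechanical, behaviour-preserving substitution.  A
minimal sketch of the helper (as defined in include/linux/pagemap.h) is:

	/* Sketch of the existing helper: returns the index one past the
	 * last page of the folio, i.e. the open-coded
	 * 'folio->index + folio_nr_pages(folio)' used throughout this patch. */
	static inline pgoff_t folio_next_index(struct folio *folio)
	{
		return folio->index + folio_nr_pages(folio);
	}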

Link: https://lkml.kernel.org/r/20230627174349.491803-1-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Suggested-by: Christoph Hellwig <hch@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/ext4/inode.c
mm/filemap.c
mm/memory.c
mm/shmem.c
mm/truncate.c

fs/ext4/inode.c
index 43775a6ca5054a485c33932a801928b55240a3c0..3d253e250871c0e10bd396ebe27c0ec8c97bba5e 100644
@@ -1569,7 +1569,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
 
                        if (folio->index < mpd->first_page)
                                continue;
-                       if (folio->index + folio_nr_pages(folio) - 1 > end)
+                       if (folio_next_index(folio) - 1 > end)
                                continue;
                        BUG_ON(!folio_test_locked(folio));
                        BUG_ON(folio_test_writeback(folio));
@@ -2455,7 +2455,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 
                        if (mpd->map.m_len == 0)
                                mpd->first_page = folio->index;
-                       mpd->next_page = folio->index + folio_nr_pages(folio);
+                       mpd->next_page = folio_next_index(folio);
                        /*
                         * Writeout when we cannot modify metadata is simple.
                         * Just submit the page. For data=journal mode we
mm/filemap.c
index 9e44a49bbd74d7ba2cd55849ff0074e4c5318c60..c5e2c70ea046878d5aa95ed13c9fa6d8f932c191 100644
@@ -2075,7 +2075,7 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
                if (!xa_is_value(folio)) {
                        if (folio->index < *start)
                                goto put;
-                       if (folio->index + folio_nr_pages(folio) - 1 > end)
+                       if (folio_next_index(folio) - 1 > end)
                                goto put;
                        if (!folio_trylock(folio))
                                goto put;
@@ -2174,7 +2174,7 @@ bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
                return false;
        if (index >= max)
                return false;
-       return index < folio->index + folio_nr_pages(folio) - 1;
+       return index < folio_next_index(folio) - 1;
 }
 
 /**
@@ -2242,7 +2242,7 @@ update_start:
                if (folio_test_hugetlb(folio))
                        *start = folio->index + 1;
                else
-                       *start = folio->index + folio_nr_pages(folio);
+                       *start = folio_next_index(folio);
        }
 out:
        rcu_read_unlock();
@@ -2359,7 +2359,7 @@ static void filemap_get_read_batch(struct address_space *mapping,
                        break;
                if (folio_test_readahead(folio))
                        break;
-               xas_advance(&xas, folio->index + folio_nr_pages(folio) - 1);
+               xas_advance(&xas, folio_next_index(folio) - 1);
                continue;
 put_folio:
                folio_put(folio);
mm/memory.c
index 603b2f419948319fec35ea818adf516aca7ae4aa..33f0f28c7ebc635dd05d5af7d7d777f686d62b40 100644
@@ -3495,7 +3495,7 @@ void unmap_mapping_folio(struct folio *folio)
        VM_BUG_ON(!folio_test_locked(folio));
 
        first_index = folio->index;
-       last_index = folio->index + folio_nr_pages(folio) - 1;
+       last_index = folio_next_index(folio) - 1;
 
        details.even_cows = false;
        details.single_folio = folio;
mm/shmem.c
index f5af4b943e4286e3b414d40c0ebb09d3a6ec0d41..8dfd72bdc86ab88b20670e2e145456145fe42239 100644
@@ -970,7 +970,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                same_folio = lend < folio_pos(folio) + folio_size(folio);
                folio_mark_dirty(folio);
                if (!truncate_inode_partial_folio(folio, lstart, lend)) {
-                       start = folio->index + folio_nr_pages(folio);
+                       start = folio_next_index(folio);
                        if (same_folio)
                                end = folio->index;
                }
mm/truncate.c
index 95d1291d269b5737ad4275d8a934663eb7344d12..2f28cc0e12ef1d04f4a334e257ffc3941a0da2ce 100644
@@ -378,7 +378,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
        if (!IS_ERR(folio)) {
                same_folio = lend < folio_pos(folio) + folio_size(folio);
                if (!truncate_inode_partial_folio(folio, lstart, lend)) {
-                       start = folio->index + folio_nr_pages(folio);
+                       start = folio_next_index(folio);
                        if (same_folio)
                                end = folio->index;
                }