fs: Introduce aops->read_folio
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 29 Apr 2022 12:43:23 +0000 (08:43 -0400)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 9 May 2022 20:21:40 +0000 (16:21 -0400)
Change all the callers of ->readpage to call ->read_folio in preference,
if it exists.  This is a transitional duplication, and will be removed
by the end of the series.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
fs/btrfs/file.c
fs/buffer.c
fs/ceph/addr.c
include/linux/fs.h
kernel/events/uprobes.c
mm/filemap.c
mm/readahead.c
mm/swapfile.c

index 380054c94e4b6a1cb2502f06998acfc015806db3..59510d7b1c65e504b44c996b8a0dee367afa712b 100644 (file)
@@ -2401,7 +2401,7 @@ static int btrfs_file_mmap(struct file    *filp, struct vm_area_struct *vma)
 {
        struct address_space *mapping = filp->f_mapping;
 
-       if (!mapping->a_ops->readpage)
+       if (!mapping->a_ops->readpage && !mapping->a_ops->read_folio)
                return -ENOEXEC;
 
        file_accessed(filp);
index 9737e0dbe3ec62d5c2ca683cb677d9749094db08..225d03cd622d26ab4f73836cc3fc1883d479f087 100644 (file)
@@ -2824,7 +2824,10 @@ int nobh_truncate_page(struct address_space *mapping,
 
        /* Ok, it's mapped. Make sure it's up-to-date */
        if (!folio_test_uptodate(folio)) {
-               err = mapping->a_ops->readpage(NULL, &folio->page);
+               if (mapping->a_ops->read_folio)
+                       err = mapping->a_ops->read_folio(NULL, folio);
+               else
+                       err = mapping->a_ops->readpage(NULL, &folio->page);
                if (err) {
                        folio_put(folio);
                        goto out;
index e65541a51b688f7133efbe85f75cec8a44eb19de..42bba2b5d98bc42e4994e529a7d30760f31d0f2e 100644 (file)
@@ -1772,7 +1772,7 @@ int ceph_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct address_space *mapping = file->f_mapping;
 
-       if (!mapping->a_ops->readpage)
+       if (!mapping->a_ops->readpage && !mapping->a_ops->read_folio)
                return -ENOEXEC;
        file_accessed(file);
        vma->vm_ops = &ceph_vmops;
index 2be852661a296a40d5714e35e32459aabef07b23..5ad942183a2c5d3b58c7f11a687debbff849e846 100644 (file)
@@ -336,6 +336,7 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
 struct address_space_operations {
        int (*writepage)(struct page *page, struct writeback_control *wbc);
        int (*readpage)(struct file *, struct page *);
+       int (*read_folio)(struct file *, struct folio *);
 
        /* Write back some dirty pages from this mapping. */
        int (*writepages)(struct address_space *, struct writeback_control *);
index 6418083901d4d333485f6bc6e560269f784a7d8a..2c7815d20038fa26e6372802289200b18f8dd0f9 100644 (file)
@@ -790,7 +790,7 @@ static int __copy_insn(struct address_space *mapping, struct file *filp,
         * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
         * see uprobe_register().
         */
-       if (mapping->a_ops->readpage)
+       if (mapping->a_ops->read_folio || mapping->a_ops->readpage)
                page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
        else
                page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
@@ -1143,7 +1143,9 @@ static int __uprobe_register(struct inode *inode, loff_t offset,
                return -EINVAL;
 
        /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
-       if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
+       if (!inode->i_mapping->a_ops->read_folio &&
+           !inode->i_mapping->a_ops->readpage &&
+           !shmem_mapping(inode->i_mapping))
                return -EIO;
        /* Racy, just to catch the obvious mistakes */
        if (offset > i_size_read(inode))
index c15cfc28f9ce386b47e197179b45027fe143cec3..96e3d7ffd98e196b1e6252845522ea23bab0a639 100644 (file)
@@ -2419,7 +2419,10 @@ static int filemap_read_folio(struct file *file, struct address_space *mapping,
         */
        folio_clear_error(folio);
        /* Start the actual read. The read will unlock the page. */
-       error = mapping->a_ops->readpage(file, &folio->page);
+       if (mapping->a_ops->read_folio)
+               error = mapping->a_ops->read_folio(file, folio);
+       else
+               error = mapping->a_ops->readpage(file, &folio->page);
        if (error)
                return error;
 
@@ -3447,7 +3450,7 @@ int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct address_space *mapping = file->f_mapping;
 
-       if (!mapping->a_ops->readpage)
+       if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage)
                return -ENOEXEC;
        file_accessed(file);
        vma->vm_ops = &generic_file_vm_ops;
@@ -3505,6 +3508,8 @@ repeat:
 filler:
                if (filler)
                        err = filler(data, &folio->page);
+               else if (mapping->a_ops->read_folio)
+                       err = mapping->a_ops->read_folio(data, folio);
                else
                        err = mapping->a_ops->readpage(data, &folio->page);
 
index 60a28af25c4ee1c18aa9fe4f0b907c8b03e41078..76024c20a5a5b4401683157a8e916b864fd1b2cd 100644 (file)
@@ -15,7 +15,7 @@
  * explicitly requested by the application.  Readahead only ever
  * attempts to read folios that are not yet in the page cache.  If a
  * folio is present but not up-to-date, readahead will not try to read
- * it. In that case a simple ->readpage() will be requested.
+ * it. In that case a simple ->read_folio() will be requested.
  *
  * Readahead is triggered when an application read request (whether a
  * system call or a page fault) finds that the requested folio is not in
@@ -78,7 +78,7 @@
  * address space operation, for which mpage_readahead() is a canonical
  * implementation.  ->readahead() should normally initiate reads on all
  * folios, but may fail to read any or all folios without causing an I/O
- * error.  The page cache reading code will issue a ->readpage() request
+ * error.  The page cache reading code will issue a ->read_folio() request
  * for any folio which ->readahead() did not read, and only an error
  * from this will be final.
  *
  * were not fetched with readahead_folio().  This will allow a
  * subsequent synchronous readahead request to try them again.  If they
  * are left in the page cache, then they will be read individually using
- * ->readpage() which may be less efficient.
+ * ->read_folio() which may be less efficient.
  */
 
 #include <linux/kernel.h>
@@ -170,8 +170,11 @@ static void read_pages(struct readahead_control *rac)
                        }
                        folio_unlock(folio);
                }
+       } else if (aops->read_folio) {
+               while ((folio = readahead_folio(rac)) != NULL)
+                       aops->read_folio(rac->file, folio);
        } else {
-               while ((folio = readahead_folio(rac)))
+               while ((folio = readahead_folio(rac)) != NULL)
                        aops->readpage(rac->file, &folio->page);
        }
 
@@ -302,7 +305,8 @@ void force_page_cache_ra(struct readahead_control *ractl,
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        unsigned long max_pages, index;
 
-       if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readahead))
+       if (unlikely(!mapping->a_ops->read_folio &&
+                    !mapping->a_ops->readpage && !mapping->a_ops->readahead))
                return;
 
        /*
index 63c61f8b261188c34d26b276e5e81cd1a07eb878..7c19098b8b45fdde8247b14da19b417e801fd702 100644 (file)
@@ -3041,7 +3041,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
        /*
         * Read the swap header.
         */
-       if (!mapping->a_ops->readpage) {
+       if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage) {
                error = -EINVAL;
                goto bad_swap_unlock_inode;
        }