{
struct address_space *mapping = filp->f_mapping;
- if (!mapping->a_ops->readpage)
+ if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage)
return -ENOEXEC;
file_accessed(filp);
/* Ok, it's mapped. Make sure it's up-to-date */
if (!folio_test_uptodate(folio)) {
- err = mapping->a_ops->readpage(NULL, &folio->page);
+ if (mapping->a_ops->read_folio)
+ err = mapping->a_ops->read_folio(NULL, folio);
+ else
+ err = mapping->a_ops->readpage(NULL, &folio->page);
if (err) {
folio_put(folio);
goto out;
{
struct address_space *mapping = file->f_mapping;
- if (!mapping->a_ops->readpage)
+ if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage)
return -ENOEXEC;
file_accessed(file);
vma->vm_ops = &ceph_vmops;
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*readpage)(struct file *, struct page *);
+ int (*read_folio)(struct file *, struct folio *);
/* Write back some dirty pages from this mapping. */
int (*writepages)(struct address_space *, struct writeback_control *);
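
A filesystem mid-conversion can satisfy the new hook by delegating to its existing page-based path. A minimal sketch, assuming a hypothetical myfs with a legacy myfs_readpage() (neither name is from this patch):

static int myfs_read_folio(struct file *file, struct folio *folio)
{
        /* Order-0 folios are single pages, so the legacy
         * page-based helper can service the read directly. */
        return myfs_readpage(file, &folio->page);
}
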
- * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
+ * and in page-cache. If both ->read_folio and ->readpage are NULL it
+ * must be shmem_mapping(),
* see uprobe_register().
*/
- if (mapping->a_ops->readpage)
+ if (mapping->a_ops->read_folio || mapping->a_ops->readpage)
page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
else
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
return -EINVAL;
/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
- if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
+ if (!inode->i_mapping->a_ops->read_folio &&
+ !inode->i_mapping->a_ops->readpage &&
+ !shmem_mapping(inode->i_mapping))
return -EIO;
/* Racy, just to catch the obvious mistakes */
if (offset > i_size_read(inode))
*/
folio_clear_error(folio);
- /* Start the actual read. The read will unlock the page. */
+ /* Start the actual read. The read will unlock the folio. */
- error = mapping->a_ops->readpage(file, &folio->page);
+ if (mapping->a_ops->read_folio)
+ error = mapping->a_ops->read_folio(file, folio);
+ else
+ error = mapping->a_ops->readpage(file, &folio->page);
if (error)
return error;
{
struct address_space *mapping = file->f_mapping;
- if (!mapping->a_ops->readpage)
+ if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage)
return -ENOEXEC;
file_accessed(file);
vma->vm_ops = &generic_file_vm_ops;
filler:
if (filler)
err = filler(data, &folio->page);
+ else if (mapping->a_ops->read_folio)
+ err = mapping->a_ops->read_folio(data, folio);
else
err = mapping->a_ops->readpage(data, &folio->page);
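
For callers, nothing changes: passing a NULL filler selects the fallback above. An illustrative call, with variable names assumed rather than taken from this patch:

/* With filler == NULL, do_read_cache_folio() ends up calling the
 * mapping's ->read_folio (or the legacy ->readpage). */
folio = read_cache_folio(mapping, index, NULL, file);
if (IS_ERR(folio))
        return PTR_ERR(folio);
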
* explicitly requested by the application. Readahead only ever
* attempts to read folios that are not yet in the page cache. If a
* folio is present but not up-to-date, readahead will not try to read
- * it. In that case a simple ->readpage() will be requested.
+ * it. In that case a simple ->read_folio() will be requested.
*
* Readahead is triggered when an application read request (whether a
* system call or a page fault) finds that the requested folio is not in
* address space operation, for which mpage_readahead() is a canonical
* implementation. ->readahead() should normally initiate reads on all
* folios, but may fail to read any or all folios without causing an I/O
- * error. The page cache reading code will issue a ->readpage() request
+ * error. The page cache reading code will issue a ->read_folio() request
* for any folio which ->readahead() did not read, and only an error
* from this will be final.
*
* were not fetched with readahead_folio(). This will allow a
* subsequent synchronous readahead request to try them again. If they
* are left in the page cache, then they will be read individually using
- * ->readpage() which may be less efficient.
+ * ->read_folio() which may be less efficient.
*/
#include <linux/kernel.h>
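
To make that fallback contract concrete, here is a hedged sketch of a ->readahead() implementation; myfs_fill_folio() is an assumed helper that synchronously reads one folio and returns 0 on success:

static void myfs_readahead(struct readahead_control *rac)
{
        struct folio *folio;

        /* Folios returned by readahead_folio() are locked and not
         * up-to-date; unlock each one once its read completes.
         * Anything left unconsumed is cleaned up by the caller and
         * read later through ->read_folio(). */
        while ((folio = readahead_folio(rac)) != NULL) {
                if (myfs_fill_folio(rac->file, folio) == 0)
                        folio_mark_uptodate(folio);
                folio_unlock(folio);
        }
}
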
}
folio_unlock(folio);
}
+ } else if (aops->read_folio) {
+ while ((folio = readahead_folio(rac)) != NULL)
+ aops->read_folio(rac->file, folio);
} else {
- while ((folio = readahead_folio(rac)))
+ while ((folio = readahead_folio(rac)) != NULL)
aops->readpage(rac->file, &folio->page);
}
struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
unsigned long max_pages, index;
- if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readahead))
+ if (unlikely(!mapping->a_ops->read_folio &&
+ !mapping->a_ops->readpage && !mapping->a_ops->readahead))
return;
/*
/*
* Read the swap header.
*/
- if (!mapping->a_ops->readpage) {
+ if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage) {
error = -EINVAL;
goto bad_swap_unlock_inode;
}
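
The read this check guards follows a few lines below in the same function; simplified from the surrounding code (not part of the diff), the swap header is fetched through the aops just verified:

page = read_mapping_page(mapping, 0, swap_file);
if (IS_ERR(page)) {
        error = PTR_ERR(page);
        goto bad_swap_unlock_inode;
}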