From: Linus Torvalds <torvalds@linux-foundation.org>
Date: Wed, 25 May 2022 02:55:07 +0000 (-0700)
Subject: Merge tag 'folio-5.19' of git://git.infradead.org/users/willy/pagecache

Merge tag 'folio-5.19' of git://git.infradead.org/users/willy/pagecache

Pull page cache updates from Matthew Wilcox:

 - Appoint myself page cache maintainer

 - Fix how scsicam uses the page cache

 - Use the memalloc_nofs_save() API to replace AOP_FLAG_NOFS (a usage
   sketch follows this list)

 - Remove the AOP flags entirely

 - Remove pagecache_write_begin() and pagecache_write_end()

 - Documentation updates

 - Convert several address_space operations to use folios (a before/after
   sketch of the read_folio conversion follows this list):
     - is_dirty_writeback
     - readpage becomes read_folio
     - releasepage becomes release_folio
     - freepage becomes free_folio

 - Change filler_t to require a struct file pointer be the first
   argument like ->read_folio
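
As a quick orientation, here is a minimal before/after sketch of the
central read_folio conversion.  The filesystem "foo" and its
foo_do_read() helper are hypothetical, for illustration only:

	/* Before: ->readpage operated on a struct page. */
	static int foo_readpage(struct file *file, struct page *page)
	{
		return foo_do_read(page);
	}

	/* After: ->read_folio operates on a struct folio.  A filesystem
	 * that still works page-at-a-time can reach the head page via
	 * &folio->page during the transition, as several conversions in
	 * this series do. */
	static int foo_read_folio(struct file *file, struct folio *folio)
	{
		return foo_do_read(&folio->page);
	}

	static const struct address_space_operations foo_aops = {
		.read_folio	= foo_read_folio,
	};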
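
The AOP_FLAG_NOFS replacement follows the scoped allocation pattern
from <linux/sched/mm.h>.  This is the generic usage of that API rather
than code taken from this pull:

	unsigned int nofs_flags;

	/* GFP_KERNEL allocations in this scope behave as GFP_NOFS and
	 * therefore cannot recurse into filesystem reclaim. */
	nofs_flags = memalloc_nofs_save();
	ptr = kmalloc(size, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flags);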

* tag 'folio-5.19' of git://git.infradead.org/users/willy/pagecache: (107 commits)
  nilfs2: Fix some kernel-doc comments
  Appoint myself page cache maintainer
  fs: Remove aops->freepage
  secretmem: Convert to free_folio
  nfs: Convert to free_folio
  orangefs: Convert to free_folio
  fs: Add free_folio address space operation
  fs: Convert drop_buffers() to use a folio
  fs: Change try_to_free_buffers() to take a folio
  jbd2: Convert release_buffer_page() to use a folio
  jbd2: Convert jbd2_journal_try_to_free_buffers to take a folio
  reiserfs: Convert release_buffer_page() to use a folio
  fs: Remove last vestiges of releasepage
  ubifs: Convert to release_folio
  reiserfs: Convert to release_folio
  orangefs: Convert to release_folio
  ocfs2: Convert to release_folio
  nilfs2: Remove comment about releasepage
  nfs: Convert to release_folio
  jfs: Convert to release_folio
  ...
---

fdaf9a5840acaab18694a19e0eb0aa51162eeeed
diff --cc fs/btrfs/extent_io.c
index 588c7c606a2c6,e7a6e87578599..8f6b544ae6167
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@@ -3799,26 -3754,6 +3799,27 @@@ out
  	return ret;
  }
  
- int btrfs_readpage(struct file *file, struct page *page)
++int btrfs_read_folio(struct file *file, struct folio *folio)
 +{
++	struct page *page = &folio->page;
 +	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
 +	u64 start = page_offset(page);
 +	u64 end = start + PAGE_SIZE - 1;
 +	struct btrfs_bio_ctrl bio_ctrl = { 0 };
 +	int ret;
 +
 +	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
 +
 +	ret = btrfs_do_readpage(page, NULL, &bio_ctrl, 0, NULL);
 +	/*
 +	 * If btrfs_do_readpage() failed we will want to submit the assembled
 +	 * bio to do the cleanup.
 +	 */
 +	if (bio_ctrl.bio)
 +		submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.compress_type);
 +	return ret;
 +}
 +
  static inline void contiguous_readpages(struct page *pages[], int nr_pages,
  					u64 start, u64 end,
  					struct extent_map **em_cached,
diff --cc fs/btrfs/extent_io.h
index 956fa434df435,151e9da5da2dc..23d4103c88316
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@@ -149,7 -178,11 +149,7 @@@ typedef struct extent_map *(get_extent_
  int try_release_extent_mapping(struct page *page, gfp_t mask);
  int try_release_extent_buffer(struct page *page);
  
- int btrfs_readpage(struct file *file, struct page *page);
 -int __must_check submit_one_bio(struct bio *bio, int mirror_num,
 -				unsigned long bio_flags);
 -int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
 -		      struct btrfs_bio_ctrl *bio_ctrl,
 -		      unsigned int read_flags, u64 *prev_em_start);
++int btrfs_read_folio(struct file *file, struct folio *folio);
  int extent_write_full_page(struct page *page, struct writeback_control *wbc);
  int extent_write_locked_range(struct inode *inode, u64 start, u64 end);
  int extent_writepages(struct address_space *mapping,
diff --cc fs/btrfs/send.c
index 5a05beabf0c34,8985d115559df..fa56890ff81fc
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@@ -4905,13 -4984,14 +4905,13 @@@ static int put_file_data(struct send_ct
  			}
  		}
  
 -		if (PageReadahead(page)) {
 -			page_cache_async_readahead(inode->i_mapping, &sctx->ra,
 -						NULL, page_folio(page), index,
 -						last_index + 1 - index);
 -		}
 +		if (PageReadahead(page))
 +			page_cache_async_readahead(sctx->cur_inode->i_mapping,
- 						   &sctx->ra, NULL, page, index,
- 						   last_index + 1 - index);
++						   &sctx->ra, NULL, page_folio(page),
++						   index, last_index + 1 - index);
  
  		if (!PageUptodate(page)) {
- 			btrfs_readpage(NULL, page);
+ 			btrfs_read_folio(NULL, page_folio(page));
  			lock_page(page);
  			if (!PageUptodate(page)) {
  				unlock_page(page);
diff --cc fs/erofs/fscache.c
index 7e4417167d0b4,0000000000000..a5cc4ed2cd0d0
mode 100644,000000..100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@@ -1,521 -1,0 +1,519 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
 +/*
 + * Copyright (C) 2022, Alibaba Cloud
 + */
 +#include <linux/fscache.h>
 +#include "internal.h"
 +
 +static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space *mapping,
 +					     loff_t start, size_t len)
 +{
 +	struct netfs_io_request *rreq;
 +
 +	rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
 +	if (!rreq)
 +		return ERR_PTR(-ENOMEM);
 +
 +	rreq->start	= start;
 +	rreq->len	= len;
 +	rreq->mapping	= mapping;
 +	INIT_LIST_HEAD(&rreq->subrequests);
 +	refcount_set(&rreq->ref, 1);
 +	return rreq;
 +}
 +
 +static void erofs_fscache_put_request(struct netfs_io_request *rreq)
 +{
 +	if (!refcount_dec_and_test(&rreq->ref))
 +		return;
 +	if (rreq->cache_resources.ops)
 +		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
 +	kfree(rreq);
 +}
 +
 +static void erofs_fscache_put_subrequest(struct netfs_io_subrequest *subreq)
 +{
 +	if (!refcount_dec_and_test(&subreq->ref))
 +		return;
 +	erofs_fscache_put_request(subreq->rreq);
 +	kfree(subreq);
 +}
 +
 +static void erofs_fscache_clear_subrequests(struct netfs_io_request *rreq)
 +{
 +	struct netfs_io_subrequest *subreq;
 +
 +	while (!list_empty(&rreq->subrequests)) {
 +		subreq = list_first_entry(&rreq->subrequests,
 +				struct netfs_io_subrequest, rreq_link);
 +		list_del(&subreq->rreq_link);
 +		erofs_fscache_put_subrequest(subreq);
 +	}
 +}
 +
 +static void erofs_fscache_rreq_unlock_folios(struct netfs_io_request *rreq)
 +{
 +	struct netfs_io_subrequest *subreq;
 +	struct folio *folio;
 +	unsigned int iopos = 0;
 +	pgoff_t start_page = rreq->start / PAGE_SIZE;
 +	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
 +	bool subreq_failed = false;
 +
 +	XA_STATE(xas, &rreq->mapping->i_pages, start_page);
 +
 +	subreq = list_first_entry(&rreq->subrequests,
 +				  struct netfs_io_subrequest, rreq_link);
 +	subreq_failed = (subreq->error < 0);
 +
 +	rcu_read_lock();
 +	xas_for_each(&xas, folio, last_page) {
 +		unsigned int pgpos =
 +			(folio_index(folio) - start_page) * PAGE_SIZE;
 +		unsigned int pgend = pgpos + folio_size(folio);
 +		bool pg_failed = false;
 +
 +		for (;;) {
 +			if (!subreq) {
 +				pg_failed = true;
 +				break;
 +			}
 +
 +			pg_failed |= subreq_failed;
 +			if (pgend < iopos + subreq->len)
 +				break;
 +
 +			iopos += subreq->len;
 +			if (!list_is_last(&subreq->rreq_link,
 +					  &rreq->subrequests)) {
 +				subreq = list_next_entry(subreq, rreq_link);
 +				subreq_failed = (subreq->error < 0);
 +			} else {
 +				subreq = NULL;
 +				subreq_failed = false;
 +			}
 +			if (pgend == iopos)
 +				break;
 +		}
 +
 +		if (!pg_failed)
 +			folio_mark_uptodate(folio);
 +
 +		folio_unlock(folio);
 +	}
 +	rcu_read_unlock();
 +}
 +
 +static void erofs_fscache_rreq_complete(struct netfs_io_request *rreq)
 +{
 +	erofs_fscache_rreq_unlock_folios(rreq);
 +	erofs_fscache_clear_subrequests(rreq);
 +	erofs_fscache_put_request(rreq);
 +}
 +
 +static void erofs_fscache_subreq_complete(void *priv,
 +		ssize_t transferred_or_error, bool was_async)
 +{
 +	struct netfs_io_subrequest *subreq = priv;
 +	struct netfs_io_request *rreq = subreq->rreq;
 +
 +	if (IS_ERR_VALUE(transferred_or_error))
 +		subreq->error = transferred_or_error;
 +
 +	if (atomic_dec_and_test(&rreq->nr_outstanding))
 +		erofs_fscache_rreq_complete(rreq);
 +
 +	erofs_fscache_put_subrequest(subreq);
 +}
 +
 +/*
 + * Read data from fscache into the page cache region described by @rreq.
 + * Both @rreq->start and @rreq->len shall be aligned with PAGE_SIZE.
 + * @pstart describes the start physical address in the cache file.
 + */
 +static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
 +				struct netfs_io_request *rreq, loff_t pstart)
 +{
 +	enum netfs_io_source source;
 +	struct super_block *sb = rreq->mapping->host->i_sb;
 +	struct netfs_io_subrequest *subreq;
 +	struct netfs_cache_resources *cres = &rreq->cache_resources;
 +	struct iov_iter iter;
 +	loff_t start = rreq->start;
 +	size_t len = rreq->len;
 +	size_t done = 0;
 +	int ret;
 +
 +	atomic_set(&rreq->nr_outstanding, 1);
 +
 +	ret = fscache_begin_read_operation(cres, cookie);
 +	if (ret)
 +		goto out;
 +
 +	while (done < len) {
 +		subreq = kzalloc(sizeof(struct netfs_io_subrequest),
 +				 GFP_KERNEL);
 +		if (subreq) {
 +			INIT_LIST_HEAD(&subreq->rreq_link);
 +			refcount_set(&subreq->ref, 2);
 +			subreq->rreq = rreq;
 +			refcount_inc(&rreq->ref);
 +		} else {
 +			ret = -ENOMEM;
 +			goto out;
 +		}
 +
 +		subreq->start = pstart + done;
 +		subreq->len = len - done;
 +		subreq->flags = 1 << NETFS_SREQ_ONDEMAND;
 +
 +		list_add_tail(&subreq->rreq_link, &rreq->subrequests);
 +
 +		source = cres->ops->prepare_read(subreq, LLONG_MAX);
 +		if (WARN_ON(subreq->len == 0))
 +			source = NETFS_INVALID_READ;
 +		if (source != NETFS_READ_FROM_CACHE) {
 +			erofs_err(sb, "failed to fscache prepare_read (source %d)",
 +				  source);
 +			ret = -EIO;
 +			subreq->error = ret;
 +			erofs_fscache_put_subrequest(subreq);
 +			goto out;
 +		}
 +
 +		atomic_inc(&rreq->nr_outstanding);
 +
 +		iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
 +				start + done, subreq->len);
 +
 +		ret = fscache_read(cres, subreq->start, &iter,
 +				   NETFS_READ_HOLE_FAIL,
 +				   erofs_fscache_subreq_complete, subreq);
 +		if (ret == -EIOCBQUEUED)
 +			ret = 0;
 +		if (ret) {
 +			erofs_err(sb, "failed to fscache_read (ret %d)", ret);
 +			goto out;
 +		}
 +
 +		done += subreq->len;
 +	}
 +out:
 +	if (atomic_dec_and_test(&rreq->nr_outstanding))
 +		erofs_fscache_rreq_complete(rreq);
 +
 +	return ret;
 +}
 +
- static int erofs_fscache_meta_readpage(struct file *data, struct page *page)
++static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
 +{
 +	int ret;
- 	struct folio *folio = page_folio(page);
 +	struct super_block *sb = folio_mapping(folio)->host->i_sb;
 +	struct netfs_io_request *rreq;
 +	struct erofs_map_dev mdev = {
 +		.m_deviceid = 0,
 +		.m_pa = folio_pos(folio),
 +	};
 +
 +	ret = erofs_map_dev(sb, &mdev);
 +	if (ret)
 +		goto out;
 +
 +	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
 +				folio_pos(folio), folio_size(folio));
 +	if (IS_ERR(rreq)) {
 +		ret = PTR_ERR(rreq);
 +		goto out;
 +	}
 +
 +	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
 +				rreq, mdev.m_pa);
 +out:
 +	folio_unlock(folio);
 +	return ret;
 +}
 +
- static int erofs_fscache_readpage_inline(struct folio *folio,
++static int erofs_fscache_read_folio_inline(struct folio *folio,
 +					 struct erofs_map_blocks *map)
 +{
 +	struct super_block *sb = folio_mapping(folio)->host->i_sb;
 +	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 +	erofs_blk_t blknr;
 +	size_t offset, len;
 +	void *src, *dst;
 +
 +	/* For tail packing layout, the offset may be non-zero. */
 +	offset = erofs_blkoff(map->m_pa);
 +	blknr = erofs_blknr(map->m_pa);
 +	len = map->m_llen;
 +
 +	src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
 +	if (IS_ERR(src))
 +		return PTR_ERR(src);
 +
 +	dst = kmap_local_folio(folio, 0);
 +	memcpy(dst, src + offset, len);
 +	memset(dst + len, 0, PAGE_SIZE - len);
 +	kunmap_local(dst);
 +
 +	erofs_put_metabuf(&buf);
 +	return 0;
 +}
 +
- static int erofs_fscache_readpage(struct file *file, struct page *page)
++static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
 +{
- 	struct folio *folio = page_folio(page);
 +	struct inode *inode = folio_mapping(folio)->host;
 +	struct super_block *sb = inode->i_sb;
 +	struct erofs_map_blocks map;
 +	struct erofs_map_dev mdev;
 +	struct netfs_io_request *rreq;
 +	erofs_off_t pos;
 +	loff_t pstart;
 +	int ret;
 +
 +	DBG_BUGON(folio_size(folio) != EROFS_BLKSIZ);
 +
 +	pos = folio_pos(folio);
 +	map.m_la = pos;
 +
 +	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
 +	if (ret)
 +		goto out_unlock;
 +
 +	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
 +		folio_zero_range(folio, 0, folio_size(folio));
 +		goto out_uptodate;
 +	}
 +
 +	if (map.m_flags & EROFS_MAP_META) {
- 		ret = erofs_fscache_readpage_inline(folio, &map);
++		ret = erofs_fscache_read_folio_inline(folio, &map);
 +		goto out_uptodate;
 +	}
 +
 +	mdev = (struct erofs_map_dev) {
 +		.m_deviceid = map.m_deviceid,
 +		.m_pa = map.m_pa,
 +	};
 +
 +	ret = erofs_map_dev(sb, &mdev);
 +	if (ret)
 +		goto out_unlock;
 +
 +	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
 +				folio_pos(folio), folio_size(folio));
 +	if (IS_ERR(rreq)) {
 +		ret = PTR_ERR(rreq);
 +		goto out_unlock;
 +	}
 +
 +	pstart = mdev.m_pa + (pos - map.m_la);
 +	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
 +				rreq, pstart);
 +
 +out_uptodate:
 +	if (!ret)
 +		folio_mark_uptodate(folio);
 +out_unlock:
 +	folio_unlock(folio);
 +	return ret;
 +}
 +
 +static void erofs_fscache_advance_folios(struct readahead_control *rac,
 +					 size_t len, bool unlock)
 +{
 +	while (len) {
 +		struct folio *folio = readahead_folio(rac);
 +		len -= folio_size(folio);
 +		if (unlock) {
 +			folio_mark_uptodate(folio);
 +			folio_unlock(folio);
 +		}
 +	}
 +}
 +
 +static void erofs_fscache_readahead(struct readahead_control *rac)
 +{
 +	struct inode *inode = rac->mapping->host;
 +	struct super_block *sb = inode->i_sb;
 +	size_t len, count, done = 0;
 +	erofs_off_t pos;
 +	loff_t start, offset;
 +	int ret;
 +
 +	if (!readahead_count(rac))
 +		return;
 +
 +	start = readahead_pos(rac);
 +	len = readahead_length(rac);
 +
 +	do {
 +		struct erofs_map_blocks map;
 +		struct erofs_map_dev mdev;
 +		struct netfs_io_request *rreq;
 +
 +		pos = start + done;
 +		map.m_la = pos;
 +
 +		ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
 +		if (ret)
 +			return;
 +
 +		offset = start + done;
 +		count = min_t(size_t, map.m_llen - (pos - map.m_la),
 +			      len - done);
 +
 +		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
 +			struct iov_iter iter;
 +
 +			iov_iter_xarray(&iter, READ, &rac->mapping->i_pages,
 +					offset, count);
 +			iov_iter_zero(count, &iter);
 +
 +			erofs_fscache_advance_folios(rac, count, true);
 +			ret = count;
 +			continue;
 +		}
 +
 +		if (map.m_flags & EROFS_MAP_META) {
 +			struct folio *folio = readahead_folio(rac);
 +
- 			ret = erofs_fscache_readpage_inline(folio, &map);
++			ret = erofs_fscache_read_folio_inline(folio, &map);
 +			if (!ret) {
 +				folio_mark_uptodate(folio);
 +				ret = folio_size(folio);
 +			}
 +
 +			folio_unlock(folio);
 +			continue;
 +		}
 +
 +		mdev = (struct erofs_map_dev) {
 +			.m_deviceid = map.m_deviceid,
 +			.m_pa = map.m_pa,
 +		};
 +		ret = erofs_map_dev(sb, &mdev);
 +		if (ret)
 +			return;
 +
 +		rreq = erofs_fscache_alloc_request(rac->mapping, offset, count);
 +		if (IS_ERR(rreq))
 +			return;
 +		/*
 +		 * Drop the folio refs here.  The folios are unlocked in
 +		 * erofs_fscache_rreq_unlock_folios() when the rreq completes.
 +		 */
 +		erofs_fscache_advance_folios(rac, count, false);
 +		ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
 +					rreq, mdev.m_pa + (pos - map.m_la));
 +		if (!ret)
 +			ret = count;
 +	} while (ret > 0 && ((done += ret) < len));
 +}
 +
 +static const struct address_space_operations erofs_fscache_meta_aops = {
- 	.readpage = erofs_fscache_meta_readpage,
++	.read_folio = erofs_fscache_meta_read_folio,
 +};
 +
 +const struct address_space_operations erofs_fscache_access_aops = {
- 	.readpage = erofs_fscache_readpage,
++	.read_folio = erofs_fscache_read_folio,
 +	.readahead = erofs_fscache_readahead,
 +};
 +
 +int erofs_fscache_register_cookie(struct super_block *sb,
 +				  struct erofs_fscache **fscache,
 +				  char *name, bool need_inode)
 +{
 +	struct fscache_volume *volume = EROFS_SB(sb)->volume;
 +	struct erofs_fscache *ctx;
 +	struct fscache_cookie *cookie;
 +	int ret;
 +
 +	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 +	if (!ctx)
 +		return -ENOMEM;
 +
 +	cookie = fscache_acquire_cookie(volume, FSCACHE_ADV_WANT_CACHE_SIZE,
 +					name, strlen(name), NULL, 0, 0);
 +	if (!cookie) {
 +		erofs_err(sb, "failed to get cookie for %s", name);
 +		ret = -EINVAL;
 +		goto err;
 +	}
 +
 +	fscache_use_cookie(cookie, false);
 +	ctx->cookie = cookie;
 +
 +	if (need_inode) {
 +		struct inode *const inode = new_inode(sb);
 +
 +		if (!inode) {
 +			erofs_err(sb, "failed to get anon inode for %s", name);
 +			ret = -ENOMEM;
 +			goto err_cookie;
 +		}
 +
 +		set_nlink(inode, 1);
 +		inode->i_size = OFFSET_MAX;
 +		inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
 +		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 +
 +		ctx->inode = inode;
 +	}
 +
 +	*fscache = ctx;
 +	return 0;
 +
 +err_cookie:
 +	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
 +	fscache_relinquish_cookie(ctx->cookie, false);
 +	ctx->cookie = NULL;
 +err:
 +	kfree(ctx);
 +	return ret;
 +}
 +
 +void erofs_fscache_unregister_cookie(struct erofs_fscache **fscache)
 +{
 +	struct erofs_fscache *ctx = *fscache;
 +
 +	if (!ctx)
 +		return;
 +
 +	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
 +	fscache_relinquish_cookie(ctx->cookie, false);
 +	ctx->cookie = NULL;
 +
 +	iput(ctx->inode);
 +	ctx->inode = NULL;
 +
 +	kfree(ctx);
 +	*fscache = NULL;
 +}
 +
 +int erofs_fscache_register_fs(struct super_block *sb)
 +{
 +	struct erofs_sb_info *sbi = EROFS_SB(sb);
 +	struct fscache_volume *volume;
 +	char *name;
 +	int ret = 0;
 +
 +	name = kasprintf(GFP_KERNEL, "erofs,%s", sbi->opt.fsid);
 +	if (!name)
 +		return -ENOMEM;
 +
 +	volume = fscache_acquire_volume(name, NULL, NULL, 0);
 +	if (IS_ERR_OR_NULL(volume)) {
 +		erofs_err(sb, "failed to register volume for %s", name);
 +		ret = volume ? PTR_ERR(volume) : -EOPNOTSUPP;
 +		volume = NULL;
 +	}
 +
 +	sbi->volume = volume;
 +	kfree(name);
 +	return ret;
 +}
 +
 +void erofs_fscache_unregister_fs(struct super_block *sb)
 +{
 +	struct erofs_sb_info *sbi = EROFS_SB(sb);
 +
 +	fscache_relinquish_volume(sbi->volume, NULL, false);
 +	sbi->volume = NULL;
 +}
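
For context, the registration helpers above pair up at mount time
roughly as below.  This is a sketch, not the exact in-tree wiring:
foo_mount_fscache() is a hypothetical wrapper and the sbi->s_fscache
field is assumed for illustration:

	static int foo_mount_fscache(struct super_block *sb)
	{
		struct erofs_sb_info *sbi = EROFS_SB(sb);
		int ret;

		/* Register the fscache volume for this filesystem. */
		ret = erofs_fscache_register_fs(sb);
		if (ret)
			return ret;

		/* Acquire a cookie (with a backing anon inode) for the
		 * primary data blob named by the fsid mount option. */
		ret = erofs_fscache_register_cookie(sb, &sbi->s_fscache,
						    sbi->opt.fsid, true);
		if (ret)
			erofs_fscache_unregister_fs(sb);
		return ret;
	}

On unmount, erofs_fscache_unregister_cookie(&sbi->s_fscache) followed
by erofs_fscache_unregister_fs(sb) tears this down in reverse order.
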
diff --cc fs/ext4/inode.c
index 7555cbe771480,987ea77e672d2..3dce7d058985b
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@@ -2956,17 -2944,17 +2956,16 @@@ static int ext4_da_write_begin(struct f
  
  	index = pos >> PAGE_SHIFT;
  
 -	if (ext4_nonda_switch(inode->i_sb) || S_ISLNK(inode->i_mode) ||
 -	    ext4_verity_in_progress(inode)) {
 +	if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) {
  		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
  		return ext4_write_begin(file, mapping, pos,
- 					len, flags, pagep, fsdata);
+ 					len, pagep, fsdata);
  	}
  	*fsdata = (void *)0;
- 	trace_ext4_da_write_begin(inode, pos, len, flags);
+ 	trace_ext4_da_write_begin(inode, pos, len);
  
  	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
- 		ret = ext4_da_write_inline_data_begin(mapping, inode,
- 						      pos, len, flags,
+ 		ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len,
  						      pagep, fsdata);
  		if (ret < 0)
  			return ret;
diff --cc mm/readahead.c
index 26bf74a6b2fe6,39983a3a93f04..b78921b547547
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@@ -110,10 -110,9 +110,10 @@@
   * were not fetched with readahead_folio().  This will allow a
   * subsequent synchronous readahead request to try them again.  If they
   * are left in the page cache, then they will be read individually using
-  * ->readpage() which may be less efficient.
+  * ->read_folio() which may be less efficient.
   */
  
 +#include <linux/blkdev.h>
  #include <linux/kernel.h>
  #include <linux/dax.h>
  #include <linux/gfp.h>
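
The contract in the comment above shapes a typical ->readahead
implementation.  A minimal sketch, where foo_read_folio_async() is a
hypothetical helper that starts the I/O and unlocks each folio when
that I/O completes:

	static void foo_readahead(struct readahead_control *rac)
	{
		struct folio *folio;

		/* Each readahead_folio() call consumes one locked folio.
		 * Folios left unconsumed are unlocked by the core and
		 * read later via ->read_folio(), as described above. */
		while ((folio = readahead_folio(rac)) != NULL)
			foo_read_folio_async(rac->mapping->host, folio);
	}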