}
/**
- * ntfs_read_block - fill a @page of an address space with data
- * @page: page cache page to fill with data
+ * ntfs_read_block - fill a @folio of an address space with data
+ * @folio: page cache folio to fill with data
*
- * Fill the page @page of the address space belonging to the @page->host inode.
* We read each buffer asynchronously and when all buffers are read in, our io
* completion handler ntfs_end_buffer_read_async(), if required, automatically
- * applies the mst fixups to the page before finally marking it uptodate and
+ * applies the mst fixups to the folio before finally marking it uptodate and
* unlocking it.
*
* We only enforce allocated_size limit because i_size is checked for in
* generic_file_read().
*
* Contains an adapted version of fs/buffer.c::block_read_full_folio().
*/
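/*
 * Background: the "mst fixups" above are NTFS's multi sector transfer
 * protection.  Each protected record keeps an update sequence number in
 * the last two bytes of every sector; post_read_mst_fixup() checks and
 * undoes those on completion, which is why it has to run before the
 * folio can be marked uptodate.
 */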
-static int ntfs_read_block(struct page *page)
+static int ntfs_read_block(struct folio *folio)
{
loff_t i_size;
VCN vcn;
int i, nr;
unsigned char blocksize_bits;
- vi = page->mapping->host;
+ vi = folio->mapping->host;
ni = NTFS_I(vi);
vol = ni->vol;
blocksize = vol->sb->s_blocksize;
blocksize_bits = vol->sb->s_blocksize_bits;
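/*
 * Note: s_blocksize here is the NTFS block size (typically 512 bytes,
 * NTFS_BLOCK_SIZE), not the cluster size, so an order-0 folio is backed
 * by PAGE_SIZE / blocksize buffer heads, i.e. eight per 4KiB folio in
 * the common case.
 */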
- if (!page_has_buffers(page)) {
- create_empty_buffers(page, blocksize, 0);
- if (unlikely(!page_has_buffers(page))) {
- unlock_page(page);
- return -ENOMEM;
- }
- }
- bh = head = page_buffers(page);
- BUG_ON(!bh);
+ head = folio_buffers(folio);
+ if (!head)
+ head = folio_create_empty_buffers(folio, blocksize, 0);
+ bh = head;
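/*
 * Unlike the old page_has_buffers()/create_empty_buffers() pair,
 * folio_create_empty_buffers() returns the head buffer and cannot fail
 * (the buffers are allocated with __GFP_NOFAIL), so the -ENOMEM
 * fallback and the BUG_ON() are no longer needed.
 */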
/*
* We may be racing with truncate. To avoid some of the problems we
* may leave some buffers unmapped which are now allocated. This is
* not a problem since these buffers will just get mapped when a write
* occurs. In case of a shrinking truncate, we will detect this later
- * on due to the runlist being incomplete and if the page is being
+ * on due to the runlist being incomplete and if the folio is being
* fully truncated, truncate will throw it away as soon as we unlock
* it so no need to worry what we do with it.
*/
- iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
+ iblock = (s64)folio->index << (PAGE_SHIFT - blocksize_bits);
read_lock_irqsave(&ni->size_lock, flags);
lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
init_size = ni->initialized_size;
i_size = i_size_read(vi);
read_unlock_irqrestore(&ni->size_lock, flags);
if (unlikely(init_size > i_size)) {
	/* Race with shrinking truncate. */
	init_size = i_size;
}
zblock = (init_size + blocksize - 1) >> blocksize_bits;
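/*
 * Worked example with illustrative numbers: for blocksize = 512
 * (blocksize_bits = 9) and PAGE_SHIFT = 12, each folio covers eight
 * blocks, so folio->index == 3 gives iblock = 3 << 3 = 24.  If
 * allocated_size = 13000 and init_size = 12400, then
 * lblock = 13511 >> 9 = 26 (first block beyond the allocation) and
 * zblock = 12911 >> 9 = 25 (from this block onwards we zero rather
 * than read).
 */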
- /* Loop through all the buffers in the page. */
+ /* Loop through all the buffers in the folio. */
rl = NULL;
nr = i = 0;
do {
if (!err)
err = -EIO;
bh->b_blocknr = -1;
- SetPageError(page);
+ folio_set_error(folio);
ntfs_error(vol->sb, "Failed to read from inode 0x%lx, "
"attribute type 0x%x, vcn 0x%llx, "
"offset 0x%x because its location on "
/*
* Either iblock was outside lblock limits or
* ntfs_rl_vcn_to_lcn() returned error. Just zero that portion
- * of the page and set the buffer uptodate.
+ * of the folio and set the buffer uptodate.
*/
handle_hole:
bh->b_blocknr = -1UL;
clear_buffer_mapped(bh);
handle_zblock:
- zero_user(page, i * blocksize, blocksize);
+ folio_zero_range(folio, i * blocksize, blocksize);
if (likely(!err))
set_buffer_uptodate(bh);
} while (i++, iblock++, (bh = bh->b_this_page) != head);
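/*
 * On loop exit every buffer is either collected for asynchronous read
 * (nr of them) or zeroed/valid and marked uptodate.  If nr is non-zero,
 * the collected buffers are locked and submitted with
 * ntfs_end_buffer_read_async() as their completion handler and 0 is
 * returned; only the nr == 0 case falls through to the synchronous
 * tail below.
 */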
return 0;
}
/* No i/o was scheduled on any of the buffers. */
- if (likely(!PageError(page)))
- SetPageUptodate(page);
+ if (likely(!folio_test_error(folio)))
+ folio_mark_uptodate(folio);
else /* Signal synchronous i/o error. */
nr = -EIO;
- unlock_page(page);
+ folio_unlock(folio);
return nr;
}
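/*
 * Note that the synchronous tail above only runs when no read was
 * submitted; when i/o was started, the last ntfs_end_buffer_read_async()
 * to complete applies any mst fixups and then marks the folio uptodate
 * and unlocks it, as described in the comment above the function.
 */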
/* NInoNonResident() == NInoIndexAllocPresent() */
if (NInoNonResident(ni)) {
/* Normal, non-resident data stream. */
- return ntfs_read_block(page);
+ return ntfs_read_block(folio);
}
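/*
 * For context (an assumption based on the surrounding series): this
 * dispatch lives in ntfs_read_folio(), the filesystem's .read_folio
 * address_space operation, so the non-resident path hands the still
 * locked folio straight to ntfs_read_block() above.
 */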
/*
* Attribute is resident, implying it is not compressed or encrypted.