 	*map->m_next_extent = pgofs + map->m_len;
 	/* for hardware encryption, but to avoid potential issue in future */
-	if (flag == F2FS_GET_BLOCK_DIO) {
+	if (flag == F2FS_GET_BLOCK_DIO)
 		f2fs_wait_on_block_writeback_range(inode,
 					map->m_pblk, map->m_len);
-		invalidate_mapping_pages(META_MAPPING(sbi),
-				map->m_pblk, map->m_pblk + map->m_len - 1);
-	}
 
 	if (map->m_multidev_dio) {
 		block_t blk_addr = map->m_pblk;

 		 */
 		f2fs_wait_on_block_writeback_range(inode,
 					map->m_pblk, map->m_len);
-		invalidate_mapping_pages(META_MAPPING(sbi),
-				map->m_pblk, map->m_pblk + map->m_len - 1);
 
 		if (map->m_multidev_dio) {
 			block_t blk_addr = map->m_pblk;
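The two data.c hunks above (the second one starts at the trailing */ of a block comment) both remove the open-coded invalidate_mapping_pages() call from the DIO block-mapping path. Nothing is lost: as the segment.c hunk further down shows, f2fs_wait_on_block_writeback_range() now performs that invalidation itself, and only for inodes that need post-read processing. As a rough sketch, assuming no other changes to the surrounding context, the first hunk reduces to:

	/* for hardware encryption, but to avoid potential issue in future */
	if (flag == F2FS_GET_BLOCK_DIO)
		f2fs_wait_on_block_writeback_range(inode,
					map->m_pblk, map->m_len);
	/*
	 * The invalidate_mapping_pages(META_MAPPING(sbi), ...) call that used
	 * to follow is gone; the helper above now drops those pages itself,
	 * and skips the work entirely for non-post-read inodes.
	 */
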
 		.submitted = false,
 		.compr_blocks = compr_blocks,
 		.need_lock = LOCK_RETRY,
+		.post_read = f2fs_post_read_required(inode),
 		.io_type = io_type,
 		.io_wbc = wbc,
 		.bio = bio,

 	bool retry;		/* need to reallocate block address */
 	int compr_blocks;	/* # of compressed block addresses */
 	bool encrypted;		/* indicate file is encrypted */
+	bool post_read;		/* require post read */
 	enum iostat_type io_type;	/* io type */
 	struct writeback_control *io_wbc; /* writeback control */
 	struct bio **bio;		/* bio for ipu */
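The new post_read field of struct f2fs_io_info is filled in from f2fs_post_read_required() when the fio is set up (see the .post_read initializer in the hunk above). For reference, a minimal sketch of that predicate, approximately as defined in fs/f2fs/f2fs.h in mainline kernels around this change (the exact body may differ between versions):

static inline bool f2fs_post_read_required(struct inode *inode)
{
	/* post-read processing: decryption, fs-verity verification, or
	 * decompression */
	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
		f2fs_compressed_file(inode);
}

These are the only inodes whose data blocks get shadowed in META_MAPPING(sbi), which is the assumption the conditional invalidation in the hunks below relies on.
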
 		goto drop_bio;
 	}
 
-	invalidate_mapping_pages(META_MAPPING(sbi),
+	if (fio->post_read)
+		invalidate_mapping_pages(META_MAPPING(sbi),
 				fio->new_blkaddr, fio->new_blkaddr);
 
 	stat_inc_inplace_blocks(fio->sbi);
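For clarity, this is roughly what the in-place-update path in f2fs_inplace_write_data() looks like once the hunk above is applied; the META_MAPPING invalidation for the overwritten block is now skipped when the inode needs no post-read processing:

	/* fio->post_read was filled from f2fs_post_read_required() at setup */
	if (fio->post_read)
		invalidate_mapping_pages(META_MAPPING(sbi),
				fio->new_blkaddr, fio->new_blkaddr);

	stat_inc_inplace_blocks(fio->sbi);
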
 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
 								block_t len)
 {
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	block_t i;
 
+	if (!f2fs_post_read_required(inode))
+		return;
+
 	for (i = 0; i < len; i++)
 		f2fs_wait_on_block_writeback(inode, blkaddr + i);
+
+	invalidate_mapping_pages(META_MAPPING(sbi), blkaddr, blkaddr + len - 1);
 }
static int read_compacted_summaries(struct f2fs_sb_info *sbi)
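For context, the per-block helper that the loop above calls already returns early for inodes without post-read processing and operates on block-address-indexed pages of META_MAPPING; a sketch, approximately as it appears in fs/f2fs/segment.c (argument details of f2fs_wait_on_page_writeback() vary slightly across kernel versions):

void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *cpage;

	/* only encrypted/verity/compressed files cache data blocks here */
	if (!f2fs_post_read_required(inode))
		return;

	/* META_MAPPING pages for data blocks are indexed by block address */
	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
	if (cpage) {
		f2fs_wait_on_page_writeback(cpage, DATA, true, true);
		f2fs_put_page(cpage, 1);
	}
}

That is why the range variant can both gain the same early return and absorb the invalidate_mapping_pages() call: for post-read inodes it waits on and then drops the cached copies for the whole blkaddr .. blkaddr + len - 1 range, and for everything else it is now a no-op.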