* blkdev_issue_flush - queue a flush
* @bdev: blockdev to issue flush for
* @gfp_mask: memory allocation flags (for bio_alloc)
- * @error_sector: error sector
*
* Description:
- * Issue a flush for the block device in question. Caller can supply
- * room for storing the error offset in case of a flush error, if they
- * wish to.
+ * Issue a flush for the block device in question.
*/
-int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
- sector_t *error_sector)
+int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
{
struct request_queue *q;
struct bio *bio;
bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
ret = submit_bio_wait(bio);
-
- /*
- * The driver must store the error location in ->bi_sector, if
- * it supports it. For non-stacked drivers, this should be
- * copied from blk_rq_pos(rq).
- */
- if (error_sector)
- *error_sector = bio->bi_iter.bi_sector;
-
bio_put(bio);
return ret;
}
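With @error_sector gone, callers get only the errno-style return value from submit_bio_wait(); a minimal sketch of the new calling convention (example_sync_dev is a hypothetical helper, mirroring the -EOPNOTSUPP handling in the fs/block_dev.c hunk further down):

#include <linux/blkdev.h>

/* Hypothetical caller of the two-argument blkdev_issue_flush(). */
static int example_sync_dev(struct block_device *bdev)
{
	int ret;

	/* Flush the volatile write cache; no failing sector is reported. */
	ret = blkdev_issue_flush(bdev, GFP_KERNEL);
	if (ret == -EOPNOTSUPP)
		/* The device has no writeback cache: nothing to flush. */
		ret = 0;
	return ret;
}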
dm_integrity_flush_buffers(ic);
if (ic->meta_dev)
- blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL);
+ blkdev_issue_flush(ic->dev->bdev, GFP_NOIO);
limit = ic->provided_data_sectors;
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
if (ret == 0)
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO);
return ret;
}
/* Flush drive cache (this will also sync data) */
if (ret == 0)
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO);
return ret;
}
/* If there are no dirty metadata blocks, just flush the device cache */
if (list_empty(&write_list)) {
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO);
goto err;
}
}
/* flush the disk cache after recovery if necessary */
- ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL, NULL);
+ ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL);
out:
__free_page(page);
return ret;
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
- if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
+ if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL))
return NVME_SC_INTERNAL | NVME_SC_DNR;
return 0;
}
* i_mutex and doing so causes performance issues with concurrent
* O_SYNC writers to a block device.
*/
- error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
+ error = blkdev_issue_flush(bdev, GFP_KERNEL);
if (error == -EOPNOTSUPP)
error = 0;
ret = ext4_fsync_journal(inode, datasync, &needs_barrier);
if (needs_barrier) {
- err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
if (!ret)
ret = err;
}
if (ret < 0)
goto err_out;
if (barrier)
- blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);
+ blkdev_issue_flush(sb->s_bdev, GFP_NOFS);
skip_zeroout:
ext4_lock_group(sb, group);
needs_barrier = true;
if (needs_barrier) {
int err;
- err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
+ err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
if (!ret)
ret = err;
}
if (err)
return err;
- return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
}
}
if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
- blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
inode_unlock(inode);
mutex_unlock(&sbi->vh_mutex);
if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
- blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
+ blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
return error;
}
* jbd2_cleanup_journal_tail() doesn't get called all that often.
*/
if (journal->j_flags & JBD2_BARRIER)
- blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
+ blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
return __jbd2_update_log_tail(journal, first_tid, blocknr);
}
if (commit_transaction->t_need_data_flush &&
(journal->j_fs_dev != journal->j_dev) &&
(journal->j_flags & JBD2_BARRIER))
- blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
+ blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
/* Done it all: now write the commit record asynchronously. */
if (jbd2_has_feature_async_commit(journal)) {
stats.run.rs_blocks_logged++;
if (jbd2_has_feature_async_commit(journal) &&
journal->j_flags & JBD2_BARRIER) {
- blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
+ blkdev_issue_flush(journal->j_dev, GFP_NOFS);
}
if (err)
err = err2;
/* Make sure all replayed data is on permanent storage */
if (journal->j_flags & JBD2_BARRIER) {
- err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+ err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL);
if (!err)
err = err2;
}
err = __generic_file_fsync(file, start, end, datasync);
if (err)
return err;
- return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
}
EXPORT_SYMBOL(generic_file_fsync);
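A filesystem without its own journal picks up the flush for free by wiring the exported helper into its file_operations; a sketch assuming a hypothetical "examplefs" (the helpers referenced are real VFS symbols):

/* generic_file_fsync() now ends in the two-argument blkdev_issue_flush(). */
static const struct file_operations examplefs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.fsync		= generic_file_fsync,
};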
*/
smp_wmb();
- err = blkdev_issue_flush(nilfs->ns_bdev, GFP_KERNEL, NULL);
+ err = blkdev_issue_flush(nilfs->ns_bdev, GFP_KERNEL);
if (err != -EIO)
err = 0;
return err;
needs_barrier = true;
err = jbd2_complete_transaction(journal, commit_tid);
if (needs_barrier) {
- ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
if (!err)
err = ret;
}
barrier_done = reiserfs_commit_for_inode(inode);
reiserfs_write_unlock(inode->i_sb);
if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
- blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
inode_unlock(inode);
if (barrier_done < 0)
return barrier_done;
xfs_blkdev_issue_flush(
xfs_buftarg_t *buftarg)
{
- blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
+ blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS);
}
STATIC void
if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV)
ret = file_write_and_wait_range(file, start, end);
if (!ret)
- ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
if (ret)
zonefs_io_error(inode, true);
extern void blk_io_schedule(void);
-extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
+extern int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page);
return false;
}
-static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
- sector_t *error_sector)
+static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
{
return 0;
}
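The !CONFIG_BLOCK stub keeps the signature change consistent for code built without the block layer: with no block devices there is no write cache to flush, so compiling the call down to a successful no-op (return 0) is safe and spares every call site an #ifdef.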