block: remove the error_sector argument to blkdev_issue_flush
author: Christoph Hellwig <hch@lst.de>
Wed, 13 May 2020 12:36:00 +0000 (14:36 +0200)
committer: Jens Axboe <axboe@kernel.dk>
Fri, 22 May 2020 14:45:46 +0000 (08:45 -0600)
The argument isn't used by any caller, and drivers don't fill out
bi_sector for flush requests either.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
22 files changed:
block/blk-flush.c
drivers/md/dm-integrity.c
drivers/md/dm-zoned-metadata.c
drivers/md/raid5-ppl.c
drivers/nvme/target/io-cmd-bdev.c
fs/block_dev.c
fs/ext4/fsync.c
fs/ext4/ialloc.c
fs/ext4/super.c
fs/fat/file.c
fs/hfsplus/inode.c
fs/hfsplus/super.c
fs/jbd2/checkpoint.c
fs/jbd2/commit.c
fs/jbd2/recovery.c
fs/libfs.c
fs/nilfs2/the_nilfs.h
fs/ocfs2/file.c
fs/reiserfs/file.c
fs/xfs/xfs_super.c
fs/zonefs/super.c
include/linux/blkdev.h

index b733f7ac75c762e5dcab941bd66229ce6302b88a..d47a579964fbccc087db42a8b54bcc7b7588cf9d 100644 (file)
@@ -432,15 +432,11 @@ void blk_insert_flush(struct request *rq)
  * blkdev_issue_flush - queue a flush
  * @bdev:      blockdev to issue flush for
  * @gfp_mask:  memory allocation flags (for bio_alloc)
- * @error_sector:      error sector
  *
  * Description:
- *    Issue a flush for the block device in question. Caller can supply
- *    room for storing the error offset in case of a flush error, if they
- *    wish to.
+ *    Issue a flush for the block device in question.
  */
-int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
-               sector_t *error_sector)
+int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
 {
        struct request_queue *q;
        struct bio *bio;
@@ -458,15 +454,6 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
        ret = submit_bio_wait(bio);
-
-       /*
-        * The driver must store the error location in ->bi_sector, if
-        * it supports it. For non-stacked drivers, this should be
-        * copied from blk_rq_pos(rq).
-        */
-       if (error_sector)
-               *error_sector = bio->bi_iter.bi_sector;
-
        bio_put(bio);
        return ret;
 }
index 4094c47eca7f233db0b8670ecf5a7bcaef4a031b..84cb04904fab7897f02121ec9720bf3f4eeae46a 100644 (file)
@@ -2657,7 +2657,7 @@ static void bitmap_flush_work(struct work_struct *work)
 
        dm_integrity_flush_buffers(ic);
        if (ic->meta_dev)
-               blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL);
+               blkdev_issue_flush(ic->dev->bdev, GFP_NOIO);
 
        limit = ic->provided_data_sectors;
        if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
index 369de15c4e80cc0b0245ded032b8d3789bb915be..bf224537030553f02fae15494a6e9257b852ec7d 100644 (file)
@@ -661,7 +661,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
 
        ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
        if (ret == 0)
-               ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+               ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO);
 
        return ret;
 }
@@ -703,7 +703,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
 
        /* Flush drive cache (this will also sync data) */
        if (ret == 0)
-               ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+               ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO);
 
        return ret;
 }
@@ -772,7 +772,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
 
        /* If there are no dirty metadata blocks, just flush the device cache */
        if (list_empty(&write_list)) {
-               ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+               ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO);
                goto err;
        }
 
index d50238d0a85db0bf29f9ef67cd7ef23c826d8724..a750f4bbb5d96f2f9ae1ed0dabc7163949b34ef6 100644 (file)
@@ -1037,7 +1037,7 @@ static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
        }
 
        /* flush the disk cache after recovery if necessary */
-       ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL, NULL);
+       ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL);
 out:
        __free_page(page);
        return ret;
index ea0e596be15dc5a9c72380cf39dcc8678b88bfe6..26f50c23b82e98cb4d995545eaa58e39d776e862 100644 (file)
@@ -226,7 +226,7 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
 
 u16 nvmet_bdev_flush(struct nvmet_req *req)
 {
-       if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
+       if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL))
                return NVME_SC_INTERNAL | NVME_SC_DNR;
        return 0;
 }
index ebd1507789d297a08a64eb047183f561c1980c13..d1e08bba925a49c0f96320a4b0fda7b8d604d072 100644 (file)
@@ -672,7 +672,7 @@ int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
         * i_mutex and doing so causes performance issues with concurrent
         * O_SYNC writers to a block device.
         */
-       error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
+       error = blkdev_issue_flush(bdev, GFP_KERNEL);
        if (error == -EOPNOTSUPP)
                error = 0;
 
index e10206e7f4bbe7f470fd83687ddd3c48322bc892..35ff9a56db6792093f3e98d619437407a2b1dc96 100644 (file)
@@ -176,7 +176,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                ret = ext4_fsync_journal(inode, datasync, &needs_barrier);
 
        if (needs_barrier) {
-               err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+               err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
                if (!ret)
                        ret = err;
        }
index 4b8c9a9bdf0c8c0e77a39aaa7e0bb653d4c64b40..499f08d8522e2479a5babd3da4f6e6f66c99cd7d 100644 (file)
@@ -1440,7 +1440,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
        if (ret < 0)
                goto err_out;
        if (barrier)
-               blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);
+               blkdev_issue_flush(sb->s_bdev, GFP_NOFS);
 
 skip_zeroout:
        ext4_lock_group(sb, group);
index bf5fcb477f667211f031c6f1cff42f54a1a5235d..629a56b5c859ffb3c64d7ff169a258c3e5ad6018 100644 (file)
@@ -5256,7 +5256,7 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
                needs_barrier = true;
        if (needs_barrier) {
                int err;
-               err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
+               err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
                if (!ret)
                        ret = err;
        }
index bdc4503c00a3866c1d41d3e3eda3a7173458bae9..42134c58c87e19d1f6e77e888e3732bb4db10212 100644 (file)
@@ -195,7 +195,7 @@ int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
        if (err)
                return err;
 
-       return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+       return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
 }
 
 
index 94bd83b366442d9ff3ce0a43878b57b0c0d2aa12..e3da9e96b83578d7f8989dab58a9a69a84ac6f92 100644 (file)
@@ -340,7 +340,7 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
        }
 
        if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
-               blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+               blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
 
        inode_unlock(inode);
 
index 2b9e5743105e2d2bb8a121b238003d2d45080436..129dca3f4b78fea46480db5459f181639d9961b0 100644 (file)
@@ -239,7 +239,7 @@ out:
        mutex_unlock(&sbi->vh_mutex);
 
        if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
-               blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
+               blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
 
        return error;
 }
index 96bf33986d030ae713e176cde36d810bb4aaf43d..263f02ad8ebf8bd7787316f78b570985b439db75 100644 (file)
@@ -414,7 +414,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
         * jbd2_cleanup_journal_tail() doesn't get called all that often.
         */
        if (journal->j_flags & JBD2_BARRIER)
-               blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
+               blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
 
        return __jbd2_update_log_tail(journal, first_tid, blocknr);
 }
index e855d8260433a2c890f0cc30b10980717c1c2858..6d2da8ad0e6f51a8f2c1b1abf1455cd4654c92d9 100644 (file)
@@ -775,7 +775,7 @@ start_journal_io:
        if (commit_transaction->t_need_data_flush &&
            (journal->j_fs_dev != journal->j_dev) &&
            (journal->j_flags & JBD2_BARRIER))
-               blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
+               blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
 
        /* Done it all: now write the commit record asynchronously. */
        if (jbd2_has_feature_async_commit(journal)) {
@@ -882,7 +882,7 @@ start_journal_io:
        stats.run.rs_blocks_logged++;
        if (jbd2_has_feature_async_commit(journal) &&
            journal->j_flags & JBD2_BARRIER) {
-               blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
+               blkdev_issue_flush(journal->j_dev, GFP_NOFS);
        }
 
        if (err)
index a4967b27ffb636212ddd21234b1b2fc211a6038b..2ed278f0dcede6888110ca98930cb06d160f6957 100644 (file)
@@ -286,7 +286,7 @@ int jbd2_journal_recover(journal_t *journal)
                err = err2;
        /* Make sure all replayed data is on permanent storage */
        if (journal->j_flags & JBD2_BARRIER) {
-               err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+               err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL);
                if (!err)
                        err = err2;
        }
index 3759fbacf5222c8ca724f4ec1c80f8dde74cccb1..4d08edf19c78252ebe3af999bf60e19823d3b8f2 100644 (file)
@@ -1113,7 +1113,7 @@ int generic_file_fsync(struct file *file, loff_t start, loff_t end,
        err = __generic_file_fsync(file, start, end, datasync);
        if (err)
                return err;
-       return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+       return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
 }
 EXPORT_SYMBOL(generic_file_fsync);
 
index 380a543c5b19bd424782e8393ecc285af648396e..b55cdeb4d16991cd463598b4f4e687533016934d 100644 (file)
@@ -375,7 +375,7 @@ static inline int nilfs_flush_device(struct the_nilfs *nilfs)
         */
        smp_wmb();
 
-       err = blkdev_issue_flush(nilfs->ns_bdev, GFP_KERNEL, NULL);
+       err = blkdev_issue_flush(nilfs->ns_bdev, GFP_KERNEL);
        if (err != -EIO)
                err = 0;
        return err;
index 6cd5e4924e4d2d49ff2853713a32830d9a74002b..85979e2214b39d81209168680885fcd317410237 100644 (file)
@@ -194,7 +194,7 @@ static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
                needs_barrier = true;
        err = jbd2_complete_transaction(journal, commit_tid);
        if (needs_barrier) {
-               ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+               ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
                if (!err)
                        err = ret;
        }
index 84cf8bdbec9cb53de806930d58e49402a82a4bd5..0b641ae694f123dd706446a074532cb945b44633 100644 (file)
@@ -159,7 +159,7 @@ static int reiserfs_sync_file(struct file *filp, loff_t start, loff_t end,
        barrier_done = reiserfs_commit_for_inode(inode);
        reiserfs_write_unlock(inode->i_sb);
        if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
-               blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+               blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
        inode_unlock(inode);
        if (barrier_done < 0)
                return barrier_done;
index 424bb9a2d53256b2d65ef3f6291ec6bbd53b642c..a123cd8267d980a66b8151880df771ff7486829b 100644 (file)
@@ -305,7 +305,7 @@ void
 xfs_blkdev_issue_flush(
        xfs_buftarg_t           *buftarg)
 {
-       blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
+       blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS);
 }
 
 STATIC void
index 0bf7009f50a2797e60cbe68e2fbeb7ff8956e98f..25afcf55aa41e8d468da688caee9524f82b8bc5f 100644 (file)
@@ -479,7 +479,7 @@ static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
        if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV)
                ret = file_write_and_wait_range(file, start, end);
        if (!ret)
-               ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+               ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
 
        if (ret)
                zonefs_io_error(inode, true);
index 2b33166b9daf11dcf7414329805dedbe17d63888..7d10f4e632325d9aa2318543e13fe63fb899830f 100644 (file)
@@ -1233,7 +1233,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 
 extern void blk_io_schedule(void);
 
-extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
+int blkdev_issue_flush(struct block_device *, gfp_t);
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 
@@ -1872,8 +1872,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
        return false;
 }
 
-static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
-                                    sector_t *error_sector)
+static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
 {
        return 0;
 }