block: move blk_rq_err_bytes to scsi
author     Christoph Hellwig <hch@lst.de>
Wed, 17 Nov 2021 06:13:54 +0000 (07:13 +0100)
committer  Jens Axboe <axboe@kernel.dk>
Mon, 29 Nov 2021 13:34:50 +0000 (06:34 -0700)
blk_rq_err_bytes is only used by the scsi midlayer, so move it there.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20211117061404.331732-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-core.c
drivers/scsi/scsi_lib.c
include/linux/blk-mq.h

block/blk-core.c
index 1378d084c770f6641a911caf8d98d302026fa689..682b112f513faa502446b9ce74e6c7de883f52b7 100644
@@ -1176,47 +1176,6 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
-/**
- * blk_rq_err_bytes - determine number of bytes till the next failure boundary
- * @rq: request to examine
- *
- * Description:
- *     A request could be merge of IOs which require different failure
- *     handling.  This function determines the number of bytes which
- *     can be failed from the beginning of the request without
- *     crossing into area which need to be retried further.
- *
- * Return:
- *     The number of bytes to fail.
- */
-unsigned int blk_rq_err_bytes(const struct request *rq)
-{
-       unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
-       unsigned int bytes = 0;
-       struct bio *bio;
-
-       if (!(rq->rq_flags & RQF_MIXED_MERGE))
-               return blk_rq_bytes(rq);
-
-       /*
-        * Currently the only 'mixing' which can happen is between
-        * different fastfail types.  We can safely fail portions
-        * which have all the failfast bits that the first one has -
-        * the ones which are at least as eager to fail as the first
-        * one.
-        */
-       for (bio = rq->bio; bio; bio = bio->bi_next) {
-               if ((bio->bi_opf & ff) != ff)
-                       break;
-               bytes += bio->bi_iter.bi_size;
-       }
-
-       /* this could lead to infinite loop */
-       BUG_ON(blk_rq_bytes(rq) && !bytes);
-       return bytes;
-}
-EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
-
 static void update_io_ticks(struct block_device *part, unsigned long now,
                bool end)
 {
drivers/scsi/scsi_lib.c
index 621d841d819a379b8fac65a86a6c59267bedf4cd..5e8b5ecb3245a25a56841bf2d152926ecf394ff6 100644
@@ -617,6 +617,46 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
        }
 }
 
+/**
+ * scsi_rq_err_bytes - determine number of bytes till the next failure boundary
+ * @rq: request to examine
+ *
+ * Description:
+ *     A request could be merge of IOs which require different failure
+ *     handling.  This function determines the number of bytes which
+ *     can be failed from the beginning of the request without
+ *     crossing into area which need to be retried further.
+ *
+ * Return:
+ *     The number of bytes to fail.
+ */
+static unsigned int scsi_rq_err_bytes(const struct request *rq)
+{
+       unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+       unsigned int bytes = 0;
+       struct bio *bio;
+
+       if (!(rq->rq_flags & RQF_MIXED_MERGE))
+               return blk_rq_bytes(rq);
+
+       /*
+        * Currently the only 'mixing' which can happen is between
+        * different fastfail types.  We can safely fail portions
+        * which have all the failfast bits that the first one has -
+        * the ones which are at least as eager to fail as the first
+        * one.
+        */
+       for (bio = rq->bio; bio; bio = bio->bi_next) {
+               if ((bio->bi_opf & ff) != ff)
+                       break;
+               bytes += bio->bi_iter.bi_size;
+       }
+
+       /* this could lead to infinite loop */
+       BUG_ON(blk_rq_bytes(rq) && !bytes);
+       return bytes;
+}
+
 /* Helper for scsi_io_completion() when "reprep" action required. */
 static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
                                      struct request_queue *q)
@@ -794,7 +834,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
                                scsi_print_command(cmd);
                        }
                }
-               if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
+               if (!scsi_end_request(req, blk_stat, scsi_rq_err_bytes(req)))
                        return;
                fallthrough;
        case ACTION_REPREP:
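
For illustration, the following standalone userspace sketch walks a mixed-merged request the same way the scsi_rq_err_bytes() added above does. The struct names, the FAILFAST_* values, and the sketch_err_bytes() helper are hypothetical stand-ins invented for this example, not kernel definitions; only the loop logic mirrors the function moved by this patch.

/*
 * Standalone sketch of the failure-boundary walk performed by
 * scsi_rq_err_bytes().  The types and FAILFAST_* bits below are
 * illustrative stand-ins, not the kernel's definitions.
 */
#include <stdio.h>

#define FAILFAST_DEV		0x1	/* stand-in for the REQ_FAILFAST_* bits */
#define FAILFAST_TRANSPORT	0x2
#define FAILFAST_MASK		(FAILFAST_DEV | FAILFAST_TRANSPORT)

struct sketch_bio {
	unsigned int opf;		/* failfast bits of this bio */
	unsigned int size;		/* payload bytes */
	struct sketch_bio *next;
};

struct sketch_rq {
	int mixed_merge;		/* models RQF_MIXED_MERGE */
	unsigned int total_bytes;	/* models blk_rq_bytes() */
	unsigned int cmd_flags;		/* failfast bits of the first bio */
	struct sketch_bio *bio;
};

/* Bytes that may be failed without crossing a retry boundary. */
static unsigned int sketch_err_bytes(const struct sketch_rq *rq)
{
	unsigned int ff = rq->cmd_flags & FAILFAST_MASK;
	unsigned int bytes = 0;
	const struct sketch_bio *bio;

	if (!rq->mixed_merge)
		return rq->total_bytes;

	/* Only bios at least as eager to fail as the first one count. */
	for (bio = rq->bio; bio; bio = bio->next) {
		if ((bio->opf & ff) != ff)
			break;
		bytes += bio->size;
	}
	return bytes;
}

int main(void)
{
	/* Two failfast bios merged ahead of one that must be retried. */
	struct sketch_bio b3 = { 0,             4096, NULL };
	struct sketch_bio b2 = { FAILFAST_MASK, 4096, &b3 };
	struct sketch_bio b1 = { FAILFAST_MASK, 4096, &b2 };
	struct sketch_rq rq  = { 1, 12288, FAILFAST_MASK, &b1 };

	/* Prints 8192: only the first two bios can be failed outright. */
	printf("%u\n", sketch_err_bytes(&rq));
	return 0;
}

Running the sketch prints 8192: only the two leading bios carry all the failfast bits of the first bio, so only their bytes can be failed without crossing into the region that still needs to be retried. The kernel function additionally BUG_ON()s when a non-empty request yields zero failable bytes, since, per its comment, that could lead to an infinite loop in the caller; the sketch omits that guard.
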
include/linux/blk-mq.h
index 2949d9ac74849cf80386e450db9bed4295462b09..a78d9a0f2a1be1aee5a6a2490a99663786bd9ac9 100644
@@ -947,7 +947,6 @@ struct req_iterator {
  * blk_rq_pos()                        : the current sector
  * blk_rq_bytes()              : bytes left in the entire request
  * blk_rq_cur_bytes()          : bytes left in the current segment
- * blk_rq_err_bytes()          : bytes left till the next error boundary
  * blk_rq_sectors()            : sectors left in the entire request
  * blk_rq_cur_sectors()                : sectors left in the current segment
  * blk_rq_stats_sectors()      : sectors of the entire request used for stats
@@ -971,8 +970,6 @@ static inline int blk_rq_cur_bytes(const struct request *rq)
        return bio_iovec(rq->bio).bv_len;
 }
 
-unsigned int blk_rq_err_bytes(const struct request *rq);
-
 static inline unsigned int blk_rq_sectors(const struct request *rq)
 {
        return blk_rq_bytes(rq) >> SECTOR_SHIFT;