scsi: block: Remove RQF_PREEMPT and BLK_MQ_REQ_PREEMPT
Author:     Bart Van Assche <bvanassche@acm.org>
AuthorDate: Wed, 9 Dec 2020 05:29:50 +0000 (21:29 -0800)
Commit:     Martin K. Petersen <martin.petersen@oracle.com>
CommitDate: Wed, 9 Dec 2020 16:41:42 +0000 (11:41 -0500)
Remove the flags RQF_PREEMPT and BLK_MQ_REQ_PREEMPT since they are no
longer used by any kernel code. (An illustrative usage sketch follows
the tag block below.)

Link: https://lore.kernel.org/r/20201209052951.16136-8-bvanassche@acm.org
Cc: Can Guo <cang@codeaurora.org>
Cc: Stanley Chu <stanley.chu@mediatek.com>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Martin Kepplinger <martin.kepplinger@puri.sm>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
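
After this patch, a caller that previously passed BLK_MQ_REQ_PREEMPT
requests power-management treatment with BLK_MQ_REQ_PM alone. A minimal
sketch (the helper below is illustrative, not part of this patch):

    /*
     * Allocate a passthrough request that may be processed while the
     * queue is in the pm-only state. BLK_MQ_REQ_PM makes the block
     * layer set RQF_PM on the request.
     */
    static struct request *pm_alloc_request(struct request_queue *q)
    {
            return blk_get_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
    }

The caller must check the result with IS_ERR() and release the request
with blk_put_request().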
block/blk-core.c
block/blk-mq-debugfs.c
block/blk-mq.c
include/linux/blk-mq.h
include/linux/blkdev.h

diff --git a/block/blk-core.c b/block/blk-core.c
index 10696f9fb6ac6649f72fb1e53069f81421b58ce3..a00bce9f46d88fb27013ffc2957ae87bd597d82c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -424,11 +424,11 @@ EXPORT_SYMBOL(blk_cleanup_queue);
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
- * @flags: BLK_MQ_REQ_NOWAIT, BLK_MQ_REQ_PM and/or BLK_MQ_REQ_PREEMPT
+ * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
  */
 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 {
-       const bool pm = flags & (BLK_MQ_REQ_PM | BLK_MQ_REQ_PREEMPT);
+       const bool pm = flags & BLK_MQ_REQ_PM;
 
        while (true) {
                bool success = false;
@@ -630,8 +630,7 @@ struct request *blk_get_request(struct request_queue *q, unsigned int op,
        struct request *req;
 
        WARN_ON_ONCE(op & REQ_NOWAIT);
-       WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM |
-                              BLK_MQ_REQ_PREEMPT));
+       WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));
 
        req = blk_mq_alloc_request(q, op, flags);
        if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
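
For reference, the surrounding loop body in blk_queue_enter() now gates
pm-only queues on the single pm flag; a simplified sketch, not part of
this diff (the 5.11-era code also consults the queue's runtime-PM
status and handles the NOWAIT and wait paths):

    rcu_read_lock();
    if (percpu_ref_tryget_live(&q->q_usage_counter)) {
            if (pm || !blk_queue_pm_only(q))
                    success = true;  /* request may enter the queue */
            else
                    percpu_ref_put(&q->q_usage_counter);  /* pm-only: back off */
    }
    rcu_read_unlock();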
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 3094542e12ae0fa468f242a897b1d08b35fbe83f..9336a6f8d6efd55acbe4277c3fcb30e3042bddf6 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -297,7 +297,6 @@ static const char *const rqf_name[] = {
        RQF_NAME(MIXED_MERGE),
        RQF_NAME(MQ_INFLIGHT),
        RQF_NAME(DONTPREP),
-       RQF_NAME(PREEMPT),
        RQF_NAME(FAILED),
        RQF_NAME(QUIET),
        RQF_NAME(ELVPRIV),
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b5880a1fb38d91573ac68107311678e56b83003e..d50504888b682d972e1a5fbadda89b848aeece95 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -294,8 +294,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
        rq->cmd_flags = data->cmd_flags;
        if (data->flags & BLK_MQ_REQ_PM)
                rq->rq_flags |= RQF_PM;
-       if (data->flags & BLK_MQ_REQ_PREEMPT)
-               rq->rq_flags |= RQF_PREEMPT;
        if (blk_queue_io_stat(data->q))
                rq->rq_flags |= RQF_IO_STAT;
        INIT_LIST_HEAD(&rq->queuelist);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index c00e856c6fb1e1b84a4a71196276671f6e8112c6..88af1df9430816aed5a1cee5f458340eb5738a5e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -446,8 +446,6 @@ enum {
        BLK_MQ_REQ_RESERVED     = (__force blk_mq_req_flags_t)(1 << 1),
        /* set RQF_PM */
        BLK_MQ_REQ_PM           = (__force blk_mq_req_flags_t)(1 << 2),
-       /* set RQF_PREEMPT */
-       BLK_MQ_REQ_PREEMPT      = (__force blk_mq_req_flags_t)(1 << 3),
 };
 
 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
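
The remaining allocation flags still combine as before; for example, a
non-blocking power-management allocation might look like this
(illustrative only, not from this patch):

    blk_mq_req_flags_t flags = BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM;
    struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, flags);

    if (IS_ERR(rq))
            return PTR_ERR(rq);  /* with NOWAIT, failure instead of sleeping */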
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 639cae2c158b5973b23f92f25974f80bca757f64..7d4b746f7e6af2bd8fd765273db7d224d799e841 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -79,9 +79,6 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_MQ_INFLIGHT                ((__force req_flags_t)(1 << 6))
 /* don't call prep for this one */
 #define RQF_DONTPREP           ((__force req_flags_t)(1 << 7))
-/* set for "ide_preempt" requests and also for requests for which the SCSI
-   "quiesce" state must be ignored. */
-#define RQF_PREEMPT            ((__force req_flags_t)(1 << 8))
 /* vaguely specified driver internal error.  Ignored by the block layer */
 #define RQF_FAILED             ((__force req_flags_t)(1 << 10))
 /* don't warn about errors */
@@ -430,8 +427,7 @@ struct request_queue {
        unsigned long           queue_flags;
        /*
         * Number of contexts that have called blk_set_pm_only(). If this
-        * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
-        * processed.
+        * counter is above zero then only RQF_PM requests are processed.
         */
        atomic_t                pm_only;
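
pm_only is raised and lowered with blk_set_pm_only() and
blk_clear_pm_only(); a simplified sketch of the typical pairing on the
quiesce/resume path (error handling elided):

    blk_set_pm_only(q);    /* only RQF_PM requests pass blk_queue_enter() */
    /* ... perform the power-management transition ... */
    blk_clear_pm_only(q);  /* allow normal requests again */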