 void nvme_requeue_req(struct request *req)
 {
-       unsigned long flags;
-
-       blk_mq_requeue_request(req, false);
-       spin_lock_irqsave(req->q->queue_lock, flags);
-       if (!blk_queue_stopped(req->q))
-               blk_mq_kick_requeue_list(req->q);
-       spin_unlock_irqrestore(req->q->queue_lock, flags);
+       blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));
 }
 EXPORT_SYMBOL_GPL(nvme_requeue_req);
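
Two things change in nvme_requeue_req(): the stopped check moves from the request_queue-level QUEUE_FLAG_STOPPED, which had to be read under queue_lock, to blk_mq_queue_stopped(), which tests BLK_MQ_S_STOPPED on the hardware contexts without taking any lock; and the conditional kick of the requeue list is folded into blk_mq_requeue_request() through its second argument. A rough sketch of the equivalence, not the block layer implementation:

        /* old shape: requeue, then kick the requeue list unless stopped */
        blk_mq_requeue_request(req, false);
        if (!blk_mq_queue_stopped(req->q))
                blk_mq_kick_requeue_list(req->q);

        /* new shape: one call, with the kick decision made by the caller */
        blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));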
 
@@ ... @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
        struct nvme_ns *ns;
 
        mutex_lock(&ctrl->namespaces_mutex);
-       list_for_each_entry(ns, &ctrl->namespaces, list) {
-               spin_lock_irq(ns->queue->queue_lock);
-               queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
-               spin_unlock_irq(ns->queue->queue_lock);
-
+       list_for_each_entry(ns, &ctrl->namespaces, list)
                blk_mq_quiesce_queue(ns->queue);
-       }
        mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_queues);
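
blk_mq_quiesce_queue() does more than the old QUEUE_FLAG_STOPPED dance: besides marking every hardware context stopped, it waits until all .queue_rq() calls that are already running have returned, a guarantee the flag manipulation under queue_lock could not provide. A minimal sketch of the intended pairing in a controller reset path, using the functions touched by this patch (error handling elided):

        nvme_stop_queues(ctrl);  /* quiesce: no .queue_rq() in flight afterwards */
        /* ... reset or tear down the controller without racing queue_rq ... */
        nvme_start_queues(ctrl); /* restart stopped hw queues, kick requeue lists */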
 
@@ ... @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
        mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
-               queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
                blk_mq_start_stopped_hw_queues(ns->queue, true);
                blk_mq_kick_requeue_list(ns->queue);
        }
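
With the stopped state now kept only as BLK_MQ_S_STOPPED on each hardware context, there is no request_queue-level flag left to clear on restart: blk_mq_start_stopped_hw_queues() already clears the per-hctx bit before rerunning the queue. Its rough shape in the block layer at this point in the series, simplified from blk-mq.c:

        queue_for_each_hw_ctx(q, hctx, i) {
                if (!blk_mq_hctx_stopped(hctx))
                        continue;
                clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
                blk_mq_run_hw_queue(hctx, async);
        }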