/*
         * Only queue new scan work when admin and IO queues are both alive
         */
-       if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
+       if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset)
                queue_work(nvme_wq, &ctrl->scan_work);
 }
 
  */
 int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
 {
-       if (ctrl->state != NVME_CTRL_RESETTING)
+       if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING)
                return -EBUSY;
        if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
                return -EBUSY;
        struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
                        struct nvme_ctrl, failfast_work);
 
-       if (ctrl->state != NVME_CTRL_CONNECTING)
+       if (nvme_ctrl_state(ctrl) != NVME_CTRL_CONNECTING)
                return;
 
        set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
        ret = nvme_reset_ctrl(ctrl);
        if (!ret) {
                flush_work(&ctrl->reset_work);
-               if (ctrl->state != NVME_CTRL_LIVE)
+               if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
                        ret = -ENETRESET;
        }
 
 
        spin_lock_irqsave(&ctrl->lock, flags);
 
-       old_state = ctrl->state;
+       old_state = nvme_ctrl_state(ctrl);
        switch (new_state) {
        case NVME_CTRL_LIVE:
                switch (old_state) {
        }
 
        if (changed) {
-               ctrl->state = new_state;
+               WRITE_ONCE(ctrl->state, new_state);
                wake_up_all(&ctrl->state_wq);
        }
 
        if (!changed)
                return false;
 
-       if (ctrl->state == NVME_CTRL_LIVE) {
+       if (new_state == NVME_CTRL_LIVE) {
                if (old_state == NVME_CTRL_CONNECTING)
                        nvme_stop_failfast_work(ctrl);
                nvme_kick_requeue_lists(ctrl);
-       } else if (ctrl->state == NVME_CTRL_CONNECTING &&
+       } else if (new_state == NVME_CTRL_CONNECTING &&
                old_state == NVME_CTRL_RESETTING) {
                nvme_start_failfast_work(ctrl);
        }
  */
 static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
 {
-       switch (ctrl->state) {
+       switch (nvme_ctrl_state(ctrl)) {
        case NVME_CTRL_NEW:
        case NVME_CTRL_LIVE:
        case NVME_CTRL_RESETTING:
        wait_event(ctrl->state_wq,
                   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
                   nvme_state_terminal(ctrl));
-       return ctrl->state == NVME_CTRL_RESETTING;
+       return nvme_ctrl_state(ctrl) == NVME_CTRL_RESETTING;
 }
 EXPORT_SYMBOL_GPL(nvme_wait_reset);
 
 blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
                struct request *rq)
 {
-       if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
-           ctrl->state != NVME_CTRL_DELETING &&
-           ctrl->state != NVME_CTRL_DEAD &&
+       enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+       if (state != NVME_CTRL_DELETING_NOIO &&
+           state != NVME_CTRL_DELETING &&
+           state != NVME_CTRL_DEAD &&
            !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
            !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
                return BLK_STS_RESOURCE;
                 * command, which is required to set the queue live in the
                 * appropriate states.
                 */
-               switch (ctrl->state) {
+               switch (nvme_ctrl_state(ctrl)) {
                case NVME_CTRL_CONNECTING:
                        if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
                            (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
 
        if (ctrl->ps_max_latency_us != latency) {
                ctrl->ps_max_latency_us = latency;
-               if (ctrl->state == NVME_CTRL_LIVE)
+               if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
                        nvme_configure_apst(ctrl);
        }
 }
        struct nvme_ctrl *ctrl =
                container_of(inode->i_cdev, struct nvme_ctrl, cdev);
 
-       switch (ctrl->state) {
+       switch (nvme_ctrl_state(ctrl)) {
        case NVME_CTRL_LIVE:
                break;
        default:
        int ret;
 
        /* No tagset on a live ctrl means IO queues could not created */
-       if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
+       if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset)
                return;
 
        /*
         * removing the namespaces' disks; fail all the queues now to avoid
         * potentially having to clean up the failed sync later.
         */
-       if (ctrl->state == NVME_CTRL_DEAD)
+       if (nvme_ctrl_state(ctrl) == NVME_CTRL_DEAD)
                nvme_mark_namespaces_dead(ctrl);
 
        /* this is a no-op when called from the controller reset handler */
         * flushing ctrl async_event_work after changing the controller state
         * from LIVE and before freeing the admin queue.
        */
-       if (ctrl->state == NVME_CTRL_LIVE)
+       if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
                ctrl->ops->submit_async_event(ctrl);
 }
 
 {
        int ret;
 
-       ctrl->state = NVME_CTRL_NEW;
+       WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
        clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
        spin_lock_init(&ctrl->lock);
        mutex_init(&ctrl->scan_lock);
 
 static void
 nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
 {
-       switch (ctrl->ctrl.state) {
+       switch (nvme_ctrl_state(&ctrl->ctrl)) {
        case NVME_CTRL_NEW:
        case NVME_CTRL_CONNECTING:
                /*
                "NVME-FC{%d}: controller connectivity lost. Awaiting "
                "Reconnect", ctrl->cnum);
 
-       switch (ctrl->ctrl.state) {
+       switch (nvme_ctrl_state(&ctrl->ctrl)) {
        case NVME_CTRL_NEW:
        case NVME_CTRL_LIVE:
                /*
        unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
        bool recon = true;
 
-       if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
+       if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_CONNECTING)
                return;
 
        if (portptr->port_state == FC_OBJSTATE_ONLINE) {
 
        bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
 
        /* If there is a reset/reinit ongoing, we shouldn't reset again. */
-       switch (dev->ctrl.state) {
+       switch (nvme_ctrl_state(&dev->ctrl)) {
        case NVME_CTRL_RESETTING:
        case NVME_CTRL_CONNECTING:
                return false;
         * cancellation error. All outstanding requests are completed on
         * shutdown, so we return BLK_EH_DONE.
         */
-       switch (dev->ctrl.state) {
+       switch (nvme_ctrl_state(&dev->ctrl)) {
        case NVME_CTRL_CONNECTING:
                nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
                fallthrough;
        /*
         * Controller is in wrong state, fail early.
         */
-       if (dev->ctrl.state != NVME_CTRL_CONNECTING) {
+       if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) {
                mutex_unlock(&dev->shutdown_lock);
                return -ENODEV;
        }
 
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 {
+       enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl);
        struct pci_dev *pdev = to_pci_dev(dev->dev);
        bool dead;
 
        mutex_lock(&dev->shutdown_lock);
        dead = nvme_pci_ctrl_is_dead(dev);
-       if (dev->ctrl.state == NVME_CTRL_LIVE ||
-           dev->ctrl.state == NVME_CTRL_RESETTING) {
+       if (state == NVME_CTRL_LIVE || state == NVME_CTRL_RESETTING) {
                if (pci_is_enabled(pdev))
                        nvme_start_freeze(&dev->ctrl);
                /*
        bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
        int result;
 
-       if (dev->ctrl.state != NVME_CTRL_RESETTING) {
+       if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) {
                dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
                         dev->ctrl.state);
                result = -ENODEV;
        nvme_wait_freeze(ctrl);
        nvme_sync_queues(ctrl);
 
-       if (ctrl->state != NVME_CTRL_LIVE)
+       if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
                goto unfreeze;
 
        /*
 
 
 static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
 {
+       enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
        /* If we are resetting/deleting then do nothing */
-       if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
-               WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
-                       ctrl->ctrl.state == NVME_CTRL_LIVE);
+       if (state != NVME_CTRL_CONNECTING) {
+               WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE);
                return;
        }
 
                 * unless we're during creation of a new controller to
                 * avoid races with teardown flow.
                 */
-               WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
-                            ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
+               enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
+               WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+                            state != NVME_CTRL_DELETING_NOIO);
                WARN_ON_ONCE(new);
                ret = -EINVAL;
                goto destroy_io;
 
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                /* state change failure is ok if we started ctrl delete */
-               WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
-                            ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
+               enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
+               WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+                            state != NVME_CTRL_DELETING_NOIO);
                return;
        }
 
        struct nvme_rdma_queue *queue = wc->qp->qp_context;
        struct nvme_rdma_ctrl *ctrl = queue->ctrl;
 
-       if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+       if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
                dev_info(ctrl->ctrl.device,
                             "%s for CQE 0x%p failed with status %s (%d)\n",
                             op, wc->wr_cqe,
        dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
                 rq->tag, nvme_rdma_queue_idx(queue));
 
-       if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
+       if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
                /*
                 * If we are resetting, connecting or deleting we should
                 * complete immediately because we may block controller
 
 
 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
 {
+       enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
        /* If we are resetting/deleting then do nothing */
-       if (ctrl->state != NVME_CTRL_CONNECTING) {
-               WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
-                       ctrl->state == NVME_CTRL_LIVE);
+       if (state != NVME_CTRL_CONNECTING) {
+               WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE);
                return;
        }
 
                 * unless we're during creation of a new controller to
                 * avoid races with teardown flow.
                 */
-               WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
-                            ctrl->state != NVME_CTRL_DELETING_NOIO);
+               enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+               WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+                            state != NVME_CTRL_DELETING_NOIO);
                WARN_ON_ONCE(new);
                ret = -EINVAL;
                goto destroy_io;
 
        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
                /* state change failure is ok if we started ctrl delete */
-               WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
-                            ctrl->state != NVME_CTRL_DELETING_NOIO);
+               enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+               WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+                            state != NVME_CTRL_DELETING_NOIO);
                return;
        }
 
 
        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
                /* state change failure is ok if we started ctrl delete */
-               WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
-                            ctrl->state != NVME_CTRL_DELETING_NOIO);
+               enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+               WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+                            state != NVME_CTRL_DELETING_NOIO);
                return;
        }
 
                nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
                opc, nvme_opcode_str(qid, opc, fctype));
 
-       if (ctrl->state != NVME_CTRL_LIVE) {
+       if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
                /*
                 * If we are resetting, connecting or deleting we should
                 * complete immediately because we may block controller