u64 data_units_read = 0, data_units_written = 0;
        struct nvmet_ns *ns;
        struct nvmet_ctrl *ctrl;
+       unsigned long idx;
 
        ctrl = req->sq->ctrl;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+       xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
                /* we don't have the right data for file backed ns */
                if (!ns->bdev)
                        continue;
                host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
                data_units_read += DIV_ROUND_UP(
                        part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
                host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
                data_units_written += DIV_ROUND_UP(
                        part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
-
        }
-       rcu_read_unlock();
 
        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
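
This is the conversion pattern that repeats through the rest of the patch: a
list_for_each_entry_rcu() walk bracketed by rcu_read_lock()/rcu_read_unlock()
becomes a bare xa_for_each(). The explicit RCU bracketing can go because
XArray iteration takes the RCU read lock internally. A minimal sketch of the
new pattern, using hypothetical names (my_objects, my_obj) rather than
anything from this driver:

        #include <linux/xarray.h>
        #include <linux/printk.h>

        static DEFINE_XARRAY(my_objects);       /* a struct xarray */

        struct my_obj {
                unsigned long id;
        };

        static void walk_objects(void)
        {
                struct my_obj *obj;
                unsigned long idx;

                /*
                 * xa_for_each() visits present entries in ascending index
                 * order and handles RCU locking itself, so no explicit
                 * rcu_read_lock()/rcu_read_unlock() bracketing is needed.
                 */
                xa_for_each(&my_objects, idx, obj)
                        pr_info("object %lu at index %lu\n", obj->id, idx);
        }
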
 {
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
+       unsigned long idx;
        u32 count = 0;
 
        if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
-               rcu_read_lock();
-               list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+               xa_for_each(&ctrl->subsys->namespaces, idx, ns)
                        if (ns->anagrpid == grpid)
                                desc->nsids[count++] = cpu_to_le32(ns->nsid);
-               rcu_read_unlock();
        }
 
        desc->grpid = cpu_to_le32(grpid);
        static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
+       unsigned long idx;
        u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
        __le32 *list;
        u16 status = 0;
                goto out;
        }
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+       xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
                if (ns->nsid <= min_nsid)
                        continue;
                list[i++] = cpu_to_le32(ns->nsid);
                if (i == buf_size / sizeof(__le32))
                        break;
        }
-       rcu_read_unlock();
 
        status = nvmet_copy_to_sgl(req, 0, list, buf_size);
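
With the old implementation the namespaces list had to be kept sorted at
insertion time precisely so that this Identify Namespace List loop would emit
NSIDs in ascending order. xa_for_each() provides the same guarantee
structurally: it walks present entries in ascending index order, which is
what keeps the min_nsid skip and the early break above correct. A minimal
sketch of that pattern, with hypothetical names, assuming the object's ID is
also its XArray index (as the nsid is here):

        /* Collect up to 'max' IDs strictly greater than 'min_id'. */
        static unsigned int collect_ids(struct xarray *xa, unsigned long min_id,
                                        __le32 *out, unsigned int max)
        {
                unsigned long idx;
                unsigned int i = 0;
                void *entry;

                xa_for_each(xa, idx, entry) {
                        if (idx <= min_id)
                                continue;
                        /* ascending iteration => 'out' comes out sorted */
                        out[i++] = cpu_to_le32(idx);
                        if (i == max)
                                break;
                }
                return i;
        }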
 
 
 
 static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
 {
-       struct nvmet_ns *ns;
+       unsigned long nsid = 0;
+       struct nvmet_ns *cur;
+       unsigned long idx;
 
-       if (list_empty(&subsys->namespaces))
-               return 0;
+       xa_for_each(&subsys->namespaces, idx, cur)
+               nsid = cur->nsid;
 
-       ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
-       return ns->nsid;
+       return nsid;
 }
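
The old helper could grab the last list entry because insertion kept the list
sorted. The new one leans on the same ordering property from the other
direction: xa_for_each() visits indices in ascending order, so the final
value written to nsid is the largest NSID present. The walk is O(n) where the
list version was O(1), but this helper only runs on the namespace
enable/disable configuration path, not in the I/O path.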
 
 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
        cancel_delayed_work_sync(&ctrl->ka_work);
 }
 
-static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
-               __le32 nsid)
-{
-       struct nvmet_ns *ns;
-
-       list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
-               if (ns->nsid == le32_to_cpu(nsid))
-                       return ns;
-       }
-
-       return NULL;
-}
-
 struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
 {
        struct nvmet_ns *ns;
 
-       rcu_read_lock();
-       ns = __nvmet_find_namespace(ctrl, nsid);
+       ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
        if (ns)
                percpu_ref_get(&ns->ref);
-       rcu_read_unlock();
 
        return ns;
 }
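
Here the XArray pays off most directly: the O(n) list scan in
__nvmet_find_namespace() becomes a single xa_load(), which takes the RCU read
lock internally and returns NULL when no entry is present at that index. Note
that xa_load() confers no reference of its own, which is why the
percpu_ref_get() is still needed to pin the namespace before returning it. A
minimal sketch, hypothetical names again:

        /* Constant-depth lookup by ID; returns NULL if 'id' is absent. */
        static struct my_obj *lookup_obj(struct xarray *xa, unsigned long id)
        {
                return xa_load(xa, id);
        }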
        if (ns->nsid > subsys->max_nsid)
                subsys->max_nsid = ns->nsid;
 
-       /*
-        * The namespaces list needs to be sorted to simplify the implementation
-        * of the Identify Namespace List subcommand.
-        */
-       if (list_empty(&subsys->namespaces)) {
-               list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
-       } else {
-               struct nvmet_ns *old;
-
-               list_for_each_entry_rcu(old, &subsys->namespaces, dev_link,
-                                       lockdep_is_held(&subsys->lock)) {
-                       BUG_ON(ns->nsid == old->nsid);
-                       if (ns->nsid < old->nsid)
-                               break;
-               }
+       ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
+       if (ret)
+               goto out_restore_subsys_maxnsid;
 
-               list_add_tail_rcu(&ns->dev_link, &old->dev_link);
-       }
        subsys->nr_namespaces++;
 
        nvmet_ns_changed(subsys, ns->nsid);
 out_unlock:
        mutex_unlock(&subsys->lock);
        return ret;
+
+out_restore_subsys_maxnsid:
+       subsys->max_nsid = nvmet_max_nsid(subsys);
+       percpu_ref_exit(&ns->ref);
 out_dev_put:
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
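
The insertion side loses the most code: the sorted-insert walk (and its
BUG_ON() duplicate check) collapses into one xa_insert() call. xa_insert()
fails with -EBUSY if the index is already occupied and -ENOMEM if node
allocation fails, so a duplicate NSID turns from a BUG_ON() crash into an
ordinary error return that unwinds via the new out_restore_subsys_maxnsid
label. A sketch of the pattern, reusing the hypothetical my_obj type from the
first example:

        static int register_obj(struct xarray *xa, struct my_obj *obj)
        {
                int ret;

                /* -EBUSY: id already in use; -ENOMEM: allocation failed */
                ret = xa_insert(xa, obj->id, obj, GFP_KERNEL);
                if (ret)
                        return ret;

                return 0;
        }
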
                goto out_unlock;
 
        ns->enabled = false;
-       list_del_rcu(&ns->dev_link);
+       xa_erase(&ns->subsys->namespaces, ns->nsid);
        if (ns->nsid == subsys->max_nsid)
                subsys->max_nsid = nvmet_max_nsid(subsys);
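
Removal is the mirror image: xa_erase() deletes the entry at the given index,
taking the XArray's internal spinlock itself, and returns the erased pointer,
so no external list manipulation or RCU choreography is needed. A sketch with
the same hypothetical names:

        /* Unregister and return the object stored at 'id', if any. */
        static struct my_obj *unregister_obj(struct xarray *xa, unsigned long id)
        {
                return xa_erase(xa, id);
        }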
 
        if (!ns)
                return NULL;
 
-       INIT_LIST_HEAD(&ns->dev_link);
        init_completion(&ns->disable_done);
 
        ns->nsid = nsid;
                struct nvmet_req *req)
 {
        struct nvmet_ns *ns;
+       unsigned long idx;
 
        if (!req->p2p_client)
                return;
 
        ctrl->p2p_client = get_device(req->p2p_client);
 
-       list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link,
-                               lockdep_is_held(&ctrl->subsys->lock))
+       xa_for_each(&ctrl->subsys->namespaces, idx, ns)
                nvmet_p2pmem_ns_add_p2p(ctrl, ns);
 }
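
One subtlety worth noting: the deleted lockdep_is_held() annotation has no
XArray counterpart because none is needed. xa_for_each() is safe to call with
no external locking at all; the subsys->lock that callers of this function
hold still serializes the walk against namespace enable/disable, it just no
longer has to be advertised to the iterator.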
 
        kref_init(&subsys->ref);
 
        mutex_init(&subsys->lock);
-       INIT_LIST_HEAD(&subsys->namespaces);
+       xa_init(&subsys->namespaces);
        INIT_LIST_HEAD(&subsys->ctrls);
        INIT_LIST_HEAD(&subsys->hosts);
 
        struct nvmet_subsys *subsys =
                container_of(ref, struct nvmet_subsys, ref);
 
-       WARN_ON_ONCE(!list_empty(&subsys->namespaces));
+       WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
 
+       xa_destroy(&subsys->namespaces);
        kfree(subsys->subsysnqn);
        kfree_rcu(subsys->model, rcuhead);
        kfree(subsys);
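
Finally, the container lifecycle: an embedded XArray is initialized with
xa_init() and torn down with xa_destroy(), which frees the tree's internal
nodes but not the entries themselves, hence the xa_empty() warning above,
since every namespace must already have been erased by the time the subsystem
is released. A minimal sketch of the same lifecycle, hypothetical names:

        #include <linux/xarray.h>
        #include <linux/bug.h>

        struct my_subsys {
                struct xarray objects;
        };

        static void my_subsys_init(struct my_subsys *s)
        {
                xa_init(&s->objects);
        }

        static void my_subsys_release(struct my_subsys *s)
        {
                /* Entries must be gone; xa_destroy() frees only tree nodes. */
                WARN_ON_ONCE(!xa_empty(&s->objects));
                xa_destroy(&s->objects);
        }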