mutex_lock(&subsys->lock);
        ret = 0;
+
+       if (nvmet_passthru_ctrl(subsys)) {
+               pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
+               goto out_unlock;
+       }
+
        if (ns->enabled)
                goto out_unlock;
 
        if (!subsys)
                return ERR_PTR(-ENOMEM);
 
-       subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
+       subsys->ver = NVMET_DEFAULT_VS;
        /* generate a random serial number as our controllers are ephemeral: */
        get_random_bytes(&subsys->serial, sizeof(subsys->serial));
 
        WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
 
        xa_destroy(&subsys->namespaces);
+       nvmet_passthru_subsys_free(subsys);
+
        kfree(subsys->subsysnqn);
        kfree_rcu(subsys->model, rcuhead);
        kfree(subsys);
 
 #include <linux/radix-tree.h>
 #include <linux/t10-pi.h>
 
+#define NVMET_DEFAULT_VS               NVME_VS(1, 3, 0)
+
 #define NVMET_ASYNC_EVENTS             4
 #define NVMET_ERROR_LOG_SLOTS          128
 #define NVMET_NO_ERROR_LOC             ((u16)-1)
 
 #ifdef CONFIG_NVME_TARGET_PASSTHRU
        struct nvme_ctrl        *passthru_ctrl;
+       char                    *passthru_ctrl_path;
 #endif /* CONFIG_NVME_TARGET_PASSTHRU */
 };
 
 }
 
 #ifdef CONFIG_NVME_TARGET_PASSTHRU
+void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
+int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
+void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
 u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
 u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
 static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
        return subsys->passthru_ctrl;
 }
 #else /* CONFIG_NVME_TARGET_PASSTHRU */
+/* No-op stubs used when CONFIG_NVME_TARGET_PASSTHRU is not enabled. */
+static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
+{
+}
+static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
+{
+}
 static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
 {
        return 0;
 
 
 MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);
 
+/*
+ * xarray to maintain one passthru subsystem per nvme controller.
+ */
+static DEFINE_XARRAY(passthru_subsystems);
+
 static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 {
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
 }
+
+/*
+ * Attach the passthru subsystem to the nvme controller named by
+ * subsys->passthru_ctrl_path.  Fails (-EINVAL) if no path is configured,
+ * if passthru is already enabled, or if the subsystem has regular
+ * namespaces (passthru and regular namespaces are mutually exclusive).
+ * At most one passthru subsystem may be bound per controller, enforced
+ * via the passthru_subsystems xarray keyed by cntlid.
+ * Returns 0 on success or a negative errno.
+ */
+int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
+{
+       struct nvme_ctrl *ctrl;
+       int ret = -EINVAL;
+       void *old;
+
+       mutex_lock(&subsys->lock);
+       if (!subsys->passthru_ctrl_path)
+               goto out_unlock;
+       if (subsys->passthru_ctrl)
+               goto out_unlock;
+
+       if (subsys->nr_namespaces) {
+               pr_info("cannot enable both passthru and regular namespaces for a single subsystem\n");
+               goto out_unlock;
+       }
+
+       ctrl = nvme_ctrl_get_by_path(subsys->passthru_ctrl_path);
+       if (IS_ERR(ctrl)) {
+               ret = PTR_ERR(ctrl);
+               pr_err("failed to open nvme controller %s\n",
+                      subsys->passthru_ctrl_path);
+
+               goto out_unlock;
+       }
+
+       /* Claim the controller; a non-NULL 'old' means it is already taken. */
+       old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
+                        subsys, GFP_KERNEL);
+       if (xa_is_err(old)) {
+               ret = xa_err(old);
+               goto out_put_ctrl;
+       }
+
+       if (old)
+               goto out_put_ctrl;
+
+       subsys->passthru_ctrl = ctrl;
+       subsys->ver = ctrl->vs;
+
+       /* Passthru relies on NVMe 1.2.1+ features; clamp what we advertise. */
+       if (subsys->ver < NVME_VS(1, 2, 1)) {
+               pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
+                       NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
+                       NVME_TERTIARY(subsys->ver));
+               subsys->ver = NVME_VS(1, 2, 1);
+       }
+
+       mutex_unlock(&subsys->lock);
+       return 0;
+
+out_put_ctrl:
+       nvme_put_ctrl(ctrl);
+out_unlock:
+       mutex_unlock(&subsys->lock);
+       return ret;
+}
+
+/*
+ * Drop the bound passthru controller (if any) and reset the advertised
+ * version to the default.  Caller must hold subsys->lock.
+ */
+static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
+{
+       struct nvme_ctrl *ctrl = subsys->passthru_ctrl;
+
+       if (ctrl) {
+               xa_erase(&passthru_subsystems, ctrl->cntlid);
+               nvme_put_ctrl(ctrl);
+       }
+
+       subsys->passthru_ctrl = NULL;
+       subsys->ver = NVMET_DEFAULT_VS;
+}
+
+/* Locked wrapper: detach the passthru controller from the subsystem. */
+void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
+{
+       mutex_lock(&subsys->lock);
+       __nvmet_passthru_ctrl_disable(subsys);
+       mutex_unlock(&subsys->lock);
+}
+
+/*
+ * Teardown hook for subsystem destruction: release the passthru
+ * controller reference and free the configured controller path.
+ * The path is freed outside the lock since the subsystem is going away.
+ */
+void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
+{
+       mutex_lock(&subsys->lock);
+       __nvmet_passthru_ctrl_disable(subsys);
+       mutex_unlock(&subsys->lock);
+       kfree(subsys->passthru_ctrl_path);
+}