TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
                goto process_tmr;
        }
-       cmd->se_tmr_req = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
-       if (!cmd->se_tmr_req) {
+       res = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
+       if (res < 0) {
+               /*
+                * FIXME(review): core_tmr_alloc_req() no longer sets
+                * se_tmr_req on failure, so the ->response assignment
+                * below dereferences a NULL pointer.  Confirm the
+                * process_tmr path and report TMR_FUNCTION_REJECTED
+                * without touching cmd.se_tmr_req here.
+                */
                send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
                goto process_tmr;
 
 {
        struct iscsi_cmd *cmd;
        struct se_cmd *se_cmd;
+       int rc;
        u8 tcm_function;
 
        cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
                goto out;
        }
 
-       se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
-                               cmd->tmr_req, tcm_function,
-                               GFP_KERNEL);
-       if (!se_cmd->se_tmr_req)
+       rc = core_tmr_alloc_req(se_cmd, cmd->tmr_req, tcm_function, GFP_KERNEL);
+       if (rc < 0)
                goto out;
 
        cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;
 
         * pointer.  These will be released directly in tcm_loop_device_reset()
         * with transport_generic_free_cmd().
         */
-       if (se_cmd->se_tmr_req)
+       if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
                return 0;
        /*
         * Release the struct se_cmd, which will make a callback to release
        struct tcm_loop_nexus *tl_nexus;
        struct tcm_loop_tmr *tl_tmr = NULL;
        struct tcm_loop_tpg *tl_tpg;
-       int ret = FAILED;
+       int ret = FAILED, rc;
        /*
         * Locate the tcm_loop_hba_t pointer
         */
        transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
                                DMA_NONE, MSG_SIMPLE_TAG,
                                &tl_cmd->tl_sense_buf[0]);
-       /*
-        * Allocate the LUN_RESET TMR
-        */
-       se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
-                                               TMR_LUN_RESET, GFP_KERNEL);
-       if (IS_ERR(se_cmd->se_tmr_req))
+
+       rc = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET, GFP_KERNEL);
+       if (rc < 0)
                goto release;
        /*
         * Locate the underlying TCM struct se_lun from sc->device->lun
 
 #include "target_core_alua.h"
 #include "target_core_pr.h"
 
+/*
+ * core_tmr_alloc_req - allocate and attach a TMR descriptor to a command
+ * @se_cmd:         command the task-management request operates through
+ * @fabric_tmr_ptr: opaque fabric-module TMR context, stored verbatim
+ * @function:       task-management function code to perform
+ *
+ * Allocates a zeroed struct se_tmr_req, links it to @se_cmd via
+ * se_cmd->se_tmr_req and marks the command with SCF_SCSI_TMR_CDB so
+ * later code can test the flag instead of the pointer.
+ *
+ * Returns 0 on success or -ENOMEM on allocation failure (in which case
+ * se_cmd->se_tmr_req is left untouched -- callers must not dereference
+ * it on the error path).
+ */
-struct se_tmr_req *core_tmr_alloc_req(
+int core_tmr_alloc_req(
        struct se_cmd *se_cmd,
        void *fabric_tmr_ptr,
        u8 function,
 {
        struct se_tmr_req *tmr;
 
-       tmr = kmem_cache_zalloc(se_tmr_req_cache, gfp_flags);
+       tmr = kzalloc(sizeof(struct se_tmr_req), gfp_flags);
        if (!tmr) {
                pr_err("Unable to allocate struct se_tmr_req\n");
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
        }
+
+       se_cmd->se_cmd_flags |= SCF_SCSI_TMR_CDB;
+       se_cmd->se_tmr_req = tmr;
        tmr->task_cmd = se_cmd;
        tmr->fabric_tmr_ptr = fabric_tmr_ptr;
        tmr->function = function;
        INIT_LIST_HEAD(&tmr->tmr_list);
 
-       return tmr;
+       return 0;
 }
 EXPORT_SYMBOL(core_tmr_alloc_req);
 
        unsigned long flags;
 
        if (!dev) {
-               kmem_cache_free(se_tmr_req_cache, tmr);
+               kfree(tmr);
                return;
        }
 
        list_del(&tmr->tmr_list);
        spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
 
-       kmem_cache_free(se_tmr_req_cache, tmr);
+       kfree(tmr);
 }
 
 static void core_tmr_handle_tas_abort(
 
 
 static struct workqueue_struct *target_completion_wq;
 static struct kmem_cache *se_sess_cache;
-struct kmem_cache *se_tmr_req_cache;
 struct kmem_cache *se_ua_cache;
 struct kmem_cache *t10_pr_reg_cache;
 struct kmem_cache *t10_alua_lu_gp_cache;
 
 int init_se_kmem_caches(void)
 {
-       se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
-                       sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
-                       0, NULL);
-       if (!se_tmr_req_cache) {
-               pr_err("kmem_cache_create() for struct se_tmr_req"
-                               " failed\n");
-               goto out;
-       }
        se_sess_cache = kmem_cache_create("se_sess_cache",
                        sizeof(struct se_session), __alignof__(struct se_session),
                        0, NULL);
        if (!se_sess_cache) {
                pr_err("kmem_cache_create() for struct se_session"
                                " failed\n");
-               goto out_free_tmr_req_cache;
+               goto out;
        }
        se_ua_cache = kmem_cache_create("se_ua_cache",
                        sizeof(struct se_ua), __alignof__(struct se_ua),
        kmem_cache_destroy(se_ua_cache);
 out_free_sess_cache:
        kmem_cache_destroy(se_sess_cache);
-out_free_tmr_req_cache:
-       kmem_cache_destroy(se_tmr_req_cache);
 out:
        return -ENOMEM;
 }
 void release_se_kmem_caches(void)
 {
        destroy_workqueue(target_completion_wq);
-       kmem_cache_destroy(se_tmr_req_cache);
        kmem_cache_destroy(se_sess_cache);
        kmem_cache_destroy(se_ua_cache);
        kmem_cache_destroy(t10_pr_reg_cache);
 
 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
-       if (!cmd->se_tmr_req)
+       if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
                transport_lun_remove_cmd(cmd);
 
        if (transport_cmd_check_stop_to_fabric(cmd))
 {
        BUG_ON(!cmd->se_tfo);
 
-       if (cmd->se_tmr_req)
+       if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
                core_tmr_release_req(cmd->se_tmr_req);
        if (cmd->t_task_cdb != cmd->__t_task_cdb)
                kfree(cmd->t_task_cdb);
 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 {
        if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
-               if (wait_for_tasks && cmd->se_tmr_req)
+               if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
                         transport_wait_for_tasks(cmd);
 
                transport_release_cmd(cmd);
        unsigned long flags;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
+       if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
+           !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return false;
        }
         * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
         * has been set in transport_set_supported_SAM_opcode().
         */
-       if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
+       if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
+           !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return false;
        }
 
        struct se_tmr_req *tmr;
        struct fcp_cmnd *fcp;
        struct ft_sess *sess;
+       int rc;
        u8 tm_func;
 
        transport_init_se_cmd(&cmd->se_cmd, &ft_configfs->tf_ops,
        }
 
        pr_debug("alloc tm cmd fn %d\n", tm_func);
-       tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL);
-       if (!tmr) {
+       rc = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL);
+       if (rc < 0) {
                pr_debug("alloc failed\n");
                ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
                return;
        }
-       cmd->se_cmd.se_tmr_req = tmr;
 
        switch (fcp->fc_tm_flags) {
        case FCP_TMF_LUN_RESET:
 
        SCF_EMULATED_TASK_SENSE         = 0x00000004,
        SCF_SCSI_DATA_SG_IO_CDB         = 0x00000008,
        SCF_SCSI_CONTROL_SG_IO_CDB      = 0x00000010,
-       SCF_SCSI_NON_DATA_CDB           = 0x00000040,
+       SCF_SCSI_NON_DATA_CDB           = 0x00000020,
+       SCF_SCSI_TMR_CDB                = 0x00000040,
        SCF_SCSI_CDB_EXCEPTION          = 0x00000080,
        SCF_SCSI_RESERVATION_CONFLICT   = 0x00000100,
        SCF_FUA                         = 0x00000200,
        struct completion       task_stop_comp;
 };
 
+/*
+ * Task-management request descriptor, allocated by core_tmr_alloc_req()
+ * and released via core_tmr_release_req().  Moved ahead of struct se_cmd
+ * so se_cmd can embed/reference it without a forward declaration.
+ */
+struct se_tmr_req {
+       /* Task Management function to be performed */
+       u8                      function;
+       /* Task Management response to send */
+       u8                      response;
+       int                     call_transport;
+       /* Reference to ITT that Task Mgmt should be performed */
+       u32                     ref_task_tag;
+       /* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
+       u64                     ref_task_lun;
+       /* Opaque fabric-module context passed into core_tmr_alloc_req() */
+       void                    *fabric_tmr_ptr;
+       /* Back-pointer to the se_cmd this TMR descriptor belongs to */
+       struct se_cmd           *task_cmd;
+       struct se_cmd           *ref_cmd;
+       struct se_device        *tmr_dev;
+       struct se_lun           *tmr_lun;
+       /* Entry in the owning device's TMR list; removed under se_tmr_lock */
+       struct list_head        tmr_list;
+};
+
 struct se_cmd {
        /* SAM response code being sent to initiator */
        u8                      scsi_status;
 
 };
 
-struct se_tmr_req {
-       /* Task Management function to be performed */
-       u8                      function;
-       /* Task Management response to send */
-       u8                      response;
-       int                     call_transport;
-       /* Reference to ITT that Task Mgmt should be performed */
-       u32                     ref_task_tag;
-       /* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
-       u64                     ref_task_lun;
-       void                    *fabric_tmr_ptr;
-       struct se_cmd           *task_cmd;
-       struct se_cmd           *ref_cmd;
-       struct se_device        *tmr_dev;
-       struct se_lun           *tmr_lun;
-       struct list_head        tmr_list;
-};
-
 struct se_ua {
        u8                      ua_asc;
        u8                      ua_ascq;
 
 
 int    core_alua_check_nonop_delay(struct se_cmd *);
 
-struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
+int    core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
 void   core_tmr_release_req(struct se_tmr_req *);
 int    transport_generic_handle_tmr(struct se_cmd *);
 int    transport_lookup_tmr_lun(struct se_cmd *, u32);