 	kfree(chap);
 }
 
-static void __nvme_auth_work(struct work_struct *work)
+static void nvme_queue_auth_work(struct work_struct *work)
 {
 	struct nvme_dhchap_queue_context *chap =
 		container_of(work, struct nvme_dhchap_queue_context, auth_work);
@@ ... @@
 		return -ENOMEM;
 	}
 
-	INIT_WORK(&chap->auth_work, __nvme_auth_work);
+	INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
 	list_add(&chap->entry, &ctrl->dhchap_auth_list);
 	mutex_unlock(&ctrl->dhchap_auth_mutex);
 	queue_work(nvme_wq, &chap->auth_work);
@@ ... @@
 }
 EXPORT_SYMBOL_GPL(nvme_auth_reset);
 
-static void nvme_dhchap_auth_work(struct work_struct *work)
+static void nvme_ctrl_auth_work(struct work_struct *work)
 {
 	struct nvme_ctrl *ctrl =
 		container_of(work, struct nvme_ctrl, dhchap_auth_work);
@@ ... @@
 void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
 {
 	INIT_LIST_HEAD(&ctrl->dhchap_auth_list);
-	INIT_WORK(&ctrl->dhchap_auth_work, nvme_dhchap_auth_work);
+	INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
 	mutex_init(&ctrl->dhchap_auth_mutex);
 	if (!ctrl->opts)
 		return;