svcrdma: Add an async version of svc_rdma_write_info_free()
author Chuck Lever <chuck.lever@oracle.com>
Tue, 21 Nov 2023 16:40:39 +0000 (11:40 -0500)
committer Chuck Lever <chuck.lever@oracle.com>
Sun, 7 Jan 2024 22:54:27 +0000 (17:54 -0500)
DMA unmapping can take quite some time, so it should not be handled
in a single-threaded completion handler. Defer releasing write_info
structs to the recently-added workqueue.

With this patch, DMA unmapping can be handled in parallel, and it
does not cause head-of-queue blocking of Write completions.
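
For context, this deferral relies on the utility workqueue added
earlier in this series. A minimal sketch of that setup follows; the
workqueue name and flags here are assumptions for illustration, not
a quote of the actual patch:

    /*
     * Sketch of the workqueue this patch queues work onto.
     * WQ_UNBOUND lets work items run on any available CPU,
     * so multiple deferred releases can DMA-unmap concurrently
     * instead of serializing behind the CQ completion handler.
     */
    #include <linux/workqueue.h>

    struct workqueue_struct *svcrdma_wq;

    static int svc_rdma_init_workqueue(void)
    {
            svcrdma_wq = alloc_workqueue("svcrdma", WQ_UNBOUND, 0);
            if (!svcrdma_wq)
                    return -ENOMEM;
            return 0;
    }

Because the workqueue is unbound, queue_work() in the free path
returns immediately and the completion handler stays short.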

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
net/sunrpc/xprtrdma/svc_rdma_rw.c

index e460e25a1d6daccdd6af92aed3a1e59eeaa1d30e..de1ec3220aabcf8a151a59a8b1874ce0b6b882a4 100644 (file)
@@ -227,6 +227,7 @@ struct svc_rdma_write_info {
        unsigned int            wi_next_off;
 
        struct svc_rdma_chunk_ctxt      wi_cc;
+       struct work_struct      wi_work;
 };
 
 static struct svc_rdma_write_info *
@@ -248,12 +249,21 @@ svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma,
        return info;
 }
 
-static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
+static void svc_rdma_write_info_free_async(struct work_struct *work)
 {
+       struct svc_rdma_write_info *info;
+
+       info = container_of(work, struct svc_rdma_write_info, wi_work);
        svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
        kfree(info);
 }
 
+static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
+{
+       INIT_WORK(&info->wi_work, svc_rdma_write_info_free_async);
+       queue_work(svcrdma_wq, &info->wi_work);
+}
+
 /**
  * svc_rdma_write_done - Write chunk completion
  * @cq: controlling Completion Queue