io_uring/net: improve the usercopy for sendmsg/recvmsg
author		Jens Axboe <axboe@kernel.dk>
		Mon, 26 Feb 2024 23:43:01 +0000 (16:43 -0700)
committer	Jens Axboe <axboe@kernel.dk>
		Tue, 27 Feb 2024 18:16:00 +0000 (11:16 -0700)
We're spending a considerable amount of the sendmsg/recvmsg time just
copying in the message header, and, for provided buffers, the known
single-entry iovec.

Be a bit smarter about it and enable/disable user access around our
copying (sketched below). In a test case that does both sendmsg and
recvmsg, the runtimes before and after this change (averaged over
multiple runs, with very stable times) are:

Kernel          Time            Diff
====================================
-git            4720 usec
-git+commit     4311 usec       -8.7%

and looking at a profile diff, we see the following:

0.25%     +9.33%  [kernel.kallsyms]     [k] _copy_from_user
4.47%     -3.32%  [kernel.kallsyms]     [k] __io_msg_copy_hdr.constprop.0

where we drop more than 9% of _copy_from_user() time and consequently
add time to __io_msg_copy_hdr(), to which the copies are now
attributed, for a net win of 6%.

In comparison, the same test case with send/recv runs in 3745 usec,
which is (expectedly) still quite a bit faster. But at least
sendmsg/recvmsg is now only ~13% slower, whereas it was ~21% slower
before.
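
Mechanically, this is the user_access_begin() + unsafe_get_user() +
user_access_end() pattern: validate the user range once, then issue
cheap unchecked reads inside the window, with faults routed to a
label. A simplified sketch of that pattern (the copy_hdr_fast() name
and the two-field body are illustrative only; the real change is in
the diff below):

    static int copy_hdr_fast(struct user_msghdr __user *umsg,
                             struct user_msghdr *msg)
    {
            if (!user_access_begin(umsg, sizeof(*umsg)))
                    return -EFAULT;
            /*
             * A faulting read jumps to the label, so the costly
             * enable/disable of user access (e.g. STAC/CLAC on x86)
             * happens once per header instead of once per field.
             */
            unsafe_get_user(msg->msg_iov, &umsg->msg_iov, efault);
            unsafe_get_user(msg->msg_iovlen, &umsg->msg_iovlen, efault);
            user_access_end();
            return 0;
    efault:
            user_access_end();
            return -EFAULT;
    }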

Signed-off-by: Jens Axboe <axboe@kernel.dk>
diff --git a/io_uring/net.c b/io_uring/net.c
index 7a07c5563d660b0a1779fcd43a84c3bd36c3f163..83fba2882720d986e21df0e555ed0dc35a6bd753 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -255,27 +255,42 @@ static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        int ret;
 
-       if (copy_from_user(msg, sr->umsg, sizeof(*sr->umsg)))
+       if (!user_access_begin(sr->umsg, sizeof(*sr->umsg)))
                return -EFAULT;
 
+       ret = -EFAULT;
+       unsafe_get_user(msg->msg_name, &sr->umsg->msg_name, ua_end);
+       unsafe_get_user(msg->msg_namelen, &sr->umsg->msg_namelen, ua_end);
+       unsafe_get_user(msg->msg_iov, &sr->umsg->msg_iov, ua_end);
+       unsafe_get_user(msg->msg_iovlen, &sr->umsg->msg_iovlen, ua_end);
+       unsafe_get_user(msg->msg_control, &sr->umsg->msg_control, ua_end);
+       unsafe_get_user(msg->msg_controllen, &sr->umsg->msg_controllen, ua_end);
+       msg->msg_flags = 0;
+
        if (req->flags & REQ_F_BUFFER_SELECT) {
                if (msg->msg_iovlen == 0) {
                        sr->len = iomsg->fast_iov[0].iov_len = 0;
                        iomsg->fast_iov[0].iov_base = NULL;
                        iomsg->free_iov = NULL;
                } else if (msg->msg_iovlen > 1) {
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto ua_end;
                } else {
-                       if (copy_from_user(iomsg->fast_iov, msg->msg_iov,
-                                          sizeof(*msg->msg_iov)))
-                               return -EFAULT;
+                       /* we only need the length for provided buffers */
+                       if (!access_ok(&msg->msg_iov[0].iov_len, sizeof(__kernel_size_t)))
+                               goto ua_end;
+                       unsafe_get_user(iomsg->fast_iov[0].iov_len,
+                                       &msg->msg_iov[0].iov_len, ua_end);
                        sr->len = iomsg->fast_iov[0].iov_len;
                        iomsg->free_iov = NULL;
                }
-
-               return 0;
+               ret = 0;
+ua_end:
+               user_access_end();
+               return ret;
        }
 
+       user_access_end();
        iomsg->free_iov = iomsg->fast_iov;
        ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV,
                                &iomsg->free_iov, &iomsg->msg.msg_iter, false);
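
For provided buffers, the buffer address comes from the buffer ring
rather than the user iovec, which is why the new code reads only
iov_len from the single-entry iovec instead of copying the whole
struct. For context, here is a minimal liburing sketch of the kind of
sendmsg submission whose prep path this patch speeds up; the helper
name and setup are this example's own, not part of the patch or its
test case:

    #include <errno.h>
    #include <liburing.h>
    #include <sys/socket.h>

    /* Submit one IORING_OP_SENDMSG and wait for its completion. */
    static int send_one(struct io_uring *ring, int sockfd, void *buf,
                        size_t len)
    {
            struct iovec iov = { .iov_base = buf, .iov_len = len };
            struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;
            int ret;

            sqe = io_uring_get_sqe(ring);
            if (!sqe)
                    return -EBUSY;
            /* The kernel copies 'msg' in at prep time, via the
             * io_msg_copy_hdr() path touched by this patch. */
            io_uring_prep_sendmsg(sqe, sockfd, &msg, 0);

            ret = io_uring_submit(ring);
            if (ret < 0)
                    return ret;
            ret = io_uring_wait_cqe(ring, &cqe);
            if (ret < 0)
                    return ret;
            ret = cqe->res;
            io_uring_cqe_seen(ring, cqe);
            return ret;
    }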