                       struct io_kiocb **nxt, bool force_nonblock)
 {
 #if defined(CONFIG_NET)
+       struct io_async_msghdr *kmsg = NULL;
        struct socket *sock;
        int ret;
 
        sock = sock_from_file(req->file, &ret);
        if (sock) {
                struct io_async_ctx io, *copy;
                struct sockaddr_storage addr;
-               struct msghdr *kmsg;
                unsigned flags;
 
                flags = READ_ONCE(sqe->msg_flags);
                if (flags & MSG_DONTWAIT)
                        req->flags |= REQ_F_NOWAIT;
                else if (force_nonblock)
                        flags |= MSG_DONTWAIT;
 
                if (req->io) {
-                       kmsg = &req->io->msg.msg;
-                       kmsg->msg_name = &addr;
+                       kmsg = &req->io->msg;
+                       kmsg->msg.msg_name = &addr;
+                       /* if iov is set, it's allocated already */
+                       if (!kmsg->iov)
+                               kmsg->iov = kmsg->fast_iov;
+                       kmsg->msg.msg_iter.iov = kmsg->iov;
                } else {
-                       kmsg = &io.msg.msg;
-                       kmsg->msg_name = &addr;
+                       kmsg = &io.msg;
+                       kmsg->msg.msg_name = &addr;
                        ret = io_sendmsg_prep(req, &io);
                        if (ret)
                                goto out;
                }
 
-               ret = __sys_sendmsg_sock(sock, kmsg, flags);
+               ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
                if (force_nonblock && ret == -EAGAIN) {
                        copy = kmalloc(sizeof(*copy), GFP_KERNEL);
                        if (!copy) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        memcpy(&copy->msg, &io.msg, sizeof(copy->msg));
                        req->io = copy;
                        memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe));
                        req->sqe = &req->io->sqe;
-                       return ret;
+                       return -EAGAIN;
                }
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
        }
 
 out:
+       if (kmsg && kmsg->iov != kmsg->fast_iov)
+               kfree(kmsg->iov);
        io_cqring_add_event(req, ret);
        if (ret < 0)
                req_set_fail_links(req);

 static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                       struct io_kiocb **nxt, bool force_nonblock)
 {
 #if defined(CONFIG_NET)
+       struct io_async_msghdr *kmsg = NULL;
        struct socket *sock;
        int ret;
 
        sock = sock_from_file(req->file, &ret);
        if (sock) {
                struct user_msghdr __user *msg;
                struct io_async_ctx io, *copy;
                struct sockaddr_storage addr;
-               struct msghdr *kmsg;
                unsigned flags;
 
                flags = READ_ONCE(sqe->msg_flags);
                if (flags & MSG_DONTWAIT)
                        req->flags |= REQ_F_NOWAIT;
                else if (force_nonblock)
                        flags |= MSG_DONTWAIT;

                msg = (struct user_msghdr __user *) (unsigned long)
                        READ_ONCE(sqe->addr);
                if (req->io) {
-                       kmsg = &req->io->msg.msg;
-                       kmsg->msg_name = &addr;
+                       kmsg = &req->io->msg;
+                       kmsg->msg.msg_name = &addr;
+                       /* if iov is set, it's allocated already */
+                       if (!kmsg->iov)
+                               kmsg->iov = kmsg->fast_iov;
+                       kmsg->msg.msg_iter.iov = kmsg->iov;
                } else {
-                       kmsg = &io.msg.msg;
-                       kmsg->msg_name = &addr;
+                       kmsg = &io.msg;
+                       kmsg->msg.msg_name = &addr;
                        ret = io_recvmsg_prep(req, &io);
                        if (ret)
                                goto out;
                }
 
-               ret = __sys_recvmsg_sock(sock, kmsg, msg, io.msg.uaddr, flags);
+               ret = __sys_recvmsg_sock(sock, &kmsg->msg, msg, kmsg->uaddr, flags);
                if (force_nonblock && ret == -EAGAIN) {
                        copy = kmalloc(sizeof(*copy), GFP_KERNEL);
                        if (!copy) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        memcpy(&copy->msg, &io.msg, sizeof(copy->msg));
                        req->io = copy;
                        memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe));
                        req->sqe = &req->io->sqe;
-                       return ret;
+                       return -EAGAIN;
                }
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
        }
 
 out:
+       if (kmsg && kmsg->iov != kmsg->fast_iov)
+               kfree(kmsg->iov);
        io_cqring_add_event(req, ret);
        if (ret < 0)
                req_set_fail_links(req);
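
For reference, the iov handling added above assumes the async msghdr container
keeps an inline vector array next to a pointer that may alias it. Below is a
minimal sketch of the layout the hunks rely on; the field names fast_iov, iov,
uaddr and msg are the ones the diff dereferences, but this is an illustration,
not the verbatim definition from fs/io_uring.c:

#include <linux/socket.h>       /* struct msghdr */
#include <linux/uio.h>          /* struct iovec, UIO_FASTIOV */

/* Sketch of the async msghdr container manipulated by the hunks above. */
struct io_async_msghdr {
        struct iovec            fast_iov[UIO_FASTIOV];  /* inline vectors, no allocation */
        struct iovec            *iov;                   /* fast_iov, or a kmalloc'ed array */
        struct sockaddr __user  *uaddr;                 /* user address, recvmsg only */
        struct msghdr           msg;                    /* kernel msghdr handed to the socket */
};

With a layout like this, the new checks at out: free kmsg->iov only when it no
longer points at the request's own fast_iov storage, i.e. only when the prep
path had to allocate room for more than UIO_FASTIOV vectors.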