netfs, 9p: Implement helpers for new write code
Author: David Howells <dhowells@redhat.com>
Mon, 18 Mar 2024 20:29:53 +0000 (20:29 +0000)
Committer: David Howells <dhowells@redhat.com>
Wed, 1 May 2024 17:07:37 +0000 (18:07 +0100)
Implement the helpers for the new write code in 9p.  There's now an
optional ->prepare_write() that allows the filesystem to set the parameters
for the next write, such as maximum size and maximum segment count, and an
->issue_write() that is called to initiate an (asynchronous) write
operation.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: Eric Van Hensbergen <ericvh@kernel.org>
cc: Latchesar Ionkov <lucho@ionkov.net>
cc: Dominique Martinet <asmadeus@codewreck.org>
cc: Christian Schoenebeck <linux_oss@crudebyte.com>
cc: v9fs@lists.linux.dev
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org

fs/9p/vfs_addr.c
include/net/9p/client.h
net/9p/Kconfig
net/9p/client.c

index 5a943c122d831785864caf4817177f8fbe368cf5..07d03efdd594e8d17f3fcef1bb71e9db31b26582 100644 (file)
 #include "cache.h"
 #include "fid.h"
 
+/*
+ * Writeback calls this when it finds a folio that needs uploading.  This isn't
+ * called if writeback only has copy-to-cache to deal with.
+ */
+static void v9fs_begin_writeback(struct netfs_io_request *wreq)
+{
+       struct p9_fid *fid;
+
+       /* Writeback has no struct file to take a fid from, so look up an
+        * already-open fid on the inode.  NOTE(review): assumes the bool
+        * arguments mean "writeable" and "any uid" - confirm against
+        * v9fs_fid_find_inode()'s signature.
+        */
+       fid = v9fs_fid_find_inode(wreq->inode, true, INVALID_UID, true);
+       if (!fid) {
+               WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
+                         wreq->inode->i_ino);
+               return;
+       }
+
+       /* Limit write size to the client message size minus the 9P I/O
+        * header, further capped by the fid's iounit if the server set one.
+        * (Same calculation as in v9fs_init_request().)
+        */
+       wreq->wsize = fid->clnt->msize - P9_IOHDRSZ;
+       if (fid->iounit)
+               wreq->wsize = min(wreq->wsize, fid->iounit);
+       /* Stash the fid for the issue_write path and enable the upload
+        * stream so netfs will actually send data to the server.
+        */
+       wreq->netfs_priv = fid;
+       wreq->io_streams[0].avail = true;
+}
+
+/*
+ * Issue a subrequest to write to the server.
+ */
+static void v9fs_issue_write(struct netfs_io_subrequest *subreq)
+{
+       struct p9_fid *fid = subreq->rreq->netfs_priv;
+       int err, len;
+
+       /* p9_client_write() returns the number of bytes written and reports
+        * failure through *err; hand netfs the byte count, or the error if
+        * nothing at all was written.
+        * NOTE(review): this starts at subreq->start without adding
+        * subreq->transferred, unlike p9_client_write_subreq() - confirm
+        * retries/partial transfers cannot reach this path with
+        * transferred != 0.
+        */
+       len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
+       netfs_write_subrequest_terminated(subreq, len ?: err, false);
+}
+
 static void v9fs_upload_to_server(struct netfs_io_subrequest *subreq)
 {
        struct p9_fid *fid = subreq->rreq->netfs_priv;
@@ -92,6 +126,14 @@ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
                        rreq->origin == NETFS_UNBUFFERED_WRITE ||
                        rreq->origin == NETFS_DIO_WRITE);
 
+#if 0 // TODO: Cut over
+       if (rreq->origin == NETFS_WRITEBACK)
+               return 0; /* We don't get the write handle until we find we
+                          * have actually dirty data and not just
+                          * copy-to-cache data.
+                          */
+#endif
+
        if (file) {
                fid = file->private_data;
                if (!fid)
@@ -103,6 +145,10 @@ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
                        goto no_fid;
        }
 
+       rreq->wsize = fid->clnt->msize - P9_IOHDRSZ;
+       if (fid->iounit)
+               rreq->wsize = min(rreq->wsize, fid->iounit);
+
        /* we might need to read from a fid that was opened write-only
         * for read-modify-write of page cache, use the writeback fid
         * for that */
@@ -131,6 +177,8 @@ const struct netfs_request_ops v9fs_req_ops = {
        .init_request           = v9fs_init_request,
        .free_request           = v9fs_free_request,
        .issue_read             = v9fs_issue_read,
+       .begin_writeback        = v9fs_begin_writeback,
+       .issue_write            = v9fs_issue_write,
        .create_write_requests  = v9fs_create_write_requests,
 };
 
index 78ebcf782ce57d5b24e018679b925ae00c42eb94..4f785098c67a1402a3377c9abb67d2be1bb23d04 100644 (file)
@@ -207,6 +207,8 @@ int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err
 int p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
                int *err);
 int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err);
+struct netfs_io_subrequest;
+void p9_client_write_subreq(struct netfs_io_subrequest *subreq);
 int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset);
 int p9dirent_read(struct p9_client *clnt, char *buf, int len,
                  struct p9_dirent *dirent);
index 00ebce9e5a6570eebf0be94ea3f219c4242a00a5..bcdab9c23b402d59cc9f0ba9590de2ceea075286 100644 (file)
@@ -5,6 +5,7 @@
 
 menuconfig NET_9P
        tristate "Plan 9 Resource Sharing Support (9P2000)"
+       select NETFS_SUPPORT
        help
          If you say Y here, you will get experimental support for
          Plan 9 resource sharing via the 9P2000 protocol.
index f7e90b4769bba92ef8187b0a96cb310f0c13d5f8..00774656eeac8368f538e59cb8b456af405a3469 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/sched/signal.h>
 #include <linux/uaccess.h>
 #include <linux/uio.h>
+#include <linux/netfs.h>
 #include <net/9p/9p.h>
 #include <linux/parser.h>
 #include <linux/seq_file.h>
@@ -1661,6 +1662,54 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
 }
 EXPORT_SYMBOL(p9_client_write);
 
+/*
+ * Write the data described by a netfs write subrequest to the server and
+ * terminate the subrequest with the number of bytes the server accepted (or
+ * an error).  The fid to write through is carried in wreq->netfs_priv.
+ */
+void
+p9_client_write_subreq(struct netfs_io_subrequest *subreq)
+{
+       struct netfs_io_request *wreq = subreq->rreq;
+       struct p9_fid *fid = wreq->netfs_priv;
+       struct p9_client *clnt = fid->clnt;
+       struct p9_req_t *req;
+       unsigned long long start = subreq->start + subreq->transferred;
+       int written, len = subreq->len - subreq->transferred;
+       int err;
+
+       p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu len %d\n",
+                fid->fid, start, len);
+
+       /* Don't bother zerocopy for small IO (< 1024) */
+       if (clnt->trans_mod->zc_request && len > 1024) {
+               /* NOTE(review): wreq->len is the length of the whole request;
+                * check whether the zerocopy out-length here should instead be
+                * len, this subrequest's remaining span.
+                */
+               req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, &subreq->io_iter,
+                                      0, wreq->len, P9_ZC_HDR_SZ, "dqd",
+                                      fid->fid, start, len);
+       } else {
+               req = p9_client_rpc(clnt, P9_TWRITE, "dqV", fid->fid,
+                                   start, len, &subreq->io_iter);
+       }
+       if (IS_ERR(req)) {
+               netfs_write_subrequest_terminated(subreq, PTR_ERR(req), false);
+               return;
+       }
+
+       /* Decode the RWRITE reply to find how much the server accepted. */
+       err = p9pdu_readf(&req->rc, clnt->proto_version, "d", &written);
+       if (err) {
+               trace_9p_protocol_dump(clnt, &req->rc);
+               p9_req_put(clnt, req);
+               netfs_write_subrequest_terminated(subreq, err, false);
+               return;
+       }
+
+       /* Clamp a bogus over-claim from the server to what we actually sent.
+        * (%d for len: it's an int, so %u was a format mismatch.)
+        */
+       if (written > len) {
+               pr_err("bogus RWRITE count (%d > %d)\n", written, len);
+               written = len;
+       }
+
+       /* Report the count the server acknowledged, not the requested length,
+        * matching p9_client_write()'s trace convention.
+        */
+       p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", written);
+
+       p9_req_put(clnt, req);
+       netfs_write_subrequest_terminated(subreq, written, false);
+}
+EXPORT_SYMBOL(p9_client_write_subreq);
+
 struct p9_wstat *p9_client_stat(struct p9_fid *fid)
 {
        int err;