#include "fs_context.h"
 #include "cifs_ioctl.h"
 #include "cached_dir.h"
+#include <trace/events/netfs.h>
+
+static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
+
+/*
+ * Prepare a subrequest to upload to the server.  We need to allocate credits
+ * so that we know the maximum amount of data that we can include in it.
+ */
+static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
+{
+       struct cifs_io_subrequest *wdata =
+               container_of(subreq, struct cifs_io_subrequest, subreq);
+       struct cifs_io_request *req = wdata->req;
+       struct TCP_Server_Info *server;
+       struct cifsFileInfo *open_file = req->cfile;
+       size_t wsize = req->rreq.wsize;
+       int rc;
+
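+       /* Allocate an xid for this I/O if we don't already have one. */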
+       if (!wdata->have_xid) {
+               wdata->xid = get_xid();
+               wdata->have_xid = true;
+       }
+
+       server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
+       wdata->server = server;
+
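+       /* Reopen a stale file handle before asking for credits. */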
+retry:
+       if (open_file->invalidHandle) {
+               rc = cifs_reopen_file(open_file, false);
+               if (rc < 0) {
+                       if (rc == -EAGAIN)
+                               goto retry;
+                       subreq->error = rc;
+                       return netfs_prepare_write_failed(subreq);
+               }
+       }
+
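+       /* Wait for send credits; the size they cover caps subreq.max_len. */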
+       rc = server->ops->wait_mtu_credits(server, wsize, &wdata->subreq.max_len,
+                                          &wdata->credits);
+       if (rc < 0) {
+               subreq->error = rc;
+               return netfs_prepare_write_failed(subreq);
+       }
+
+#ifdef CONFIG_CIFS_SMB_DIRECT
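+       /* smbdirect RDMA limits how many segments fit in one registration. */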
+       if (server->smbd_conn)
+               subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
+#endif
+}
+
+/*
+ * Issue a subrequest to upload to the server.
+ */
+static void cifs_issue_write(struct netfs_io_subrequest *subreq)
+{
+       struct cifs_io_subrequest *wdata =
+               container_of(subreq, struct cifs_io_subrequest, subreq);
+       struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
+       int rc;
+
+       if (cifs_forced_shutdown(sbi)) {
+               rc = -EIO;
+               goto fail;
+       }
+
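+       /* Adjust the credits we hold to match the final subrequest length. */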
+       rc = adjust_credits(wdata->server, &wdata->credits, wdata->subreq.len);
+       if (rc)
+               goto fail;
+
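+       /* Punt with -EAGAIN if the handle went stale after preparation. */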
+       rc = -EAGAIN;
+       if (wdata->req->cfile->invalidHandle)
+               goto fail;
+
+       wdata->server->ops->async_writev(wdata);
+out:
+       return;
+
+fail:
+       if (rc == -EAGAIN)
+               trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+       else
+               trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
+       add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
+       netfs_write_subrequest_terminated(wdata, rc, false);
+       goto out;
+}
+
+/*
+ * Split the read up according to how many credits we can get for each piece.
+ * It's okay to sleep here if we need to wait for more credits to become
+ * available.
+ *
+ * We also choose the server and allocate an operation ID to be cleaned up
+ * later.
+ */
+static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
+{
+       struct netfs_io_request *rreq = subreq->rreq;
+       struct TCP_Server_Info *server;
+       struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
+       struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
+       struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
+       size_t rsize = 0;
+       int rc;
+
+       rdata->xid = get_xid();
+       rdata->have_xid = true;
+
+       server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
+       rdata->server = server;
+
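+       /* Negotiate a read size lazily if the mount hasn't set one yet. */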
+       if (cifs_sb->ctx->rsize == 0)
+               cifs_sb->ctx->rsize =
+                       server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
+                                                    cifs_sb->ctx);
+
+       rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize,
+                                          &rdata->credits);
+       if (rc) {
+               subreq->error = rc;
+               return false;
+       }
+
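+       /* Shrink the subrequest to fit within the credits we were granted. */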
+       subreq->len = min_t(size_t, subreq->len, rsize);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+       if (server->smbd_conn)
+               subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
+#endif
+       return true;
+}
+
+/*
+ * Issue a read operation on behalf of the netfs helper functions.  We're asked
+ * to make a read of a certain size at a point in the file.  We are permitted
+ * to only read a portion of that, but as long as we read something, the netfs
+ * helper will call us again so that we can issue another read.
+ */
+static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
+{
+       struct netfs_io_request *rreq = subreq->rreq;
+       struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
+       struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
+       struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
+       pid_t pid;
+       int rc = 0;
+
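+       /* Forward the opener's pid if the mount requested it. */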
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+               pid = req->cfile->pid;
+       else
+               pid = current->tgid; /* This may be a workqueue, not the issuing task */
+
+       cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
+                __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
+                subreq->transferred, subreq->len);
+
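+       /* Make sure we have a valid handle, retrying the reopen as needed. */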
+       if (req->cfile->invalidHandle) {
+               do {
+                       rc = cifs_reopen_file(req->cfile, true);
+               } while (rc == -EAGAIN);
+               if (rc)
+                       goto out;
+       }
+
+       __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+       rdata->pid = pid;
+
+       rc = adjust_credits(rdata->server, &rdata->credits, rdata->subreq.len);
+       if (!rc) {
+               if (rdata->req->cfile->invalidHandle)
+                       rc = -EAGAIN;
+               else
+                       rc = rdata->server->ops->async_readv(rdata);
+       }
+
+out:
+       if (rc)
+               netfs_subreq_terminated(subreq, rc, false);
+}
+
+/*
+ * Writeback calls this when it finds a folio that needs uploading.  This isn't
+ * called if writeback only has copy-to-cache to deal with.
+ */
+static void cifs_begin_writeback(struct netfs_io_request *wreq)
+{
+       struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
+       int ret;
+
+       ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
+       if (ret) {
+               cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
+               return;
+       }
+
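+       /* Mark the upload stream usable now we have a writable handle. */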
+       wreq->io_streams[0].avail = true;
+}
+
+/*
+ * Initialise a request.
+ */
+static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
+{
+       struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
+       struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
+       struct cifsFileInfo *open_file = NULL;
+
+       rreq->rsize = cifs_sb->ctx->rsize;
+       rreq->wsize = cifs_sb->ctx->wsize;
+
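+       /*
+        * Take a ref on the open file if we were given one; writeback requests
+        * find a writable handle later, in cifs_begin_writeback().
+        */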
+       if (file) {
+               open_file = file->private_data;
+               rreq->netfs_priv = file->private_data;
+               req->cfile = cifsFileInfo_get(open_file);
+       } else if (rreq->origin != NETFS_WRITEBACK) {
+               WARN_ON_ONCE(1);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/*
+ * Expand a readahead request to rsize granularity, provided rsize is at
+ * least a page in size, allowing for the possibility that rsize is not a
+ * power of two.
+ */
+static void cifs_expand_readahead(struct netfs_io_request *rreq)
+{
+       unsigned int rsize = rreq->rsize;
+       loff_t misalignment, i_size = i_size_read(rreq->inode);
+
+       if (rsize < PAGE_SIZE)
+               return;
+
+       if (rsize < INT_MAX)
+               rsize = roundup_pow_of_two(rsize);
+       else
+               rsize = ((unsigned int)INT_MAX + 1) / 2;
+
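+       /* Round the start down to an rsize boundary and grow len to match. */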
+       misalignment = rreq->start & (rsize - 1);
+       if (misalignment) {
+               rreq->start -= misalignment;
+               rreq->len += misalignment;
+       }
+
+       rreq->len = round_up(rreq->len, rsize);
+       if (rreq->start < i_size && rreq->len > i_size - rreq->start)
+               rreq->len = i_size - rreq->start;
+}
+
+/*
+ * Completion of a request operation.
+ */
+static void cifs_rreq_done(struct netfs_io_request *rreq)
+{
+       struct timespec64 atime, mtime;
+       struct inode *inode = rreq->inode;
+
+       /* we do not want atime to be less than mtime, it broke some apps */
+       atime = inode_set_atime_to_ts(inode, current_time(inode));
+       mtime = inode_get_mtime(inode);
+       if (timespec64_compare(&atime, &mtime) < 0)
+               inode_set_atime_to_ts(inode, inode_get_mtime(inode));
+}
+
+static void cifs_post_modify(struct inode *inode)
+{
+       /* Indication to update ctime and mtime as close is deferred */
+       set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
+}
+
+static void cifs_free_request(struct netfs_io_request *rreq)
+{
+       struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
+
+       if (req->cfile)
+               cifsFileInfo_put(req->cfile);
+}
+
+static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
+{
+       struct cifs_io_subrequest *rdata =
+               container_of(subreq, struct cifs_io_subrequest, subreq);
+       int rc = subreq->error;
+
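+       /* Release any RDMA memory registration still held for the read. */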
+       if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
+#ifdef CONFIG_CIFS_SMB_DIRECT
+               if (rdata->mr) {
+                       smbd_deregister_mr(rdata->mr);
+                       rdata->mr = NULL;
+               }
+#endif
+       }
+
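+       /* Return any credits we still hold and release the xid. */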
+       add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
+       if (rdata->have_xid)
+               free_xid(rdata->xid);
+}
+
+const struct netfs_request_ops cifs_req_ops = {
+       .request_pool           = &cifs_io_request_pool,
+       .subrequest_pool        = &cifs_io_subrequest_pool,
+       .init_request           = cifs_init_request,
+       .free_request           = cifs_free_request,
+       .free_subrequest        = cifs_free_subrequest,
+       .expand_readahead       = cifs_expand_readahead,
+       .clamp_length           = cifs_clamp_length,
+       .issue_read             = cifs_req_issue_read,
+       .done                   = cifs_rreq_done,
+       .post_modify            = cifs_post_modify,
+       .begin_writeback        = cifs_begin_writeback,
+       .prepare_write          = cifs_prepare_write,
+       .issue_write            = cifs_issue_write,
+};
 
 /*
  * Remove the dirty flags from a span of pages.