netfs: Switch to using unsigned long long rather than loff_t
author David Howells <dhowells@redhat.com>
Mon, 18 Mar 2024 16:57:31 +0000 (16:57 +0000)
committer David Howells <dhowells@redhat.com>
Wed, 1 May 2024 17:07:35 +0000 (18:07 +0100)
Switch from loff_t to unsigned long long in netfslib to avoid problems
with the sign flipping in the maths when we're dealing with the byte at
position 0x7fffffffffffffff: adding anything to that offset wraps it
negative, which breaks range comparisons against the file size.
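
For illustration, a minimal userspace sketch of the failure mode (not part
of this patch; build with -fno-strict-overflow so that, as in the kernel,
signed overflow wraps instead of being undefined behaviour):

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
        /* A file whose last byte sits at the top of the signed range. */
        long long i_size = LLONG_MAX;
        long long end = i_size + 1;              /* wraps to LLONG_MIN */

        unsigned long long u_i_size = LLONG_MAX;
        unsigned long long u_end = u_i_size + 1; /* 1ULL << 63 */

        /* The signed check concludes we haven't reached EOF yet. */
        printf("signed:   end >= i_size -> %d\n", end >= i_size);     /* 0 */
        printf("unsigned: end >= i_size -> %d\n", u_end >= u_i_size); /* 1 */
        return 0;
    }

This is the shape of the EOF check that netfs_begin_read() makes with
rreq->start + rreq->submitted >= rreq->i_size (see the fs/netfs/io.c hunk
below).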

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: Ilya Dryomov <idryomov@gmail.com>
cc: Xiubo Li <xiubli@redhat.com>
cc: netfs@lists.linux.dev
cc: ceph-devel@vger.kernel.org
cc: linux-fsdevel@vger.kernel.org

fs/cachefiles/io.c
fs/ceph/addr.c
fs/netfs/buffered_read.c
fs/netfs/buffered_write.c
fs/netfs/io.c
fs/netfs/main.c
fs/netfs/output.c
include/linux/netfs.h
include/trace/events/netfs.h

diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index 1d685357e67fc71ffc2be73513b00f7efd8ee906..5ba5c7814fe41386f6120fefeb12e50ed488af9f 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -493,7 +493,7 @@ out_no_object:
  * boundary as appropriate.
  */
 static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
-                                                   loff_t i_size)
+                                                   unsigned long long i_size)
 {
        return cachefiles_do_prepare_read(&subreq->rreq->cache_resources,
                                          subreq->start, &subreq->len, i_size,
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 74bfd10b1b1a3027cb30d04da6e4fa54f511dea9..8c16bc5250ef56cb8d0864d3b4b054466d57f250 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -193,7 +193,7 @@ static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
         * block, but do not exceed the file size, unless the original
         * request already exceeds it.
         */
-       new_end = min(round_up(end, lo->stripe_unit), rreq->i_size);
+       new_end = umin(round_up(end, lo->stripe_unit), rreq->i_size);
        if (new_end > end && new_end <= rreq->start + max_len)
                rreq->len = new_end - rreq->start;
 
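The min() -> umin() switch above is needed because rreq->i_size is now
unsigned long long: the kernel's min() requires matching argument types,
while umin() compares both operands as unsigned, which is also the safe
direction here.  A sketch of why (userspace, not kernel code; round_up_demo
stands in for the kernel's round_up() on power-of-two alignments, and the
signed wrap again assumes -fno-strict-overflow):

    #include <stdio.h>
    #include <limits.h>

    #define round_up_demo(x, y) ((((x) - 1) | ((y) - 1)) + 1)

    int main(void)
    {
        long long end = LLONG_MAX, i_size = LLONG_MAX;

        /* Signed: rounding the last byte up to a 4K boundary wraps
         * negative, and a signed minimum then picks the garbage value.
         */
        long long s = round_up_demo(end, 4096LL);
        printf("signed:   %llx\n",
               (unsigned long long)(s < i_size ? s : i_size));
        /* prints 8000000000000000 */

        /* Unsigned: the rounded value stays above i_size, so taking the
         * minimum clamps the expansion to the file size as intended.
         */
        unsigned long long u = round_up_demo((unsigned long long)end, 4096ULL);
        unsigned long long ui = (unsigned long long)i_size;
        printf("unsigned: %llx\n", u < ui ? u : ui);
        /* prints 7fffffffffffffff */
        return 0;
    }
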
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 1622cce535a3ab4e463bd08be54ce5553248a52a..47603f08680e4988061eb4c8b12ebcc48397fd3d 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -130,7 +130,9 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 }
 
 static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
-                                        loff_t *_start, size_t *_len, loff_t i_size)
+                                        unsigned long long *_start,
+                                        unsigned long long *_len,
+                                        unsigned long long i_size)
 {
        struct netfs_cache_resources *cres = &rreq->cache_resources;
 
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index d8f66ce94575c594a37e59dccc31dfbe32f2360f..eba49bfafe6438b9c87ebdfc37327ac8b0eecb83 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -664,7 +664,7 @@ static void netfs_pages_written_back(struct netfs_io_request *wreq)
        last = (wreq->start + wreq->len - 1) / PAGE_SIZE;
        xas_for_each(&xas, folio, last) {
                WARN(!folio_test_writeback(folio),
-                    "bad %zx @%llx page %lx %lx\n",
+                    "bad %llx @%llx page %lx %lx\n",
                     wreq->len, wreq->start, folio->index, last);
 
                if ((finfo = netfs_folio_info(folio))) {
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index 8de581ac0cfbab46ef8804535607623484a0310a..6cfecfcd02e14f372f4d3b8adde8da0f1eb30061 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -476,7 +476,7 @@ netfs_rreq_prepare_read(struct netfs_io_request *rreq,
 
 set:
        if (subreq->len > rreq->len)
-               pr_warn("R=%08x[%u] SREQ>RREQ %zx > %zx\n",
+               pr_warn("R=%08x[%u] SREQ>RREQ %zx > %llx\n",
                        rreq->debug_id, subreq->debug_index,
                        subreq->len, rreq->len);
 
@@ -513,7 +513,7 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
        subreq->start           = rreq->start + rreq->submitted;
        subreq->len             = io_iter->count;
 
-       _debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
+       _debug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
        list_add_tail(&subreq->rreq_link, &rreq->subrequests);
 
        /* Call out to the cache to find out what it can do with the remaining
@@ -588,7 +588,7 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
        atomic_set(&rreq->nr_outstanding, 1);
        io_iter = rreq->io_iter;
        do {
-               _debug("submit %llx + %zx >= %llx",
+               _debug("submit %llx + %llx >= %llx",
                       rreq->start, rreq->submitted, rreq->i_size);
                if (rreq->origin == NETFS_DIO_READ &&
                    rreq->start + rreq->submitted >= rreq->i_size)
diff --git a/fs/netfs/main.c b/fs/netfs/main.c
index 4805b937736471607886e53a3226dc8aade58c3d..5f0f438e5d211d85de06f2c41d1d98144f404639 100644
--- a/fs/netfs/main.c
+++ b/fs/netfs/main.c
@@ -62,7 +62,7 @@ static int netfs_requests_seq_show(struct seq_file *m, void *v)
 
        rreq = list_entry(v, struct netfs_io_request, proc_link);
        seq_printf(m,
-                  "%08x %s %3d %2lx %4d %3d @%04llx %zx/%zx",
+                  "%08x %s %3d %2lx %4d %3d @%04llx %llx/%llx",
                   rreq->debug_id,
                   netfs_origins[rreq->origin],
                   refcount_read(&rreq->ref),
diff --git a/fs/netfs/output.c b/fs/netfs/output.c
index e586396d6b729d777fc60c45ecc9410c5c6031a5..85374322f10fbb61e5d889c06e7e6d34aaee9d1d 100644
--- a/fs/netfs/output.c
+++ b/fs/netfs/output.c
@@ -439,7 +439,7 @@ static void netfs_submit_writethrough(struct netfs_io_request *wreq, bool final)
  */
 int netfs_advance_writethrough(struct netfs_io_request *wreq, size_t copied, bool to_page_end)
 {
-       _enter("ic=%zu sb=%zu ws=%u cp=%zu tp=%u",
+       _enter("ic=%zu sb=%llu ws=%u cp=%zu tp=%u",
               wreq->iter.count, wreq->submitted, wreq->wsize, copied, to_page_end);
 
        wreq->iter.count += copied;
@@ -457,7 +457,7 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct kiocb *iocb)
 {
        int ret = -EIOCBQUEUED;
 
-       _enter("ic=%zu sb=%zu ws=%u",
+       _enter("ic=%zu sb=%llu ws=%u",
               wreq->iter.count, wreq->submitted, wreq->wsize);
 
        if (wreq->submitted < wreq->io_iter.count)
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 0b6c2c2d3c23894e866f594a5bfbdb0ba28a89c6..88269681d4fcfea5f4ca30f0acb9d2c6ea8f6457 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -149,7 +149,7 @@ struct netfs_io_subrequest {
        struct work_struct      work;
        struct list_head        rreq_link;      /* Link in rreq->subrequests */
        struct iov_iter         io_iter;        /* Iterator for this subrequest */
-       loff_t                  start;          /* Where to start the I/O */
+       unsigned long long      start;          /* Where to start the I/O */
        size_t                  len;            /* Size of the I/O */
        size_t                  transferred;    /* Amount of data transferred */
        refcount_t              ref;
@@ -205,15 +205,15 @@ struct netfs_io_request {
        atomic_t                subreq_counter; /* Next subreq->debug_index */
        atomic_t                nr_outstanding; /* Number of ops in progress */
        atomic_t                nr_copy_ops;    /* Number of copy-to-cache ops in progress */
-       size_t                  submitted;      /* Amount submitted for I/O so far */
-       size_t                  len;            /* Length of the request */
        size_t                  upper_len;      /* Length can be extended to here */
+       unsigned long long      submitted;      /* Amount submitted for I/O so far */
+       unsigned long long      len;            /* Length of the request */
        size_t                  transferred;    /* Amount to be indicated as transferred */
        short                   error;          /* 0 or error that occurred */
        enum netfs_io_origin    origin;         /* Origin of the request */
        bool                    direct_bv_unpin; /* T if direct_bv[] must be unpinned */
-       loff_t                  i_size;         /* Size of the file */
-       loff_t                  start;          /* Start position */
+       unsigned long long      i_size;         /* Size of the file */
+       unsigned long long      start;          /* Start position */
        pgoff_t                 no_unlock_folio; /* Don't unlock this folio after read */
        refcount_t              ref;
        unsigned long           flags;
@@ -294,13 +294,15 @@ struct netfs_cache_ops {
 
        /* Expand readahead request */
        void (*expand_readahead)(struct netfs_cache_resources *cres,
-                                loff_t *_start, size_t *_len, loff_t i_size);
+                                unsigned long long *_start,
+                                unsigned long long *_len,
+                                unsigned long long i_size);
 
        /* Prepare a read operation, shortening it to a cached/uncached
         * boundary as appropriate.
         */
        enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
-                                            loff_t i_size);
+                                            unsigned long long i_size);
 
        /* Prepare a write operation, working out what part of the write we can
         * actually do.
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index 30769103638f4a169ccac11e6f7f7f193bf2f920..7126d2ea459ca0f349f883f82673f51abaf31b81 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -280,7 +280,7 @@ TRACE_EVENT(netfs_sreq,
                    __entry->start      = sreq->start;
                           ),
 
-           TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx/%zx e=%d",
+           TP_printk("R=%08x[%x] %s %s f=%02x s=%llx %zx/%zx e=%d",
                      __entry->rreq, __entry->index,
                      __print_symbolic(__entry->source, netfs_sreq_sources),
                      __print_symbolic(__entry->what, netfs_sreq_traces),
@@ -320,7 +320,7 @@ TRACE_EVENT(netfs_failure,
                    __entry->start      = sreq ? sreq->start : 0;
                           ),
 
-           TP_printk("R=%08x[%d] %s f=%02x s=%llx %zx/%zx %s e=%d",
+           TP_printk("R=%08x[%x] %s f=%02x s=%llx %zx/%zx %s e=%d",
                      __entry->rreq, __entry->index,
                      __print_symbolic(__entry->source, netfs_sreq_sources),
                      __entry->flags,
@@ -436,7 +436,7 @@ TRACE_EVENT(netfs_write,
                    __field(unsigned int,               cookie          )
                    __field(enum netfs_write_trace,     what            )
                    __field(unsigned long long,         start           )
-                   __field(size_t,                     len             )
+                   __field(unsigned long long,         len             )
                             ),
 
            TP_fast_assign(