iov_iter: Turn iov_iter_fault_in_readable into fault_in_iov_iter_readable
author Andreas Gruenbacher <agruenba@redhat.com>
Thu, 14 Apr 2022 22:28:40 +0000 (06:28 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 1 May 2022 15:22:28 +0000 (17:22 +0200)
commit a6294593e8a1290091d0b078d5d33da5e0cd3dfe upstream

Turn iov_iter_fault_in_readable into a function that returns the number
of bytes not faulted in, similar to copy_to_user, instead of returning a
non-zero value when any of the requested pages couldn't be faulted in.
This supports the existing users that require all pages to be faulted in
as well as new users that are happy if any pages can be faulted in.
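
For illustration only (not part of this patch; "i", "bytes", "ret" and
"faulted" are placeholder locals), the two kinds of callers can use the
new return value roughly like this:

        size_t faulted;

        /*
         * Caller that needs the whole range resident (matches the old
         * semantics): any byte left unfaulted is treated as a hard fault.
         */
        if (fault_in_iov_iter_readable(i, bytes)) {
                ret = -EFAULT;
                break;
        }

        /*
         * Caller that can make partial progress: give up only when not a
         * single byte could be faulted in, otherwise shrink the copy to
         * the part that is now resident.
         */
        faulted = bytes - fault_in_iov_iter_readable(i, bytes);
        if (!faulted) {
                ret = -EFAULT;
                break;
        }
        bytes = faulted;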

Rename iov_iter_fault_in_readable to fault_in_iov_iter_readable to make
sure this change doesn't silently break things.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
fs/btrfs/file.c
fs/f2fs/file.c
fs/fuse/file.c
fs/iomap/buffered-io.c
fs/ntfs/file.c
fs/ntfs3/file.c
include/linux/uio.h
lib/iov_iter.c
mm/filemap.c

index dc1e4d1b72914427e9ae23662c5f1eb5c6a0ca0a..0525dd13f1f980cc4bd9f6e64143b0285c30384d 100644 (file)
@@ -1709,7 +1709,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
                 * Fault pages before locking them in prepare_pages
                 * to avoid recursive lock
                 */
-               if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
+               if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
                        ret = -EFAULT;
                        break;
                }
index 0e14dc41ed4e6cd4a3df4eb881320f8aaa7006f9..8ef92719c6799dc1253709dabb11fca775f2dcc6 100644 (file)
@@ -4279,7 +4279,7 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                size_t target_size = 0;
                int err;
 
-               if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
+               if (fault_in_iov_iter_readable(from, iov_iter_count(from)))
                        set_inode_flag(inode, FI_NO_PREALLOC);
 
                if ((iocb->ki_flags & IOCB_NOWAIT)) {
index bc50a9fa84a0c0bfce2974bd6b36baf0fac83d37..71e9e301e569d0d27db6e046d9e714d1787fc98f 100644 (file)
@@ -1164,7 +1164,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
 
  again:
                err = -EFAULT;
-               if (iov_iter_fault_in_readable(ii, bytes))
+               if (fault_in_iov_iter_readable(ii, bytes))
                        break;
 
                err = -ENOMEM;
index 97119ec3b8503a05e53d04cccdb01abee56dd149..fe10d8a30f6bdff13e966c2c42497403c06b1269 100644 (file)
@@ -757,7 +757,7 @@ again:
                 * same page as we're writing to, without it being marked
                 * up-to-date.
                 */
-               if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
+               if (unlikely(fault_in_iov_iter_readable(i, bytes))) {
                        status = -EFAULT;
                        break;
                }
index ab4f3362466d068d06c76497b68c70ed5481245f..a43adeacd930c8c7af47381e5c15f1b16385f4ec 100644 (file)
@@ -1829,7 +1829,7 @@ again:
                 * pages being swapped out between us bringing them into memory
                 * and doing the actual copying.
                 */
-               if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
+               if (unlikely(fault_in_iov_iter_readable(i, bytes))) {
                        status = -EFAULT;
                        break;
                }
index 43b1451bff539576cd03c0f624708bf65b959270..54b9599640ef4adc9388e53716de885c899d9787 100644 (file)
@@ -989,7 +989,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
                frame_vbo = pos & ~(frame_size - 1);
                index = frame_vbo >> PAGE_SHIFT;
 
-               if (unlikely(iov_iter_fault_in_readable(from, bytes))) {
+               if (unlikely(fault_in_iov_iter_readable(from, bytes))) {
                        err = -EFAULT;
                        goto out;
                }
index 207101a9c5c326958eaae5b4be2208f0b6e2028e..d18458af6681f2bc6211b575e5b4b127068c2746 100644 (file)
@@ -133,7 +133,7 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
                                  size_t bytes, struct iov_iter *i);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
 void iov_iter_revert(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes);
+size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i);
index 2e07a4b083ed95ae51b231fed532188e77b4bbd0..b8de180420c7fb2968a8ec22f1cc4c186a39f075 100644 (file)
@@ -431,33 +431,42 @@ out:
 }
 
 /*
+ * fault_in_iov_iter_readable - fault in iov iterator for reading
+ * @i: iterator
+ * @size: maximum length
+ *
  * Fault in one or more iovecs of the given iov_iter, to a maximum length of
- * bytes.  For each iovec, fault in each page that constitutes the iovec.
+ * @size.  For each iovec, fault in each page that constitutes the iovec.
+ *
+ * Returns the number of bytes not faulted in (like copy_to_user() and
+ * copy_from_user()).
  *
- * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
- * because it is an invalid address).
+ * Always returns 0 for non-userspace iterators.
  */
-int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes)
+size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
 {
        if (iter_is_iovec(i)) {
+               size_t count = min(size, iov_iter_count(i));
                const struct iovec *p;
                size_t skip;
 
-               if (bytes > i->count)
-                       bytes = i->count;
-               for (p = i->iov, skip = i->iov_offset; bytes; p++, skip = 0) {
-                       size_t len = min(bytes, p->iov_len - skip);
+               size -= count;
+               for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
+                       size_t len = min(count, p->iov_len - skip);
+                       size_t ret;
 
                        if (unlikely(!len))
                                continue;
-                       if (fault_in_readable(p->iov_base + skip, len))
-                               return -EFAULT;
-                       bytes -= len;
+                       ret = fault_in_readable(p->iov_base + skip, len);
+                       count -= len - ret;
+                       if (ret)
+                               break;
                }
+               return count + size;
        }
        return 0;
 }
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
+EXPORT_SYMBOL(fault_in_iov_iter_readable);
 
 void iov_iter_init(struct iov_iter *i, unsigned int direction,
                        const struct iovec *iov, unsigned long nr_segs,
index d697b3446a4a5c97b062d7113dbe87408d971f7c..00e391e758801722a905352111be8000240b442d 100644 (file)
@@ -3760,7 +3760,7 @@ again:
                 * same page as we're writing to, without it being marked
                 * up-to-date.
                 */
-               if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
+               if (unlikely(fault_in_iov_iter_readable(i, bytes))) {
                        status = -EFAULT;
                        break;
                }