xfs: xfs_buf_item_size_segment() needs to pass segment offset
authorDave Chinner <dchinner@redhat.com>
Mon, 22 Mar 2021 16:52:04 +0000 (09:52 -0700)
committerDarrick J. Wong <djwong@kernel.org>
Thu, 25 Mar 2021 23:47:51 +0000 (16:47 -0700)
Otherwise it doesn't correctly calculate the number of vectors
in a logged buffer that has a contiguous map that gets split into
multiple regions because the range spans discontiguous memory.

This has probably never been hit in practice - we don't log contiguous
ranges on unmapped buffers (inode clusters).

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
fs/xfs/xfs_buf_item.c

index cb8fd8afd1409c27853b5b34843a50009257bda2..189a4534e0b2e102df1830c999795c9351c40eb5 100644 (file)
@@ -55,6 +55,18 @@ xfs_buf_log_format_size(
                        (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
 }
 
+static inline bool
+xfs_buf_item_straddle(
+       struct xfs_buf          *bp,
+       uint                    offset,
+       int                     next_bit,
+       int                     last_bit)
+{
+       return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
+               (xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
+                XFS_BLF_CHUNK);
+}
+
 /*
  * This returns the number of log iovecs needed to log the
  * given buf log item.
@@ -69,6 +81,7 @@ STATIC void
 xfs_buf_item_size_segment(
        struct xfs_buf_log_item         *bip,
        struct xfs_buf_log_format       *blfp,
+       uint                            offset,
        int                             *nvecs,
        int                             *nbytes)
 {
@@ -103,12 +116,8 @@ xfs_buf_item_size_segment(
                 */
                if (next_bit == -1) {
                        break;
-               } else if (next_bit != last_bit + 1) {
-                       last_bit = next_bit;
-                       (*nvecs)++;
-               } else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
-                          (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
-                           XFS_BLF_CHUNK)) {
+               } else if (next_bit != last_bit + 1 ||
+                          xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
                        last_bit = next_bit;
                        (*nvecs)++;
                } else {
@@ -142,8 +151,10 @@ xfs_buf_item_size(
        int                     *nbytes)
 {
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+       struct xfs_buf          *bp = bip->bli_buf;
        int                     i;
        int                     bytes;
+       uint                    offset = 0;
 
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        if (bip->bli_flags & XFS_BLI_STALE) {
@@ -185,8 +196,9 @@ xfs_buf_item_size(
         */
        bytes = 0;
        for (i = 0; i < bip->bli_format_count; i++) {
-               xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
+               xfs_buf_item_size_segment(bip, &bip->bli_formats[i], offset,
                                          nvecs, &bytes);
+               offset += BBTOB(bp->b_maps[i].bm_len);
        }
 
        /*
@@ -213,18 +225,6 @@ xfs_buf_item_copy_iovec(
                        nbits * XFS_BLF_CHUNK);
 }
 
-static inline bool
-xfs_buf_item_straddle(
-       struct xfs_buf          *bp,
-       uint                    offset,
-       int                     next_bit,
-       int                     last_bit)
-{
-       return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
-               (xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
-                XFS_BLF_CHUNK);
-}
-
 static void
 xfs_buf_item_format_segment(
        struct xfs_buf_log_item *bip,