btrfs: add a cached_state to try_lock_extent
authorJosef Bacik <josef@toxicpanda.com>
Fri, 30 Sep 2022 20:45:09 +0000 (16:45 -0400)
committerDavid Sterba <dsterba@suse.com>
Mon, 5 Dec 2022 17:00:35 +0000 (18:00 +0100)
With nowait becoming more pervasive throughout our codebase, go ahead and
add a cached_state to try_lock_extent().  This allows us to be faster
about clearing the locked area if we have contention, and then gives us
the same optimization for unlock if we are able to lock the range.

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/extent-io-tree.c
fs/btrfs/extent-io-tree.h
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ordered-data.c
fs/btrfs/relocation.c

index 83cb0378096f209ae6b0c05896359575103eb4b3..1b0a45b51f4c1065ed354b06fc3248d4589481d4 100644 (file)
@@ -1615,17 +1615,18 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                                  changeset);
 }
 
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+                   struct extent_state **cached)
 {
        int err;
        u64 failed_start;
 
        err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start,
-                              NULL, NULL, GFP_NOFS);
+                              cached, NULL, GFP_NOFS);
        if (err == -EEXIST) {
                if (failed_start > start)
                        clear_extent_bit(tree, start, failed_start - 1,
-                                        EXTENT_LOCKED, NULL);
+                                        EXTENT_LOCKED, cached);
                return 0;
        }
        return 1;
index a855f40dd61d49804cd2f13f174c66a32e02a2f8..786be8f38f0bdf85609d5ffcb43974f04fed3b2a 100644 (file)
@@ -106,7 +106,8 @@ void extent_io_tree_release(struct extent_io_tree *tree);
 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
                struct extent_state **cached);
 
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+                   struct extent_state **cached);
 
 int __init extent_state_init_cachep(void);
 void __cold extent_state_free_cachep(void);
index 4dcf22e051ff88665573bd957c841f195b166f41..c7e94a0e60d549f6bd2a035cb5a58784ea4be4b3 100644 (file)
@@ -4959,7 +4959,8 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
        io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
 
        if (wait == WAIT_NONE) {
-               if (!try_lock_extent(io_tree, eb->start, eb->start + eb->len - 1))
+               if (!try_lock_extent(io_tree, eb->start, eb->start + eb->len - 1,
+                                    NULL))
                        return -EAGAIN;
        } else {
                ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1, NULL);
index d01631d478067e788264a6bc3dcc8a3e94302748..98107466572b34ac2374e4041ed505bfe3a819de 100644 (file)
@@ -1302,7 +1302,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
                struct btrfs_ordered_extent *ordered;
 
                if (nowait) {
-                       if (!try_lock_extent(&inode->io_tree, start_pos, last_pos)) {
+                       if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
+                                            cached_state)) {
                                for (i = 0; i < num_pages; i++) {
                                        unlock_page(pages[i]);
                                        put_page(pages[i]);
index 0e516aefbf51b8c31ed7e3fa6c56a8ac179c41eb..2ba2d8b9cefc501412a94c594da700f9a0f240ab 100644 (file)
@@ -7255,7 +7255,8 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
 
        while (1) {
                if (nowait) {
-                       if (!try_lock_extent(io_tree, lockstart, lockend))
+                       if (!try_lock_extent(io_tree, lockstart, lockend,
+                                            cached_state))
                                return -EAGAIN;
                } else {
                        lock_extent(io_tree, lockstart, lockend, cached_state);
index e54f8280031fa14e29ae276e73b0eaf5b431cd3b..b648c9d4ea0fcae2a2ec42a0e7798c01e5a5ffc7 100644 (file)
@@ -1073,7 +1073,7 @@ bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end)
 {
        struct btrfs_ordered_extent *ordered;
 
-       if (!try_lock_extent(&inode->io_tree, start, end))
+       if (!try_lock_extent(&inode->io_tree, start, end, NULL))
                return false;
 
        ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
index 666a37a0ee89786ed45ec596f252d79e78d86cc5..e81a21082e58f3d2990df4cec576aa54fcb11d12 100644 (file)
@@ -1120,7 +1120,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
                                WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
                                end--;
                                ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
-                                                     key.offset, end);
+                                                     key.offset, end, NULL);
                                if (!ret)
                                        continue;