btrfs: cleanup btrfs_discard_update_discardable usage
authorJosef Bacik <josef@toxicpanda.com>
Fri, 23 Oct 2020 13:58:07 +0000 (09:58 -0400)
committerDavid Sterba <dsterba@suse.com>
Tue, 8 Dec 2020 14:54:02 +0000 (15:54 +0100)
btrfs_discard_update_discardable() currently takes both the block_group
and the free_space_ctl, but the ctl can be retrieved from the block
group itself.  The extra argument exists partly because the function is
called from __load_free_space_cache(), which can also be called for the
inode cache, where there is no associated block group.

Move that call into the block group specific load path and wrap it in
the tree_lock that the new assertion expects (strictly speaking the
lock is not needed there, since loading happens in a single-threaded
context, but taking it keeps the assertion valid).

Fix up the arguments so the function takes only the block group, and
add a lockdep_assert_held() on ctl->tree_lock for good measure to make
sure we don't mess up the locking again.
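
The resulting contract, as a simplified sketch (illustrative only, with
the function body elided; all identifiers are from the real code):

    /* Callee derives the ctl and asserts the caller holds tree_lock. */
    void btrfs_discard_update_discardable(struct btrfs_block_group *block_group)
    {
            struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

            lockdep_assert_held(&ctl->tree_lock);
            /* ... propagate discardable extents/bytes deltas ... */
    }

    /* Caller side, e.g. after loading the free space cache: */
    spin_lock(&ctl->tree_lock);
    btrfs_discard_update_discardable(block_group);
    spin_unlock(&ctl->tree_lock);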

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/discard.c
fs/btrfs/discard.h
fs/btrfs/free-space-cache.c

index 741c7e19c32f2cb3116f9507ed3cc307b1fb5af3..5a88b584276f3e5f97bc0ef9cc41f22c7ea9a11b 100644 (file)
@@ -563,15 +563,14 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
 /**
  * btrfs_discard_update_discardable - propagate discard counters
  * @block_group: block_group of interest
- * @ctl: free_space_ctl of @block_group
  *
  * This propagates deltas of counters up to the discard_ctl.  It maintains a
  * current counter and a previous counter passing the delta up to the global
  * stat.  Then the current counter value becomes the previous counter value.
  */
-void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
-                                     struct btrfs_free_space_ctl *ctl)
+void btrfs_discard_update_discardable(struct btrfs_block_group *block_group)
 {
+       struct btrfs_free_space_ctl *ctl;
        struct btrfs_discard_ctl *discard_ctl;
        s32 extents_delta;
        s64 bytes_delta;
@@ -581,8 +580,10 @@ void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
            !btrfs_is_block_group_data_only(block_group))
                return;
 
+       ctl = block_group->free_space_ctl;
        discard_ctl = &block_group->fs_info->discard_ctl;
 
+       lockdep_assert_held(&ctl->tree_lock);
        extents_delta = ctl->discardable_extents[BTRFS_STAT_CURR] -
                        ctl->discardable_extents[BTRFS_STAT_PREV];
        if (extents_delta) {
index 353228d62f5a1232bea3363d3f702aa9b4ee842b..57b9202f427f869a42b108f47dfeb08f26b79472 100644 (file)
@@ -28,8 +28,7 @@ bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl);
 
 /* Update operations */
 void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl);
-void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
-                                     struct btrfs_free_space_ctl *ctl);
+void btrfs_discard_update_discardable(struct btrfs_block_group *block_group);
 
 /* Setup/cleanup operations */
 void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info);
index 5ea36a06e5147b65f0f77ca81e22aec90e109352..0787339c7b93d1ee5a5d47c5bf02913e55fd9c8a 100644 (file)
@@ -828,7 +828,6 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
        merge_space_tree(ctl);
        ret = 1;
 out:
-       btrfs_discard_update_discardable(ctl->private, ctl);
        io_ctl_free(&io_ctl);
        return ret;
 free_cache:
@@ -929,6 +928,9 @@ out:
                           block_group->start);
        }
 
+       spin_lock(&ctl->tree_lock);
+       btrfs_discard_update_discardable(block_group);
+       spin_unlock(&ctl->tree_lock);
        iput(inode);
        return ret;
 }
@@ -2508,7 +2510,7 @@ link:
        if (ret)
                kmem_cache_free(btrfs_free_space_cachep, info);
 out:
-       btrfs_discard_update_discardable(block_group, ctl);
+       btrfs_discard_update_discardable(block_group);
        spin_unlock(&ctl->tree_lock);
 
        if (ret) {
@@ -2643,7 +2645,7 @@ again:
                goto again;
        }
 out_lock:
-       btrfs_discard_update_discardable(block_group, ctl);
+       btrfs_discard_update_discardable(block_group);
        spin_unlock(&ctl->tree_lock);
 out:
        return ret;
@@ -2779,7 +2781,7 @@ void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
        spin_lock(&ctl->tree_lock);
        __btrfs_remove_free_space_cache_locked(ctl);
        if (ctl->private)
-               btrfs_discard_update_discardable(ctl->private, ctl);
+               btrfs_discard_update_discardable(ctl->private);
        spin_unlock(&ctl->tree_lock);
 }
 
@@ -2801,7 +2803,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
                cond_resched_lock(&ctl->tree_lock);
        }
        __btrfs_remove_free_space_cache_locked(ctl);
-       btrfs_discard_update_discardable(block_group, ctl);
+       btrfs_discard_update_discardable(block_group);
        spin_unlock(&ctl->tree_lock);
 
 }
@@ -2885,7 +2887,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
                        link_free_space(ctl, entry);
        }
 out:
-       btrfs_discard_update_discardable(block_group, ctl);
+       btrfs_discard_update_discardable(block_group);
        spin_unlock(&ctl->tree_lock);
 
        if (align_gap_len)