btrfs: zoned: calculate free space from zone capacity
author Naohiro Aota <naohiro.aota@wdc.com>
Thu, 19 Aug 2021 12:19:10 +0000 (21:19 +0900)
committer David Sterba <dsterba@suse.com>
Tue, 26 Oct 2021 17:07:58 +0000 (19:07 +0200)
Now that we have introduced capacity in a block group, we need to calculate
free space using the capacity instead of the length. Thus, we account
capacity - alloc_offset bytes as free, and account the bytes in
[capacity, length] as zone unusable.
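
For illustration, here is a minimal user-space sketch of the accounting
above (the numbers and the standalone program are hypothetical; in the
kernel these values live in struct btrfs_block_group):

/* Hypothetical standalone sketch; not kernel code. */
#include <stdio.h>

int main(void)
{
	/* Made-up example: a 256 MiB zone with 192 MiB usable capacity. */
	unsigned long long length        = 256ULL << 20;
	unsigned long long zone_capacity = 192ULL << 20;
	unsigned long long alloc_offset  =  64ULL << 20; /* write pointer */
	unsigned long long used          =  48ULL << 20; /* live extents  */

	/* Free space is now measured against the capacity ...           */
	unsigned long long free = zone_capacity - alloc_offset;
	/* ... and dead bytes behind the write pointer plus the
	 * unwritable [capacity, length] tail are zone unusable.         */
	unsigned long long unusable = (alloc_offset - used) +
				      (length - zone_capacity);

	/* used + free + unusable sums to the block group length. */
	printf("free=%llu unusable=%llu\n", free, unusable);
	return 0;
}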

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/block-group.c
fs/btrfs/extent-tree.c
fs/btrfs/free-space-cache.c
fs/btrfs/zoned.c

diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 4f43825ac75453c629353154b4f6f64227cdf487..e9295e3c2cb3a92ed7adbbac5966d54b6ed85ed2 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -2484,7 +2484,8 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
         */
        trace_btrfs_add_block_group(fs_info, cache, 1);
        btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
-                               cache->bytes_super, 0, &cache->space_info);
+                               cache->bytes_super, cache->zone_unusable,
+                               &cache->space_info);
        btrfs_update_global_block_rsv(fs_info);
 
        link_block_group(cache);
@@ -2599,7 +2600,9 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
        if (!--cache->ro) {
                if (btrfs_is_zoned(cache->fs_info)) {
                        /* Migrate zone_unusable bytes back */
-                       cache->zone_unusable = cache->alloc_offset - cache->used;
+                       cache->zone_unusable =
+                               (cache->alloc_offset - cache->used) +
+                               (cache->length - cache->zone_capacity);
                        sinfo->bytes_zone_unusable += cache->zone_unusable;
                        sinfo->bytes_readonly -= cache->zone_unusable;
                }
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 0ab456cb4bf801fc9f6f89623100ea182964b8f4..165acee66b072f433be12293bb3d2d2ea4289e9b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3796,7 +3796,8 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
                goto out;
        }
 
-       avail = block_group->length - block_group->alloc_offset;
+       WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity);
+       avail = block_group->zone_capacity - block_group->alloc_offset;
        if (avail < num_bytes) {
                if (ffe_ctl->max_extent_size < avail) {
                        /*
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index da0eee7c9e5f38ba00dc7d3290302a6d83bf051d..b76b608b081f1aa4c57f0f669da06c01919f769e 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -2539,10 +2539,15 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
        u64 offset = bytenr - block_group->start;
        u64 to_free, to_unusable;
        const int bg_reclaim_threshold = READ_ONCE(fs_info->bg_reclaim_threshold);
+       bool initial = (size == block_group->length);
+
+       WARN_ON(!initial && offset + size > block_group->zone_capacity);
 
        spin_lock(&ctl->tree_lock);
        if (!used)
                to_free = size;
+       else if (initial)
+               to_free = block_group->zone_capacity;
        else if (offset >= block_group->alloc_offset)
                to_free = size;
        else if (offset + size <= block_group->alloc_offset)
@@ -2755,7 +2760,7 @@ void btrfs_dump_free_space(struct btrfs_block_group *block_group,
         */
        if (btrfs_is_zoned(fs_info)) {
                btrfs_info(fs_info, "free space %llu",
-                          block_group->length - block_group->alloc_offset);
+                          block_group->zone_capacity - block_group->alloc_offset);
                return;
        }
 
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index dd545af225e86e3ccf71b9ada40187de725303be..a92d6e52321d588fa863e153db9306a354421e9d 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1265,8 +1265,9 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
                return;
 
        WARN_ON(cache->bytes_super != 0);
-       unusable = cache->alloc_offset - cache->used;
-       free = cache->length - cache->alloc_offset;
+       unusable = (cache->alloc_offset - cache->used) +
+                  (cache->length - cache->zone_capacity);
+       free = cache->zone_capacity - cache->alloc_offset;
 
        /* We only need ->free_space in ALLOC_SEQ block groups */
        cache->last_byte_to_unpin = (u64)-1;