< block_group->zone_unusable);
                WARN_ON(block_group->space_info->disk_total
                        < block_group->length * factor);
+               WARN_ON(block_group->zone_is_active &&
+                       block_group->space_info->active_total_bytes
+                       < block_group->length);
        }
        block_group->space_info->total_bytes -= block_group->length;
+       if (block_group->zone_is_active)
+               block_group->space_info->active_total_bytes -= block_group->length;
        block_group->space_info->bytes_readonly -=
                (block_group->length - block_group->zone_unusable);
        block_group->space_info->bytes_zone_unusable -=
        trace_btrfs_add_block_group(info, cache, 0);
        btrfs_update_space_info(info, cache->flags, cache->length,
                                cache->used, cache->bytes_super,
-                               cache->zone_unusable, &space_info);
+                               cache->zone_unusable, cache->zone_is_active,
+                               &space_info);
 
        cache->space_info = space_info;
 
                }
 
                btrfs_update_space_info(fs_info, bg->flags, em->len, em->len,
-                                       0, 0, &space_info);
+                                       0, 0, false, &space_info);
                bg->space_info = space_info;
                link_block_group(bg);
 
        trace_btrfs_add_block_group(fs_info, cache, 1);
        btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
                                cache->bytes_super, cache->zone_unusable,
-                               &cache->space_info);
+                               cache->zone_is_active, &cache->space_info);
        btrfs_update_global_block_rsv(fs_info);
 
        link_block_group(cache);
 
 void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
                             u64 total_bytes, u64 bytes_used,
                             u64 bytes_readonly, u64 bytes_zone_unusable,
-                            struct btrfs_space_info **space_info)
+                            bool active, struct btrfs_space_info **space_info)
 {
        struct btrfs_space_info *found;
        int factor;
        ASSERT(found);
        spin_lock(&found->lock);
        found->total_bytes += total_bytes;
+       if (active)
+               found->active_total_bytes += total_bytes;
        found->disk_total += total_bytes * factor;
        found->bytes_used += bytes_used;
        found->disk_used += bytes_used * factor;
        return avail;
 }
 
+static inline u64 writable_total_bytes(struct btrfs_fs_info *fs_info,
+                                      struct btrfs_space_info *space_info)
+{
+       /*
+        * On a regular filesystem, all of total_bytes is always writable. On a
+        * zoned filesystem, there may be a limitation imposed by
+        * max_active_zones. For metadata allocation, we cannot finish an
+        * existing active block group to avoid a deadlock. Thus, only the
+        * active block groups are considered writable for metadata space.
+        */
+       if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
+               return space_info->total_bytes;
+
+       return space_info->active_total_bytes;
+}
+
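The helper above carries the core idea of the change, so here is a minimal, self-contained userspace sketch of the same selection logic. The struct, the flag value, and the function names are simplified stand-ins invented for illustration, not the kernel's own types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_BLOCK_GROUP_DATA (1ULL << 0)  /* stand-in for BTRFS_BLOCK_GROUP_DATA */

struct model_space_info {
	uint64_t flags;
	uint64_t total_bytes;           /* all capacity in this space */
	uint64_t active_total_bytes;    /* capacity in active block groups only */
};

/* Mirrors the selection made by writable_total_bytes() above. */
static uint64_t model_writable_total(bool zoned, const struct model_space_info *si)
{
	/* Data (or any space on a regular filesystem) can use every byte. */
	if (!zoned || (si->flags & MODEL_BLOCK_GROUP_DATA))
		return si->total_bytes;
	/* Zoned metadata: only already-active block groups are writable. */
	return si->active_total_bytes;
}

int main(void)
{
	struct model_space_info meta = {
		.flags = 0,                        /* metadata, not data */
		.total_bytes = 8ULL << 30,         /* 8 GiB allocated in total */
		.active_total_bytes = 1ULL << 30,  /* only 1 GiB of it is active */
	};

	/* Prints 1073741824: zoned metadata is capped at the active 1 GiB. */
	printf("%llu\n", (unsigned long long)model_writable_total(true, &meta));
	/* Prints 8589934592: the same space on a regular filesystem. */
	printf("%llu\n", (unsigned long long)model_writable_total(false, &meta));
	return 0;
}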
 int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
                         struct btrfs_space_info *space_info, u64 bytes,
                         enum btrfs_reserve_flush_enum flush)
        used = btrfs_space_info_used(space_info, true);
        avail = calc_available_free_space(fs_info, space_info, flush);
 
-       if (used + bytes < space_info->total_bytes + avail)
+       if (used + bytes < writable_total_bytes(fs_info, space_info) + avail)
                return 1;
        return 0;
 }
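With made-up numbers, the effect of checking against the writable total instead of total_bytes is easy to see (all sizes below are hypothetical):

/*
 * Zoned metadata space: total_bytes = 8 GiB, active_total_bytes = 1 GiB,
 * used = 768 MiB, avail = 128 MiB, reservation request bytes = 512 MiB.
 *
 *   used + bytes < writable_total + avail
 *   768M + 512M  < 1024M + 128M   ->   1280M < 1152M   ->   false
 *
 * The overcommit is refused. Checked against total_bytes (8 GiB), as the
 * old code did, it would have been granted, promising space in block
 * groups that may never become active.
 */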
                ticket = list_first_entry(head, struct reserve_ticket, list);
 
                /* Check and see if our ticket can be satisfied now. */
-               if ((used + ticket->bytes <= space_info->total_bytes) ||
+               if ((used + ticket->bytes <= writable_total_bytes(fs_info, space_info)) ||
                    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
                                         flush)) {
                        btrfs_space_info_update_bytes_may_use(fs_info,
 {
        u64 used;
        u64 avail;
+       u64 total;
        u64 to_reclaim = space_info->reclaim_size;
 
        lockdep_assert_held(&space_info->lock);
         * space.  If that's the case add in our overage so we make sure to put
         * appropriate pressure on the flushing state machine.
         */
-       if (space_info->total_bytes + avail < used)
-               to_reclaim += used - (space_info->total_bytes + avail);
+       total = writable_total_bytes(fs_info, space_info);
+       if (total + avail < used)
+               to_reclaim += used - (total + avail);
 
        return to_reclaim;
 }
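A quick hypothetical walk-through of the overage arithmetic with the new total:

/*
 * Suppose used = 2 GiB, writable total = 1 GiB, avail = 512 MiB. Then
 * total + avail = 1.5 GiB < used, so the 512 MiB overage is added on top
 * of reclaim_size to put extra pressure on the flushing state machine.
 * Computed against space_info->total_bytes, zoned metadata with few
 * active zones would frequently report no overage here at all.
 */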
 {
        u64 global_rsv_size = fs_info->global_block_rsv.reserved;
        u64 ordered, delalloc;
-       u64 thresh = div_factor_fine(space_info->total_bytes, 90);
+       u64 total = writable_total_bytes(fs_info, space_info);
+       u64 thresh;
        u64 used;
 
+       thresh = div_factor_fine(total, 90);
+
        lockdep_assert_held(&space_info->lock);
 
        /* If we're just plain full then async reclaim just slows us down. */
                                           BTRFS_RESERVE_FLUSH_ALL);
        used = space_info->bytes_used + space_info->bytes_reserved +
               space_info->bytes_readonly + global_rsv_size;
-       if (used < space_info->total_bytes)
-               thresh += space_info->total_bytes - used;
+       if (used < total)
+               thresh += total - used;
        thresh >>= space_info->clamp;
 
        used = space_info->bytes_pinned;
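The preemptive-reclaim threshold moves the same way. A hypothetical walk-through, relying on div_factor_fine(x, 90) being x * 90 / 100:

/*
 * writable total = 10 GiB  ->  thresh starts at 9 GiB
 * used           =  4 GiB  ->  slack of 6 GiB is added, thresh = 15 GiB
 * clamp          =  2      ->  thresh >>= 2, final thresh = 3.75 GiB
 *
 * Since "total" is now the writable total, a zoned metadata space with
 * few active zones crosses this threshold, and starts background
 * reclaim, much earlier than it would have against total_bytes.
 */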
         * can_overcommit() to ensure we can overcommit to continue.
         */
        if (!pending_tickets &&
-           ((used + orig_bytes <= space_info->total_bytes) ||
+           ((used + orig_bytes <= writable_total_bytes(fs_info, space_info)) ||
             btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
                btrfs_space_info_update_bytes_may_use(fs_info, space_info,
                                                      orig_bytes);
 
        u64 bytes_may_use;      /* number of bytes that may be used for
                                   delalloc/allocations */
        u64 bytes_readonly;     /* total bytes that are read only */
+       /* Total bytes in the space, counting only active block groups. */
+       u64 active_total_bytes;
        u64 bytes_zone_unusable;        /* total bytes that are unusable until
                                           resetting the device zone */
 
 void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
                             u64 total_bytes, u64 bytes_used,
                             u64 bytes_readonly, u64 bytes_zone_unusable,
-                            struct btrfs_space_info **space_info);
+                            bool active, struct btrfs_space_info **space_info);
 void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
                                        u64 chunk_size);
 struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
 
 bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 {
        struct btrfs_fs_info *fs_info = block_group->fs_info;
+       struct btrfs_space_info *space_info = block_group->space_info;
        struct map_lookup *map;
        struct btrfs_device *device;
        u64 physical;
 
        map = block_group->physical_map;
 
+       spin_lock(&space_info->lock);
        spin_lock(&block_group->lock);
        if (block_group->zone_is_active) {
                ret = true;
 
        /* Successfully activated all the zones */
        block_group->zone_is_active = 1;
+       space_info->active_total_bytes += block_group->length;
        spin_unlock(&block_group->lock);
+       btrfs_try_granting_tickets(fs_info, space_info);
+       spin_unlock(&space_info->lock);
 
        /* For the active block group list */
        btrfs_get_block_group(block_group);
 
 out_unlock:
        spin_unlock(&block_group->lock);
+       spin_unlock(&space_info->lock);
        return ret;
 }
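btrfs_zone_activate() now nests block_group->lock inside space_info->lock, so publishing the new capacity and granting tickets appear atomic to concurrent reservers. A minimal pthread sketch of that ordering, with illustrative names rather than kernel code:

#include <pthread.h>

static pthread_mutex_t space_info_lock = PTHREAD_MUTEX_INITIALIZER;   /* outer */
static pthread_mutex_t block_group_lock = PTHREAD_MUTEX_INITIALIZER;  /* inner */

static void activate_pattern(void)
{
	pthread_mutex_lock(&space_info_lock);
	pthread_mutex_lock(&block_group_lock);

	/* ... mark the group active and bump active_total_bytes ... */

	pthread_mutex_unlock(&block_group_lock);
	/*
	 * Grant waiting tickets while still holding the outer lock, so no
	 * reservation can observe the new capacity before the waiters do.
	 */
	pthread_mutex_unlock(&space_info_lock);
}

int main(void)
{
	activate_pattern();
	return 0;
}

Note that the out_unlock error path in the diff releases the locks in the reverse of the order they were taken (block_group->lock first, then space_info->lock), keeping the ordering consistent on every path.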