        return num_csums;
 }
 
-int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans)
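+/*
+ * Check whether the delayed refs rsv wants more space than is currently
+ * reserved in it and the global reserve combined, i.e. whether we are
+ * running short on space for delayed refs.
+ */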
+bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
 {
-       struct btrfs_fs_info *fs_info = trans->fs_info;
-       struct btrfs_block_rsv *global_rsv;
-       u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
-       u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
-       unsigned int num_dirty_bgs = trans->transaction->num_dirty_bgs;
-       u64 num_bytes, num_dirty_bgs_bytes;
-       int ret = 0;
+       struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+       struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+       bool ret = false;
+       u64 reserved;
 
-       num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
-       num_heads = heads_to_leaves(fs_info, num_heads);
-       if (num_heads > 1)
-               num_bytes += (num_heads - 1) * fs_info->nodesize;
-       num_bytes <<= 1;
-       num_bytes += btrfs_csum_bytes_to_leaves(fs_info, csum_bytes) *
-                                                       fs_info->nodesize;
-       num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(fs_info,
-                                                            num_dirty_bgs);
-       global_rsv = &fs_info->global_block_rsv;
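+       /* Sample how much the global reserve currently has reserved. */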
+       spin_lock(&global_rsv->lock);
+       reserved = global_rsv->reserved;
+       spin_unlock(&global_rsv->lock);
 
        /*
-        * If we can't allocate any more chunks lets make sure we have _lots_ of
-        * wiggle room since running delayed refs can create more delayed refs.
+        * Since the global reserve is just kind of magic we don't really want
+        * to rely on it to save our bacon.  So if the delayed_refs_rsv wants
+        * more space than it and the global rsv currently have reserved, it's
+        * time to think about bailing.
         */
-       if (global_rsv->space_info->full) {
-               num_dirty_bgs_bytes <<= 1;
-               num_bytes <<= 1;
-       }
-
-       spin_lock(&global_rsv->lock);
-       if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
-               ret = 1;
-       spin_unlock(&global_rsv->lock);
+       spin_lock(&delayed_refs_rsv->lock);
+       reserved += delayed_refs_rsv->reserved;
+       if (delayed_refs_rsv->size >= reserved)
+               ret = true;
+       spin_unlock(&delayed_refs_rsv->lock);
        return ret;
 }
 
        if (val >= NSEC_PER_SEC / 2)
                return 2;
 
-       return btrfs_check_space_for_delayed_refs(trans);
+       return btrfs_check_space_for_delayed_refs(trans->fs_info);
 }
 
 struct async_delayed_refs {
 
                 * Try to steal from the global reserve if there is space for
                 * it.
                 */
-               if (!btrfs_check_space_for_delayed_refs(trans) &&
-                   !btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, false))
+               if (!btrfs_check_space_for_delayed_refs(fs_info) &&
+                   !btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, false))
                        return trans;
 
                /* If not, commit and try again. */