{
	struct fs_usage_sum sum = __fs_usage_sum(*stats);
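+	/* net change in data + reserved sectors from this update: */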
	s64 added = sum.data + sum.reserved;
+	s64 should_not_have_added;

	/*
	 * Not allowed to reduce sectors_available except by getting a
	 * reservation:
	 */
-	BUG_ON(added > (s64) (disk_res ? disk_res->sectors : 0));
+	should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
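+	/*
+	 * If we did anyway, warn, take the uncovered sectors straight out of
+	 * sectors_available and don't charge them to the reservation:
+	 */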
+	if (WARN_ON(should_not_have_added > 0)) {
+		atomic64_sub(should_not_have_added, &c->sectors_available);
+		added -= should_not_have_added;
+	}

	if (added > 0) {
		disk_res->sectors -= added;
		stats->replicas
			[!p.ptr.cached && replicas ? replicas - 1 : 0].data
			[!p.ptr.cached ? data_type : BCH_DATA_CACHED] +=
-				sectors;
+				disk_sectors;

		bch2_mark_pointer(c, e, p, disk_sectors, data_type,
				  stats, journal_seq, flags);
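
+		/*
+		 * count compressed sectors from every compressed, non-cached
+		 * pointer, not just the largest one:
+		 */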
		if (!p.ptr.cached &&
		    p.crc.compression_type != BCH_COMPRESSION_NONE &&
		    p.crc.compressed_size < p.crc.live_size)
-			ret = max_t(unsigned, ret, p.crc.compressed_size);
+			ret += p.crc.compressed_size;
	}
}
	switch (bch2_disk_reservation_add(trans->c,
					  trans->disk_res,
-					  sectors * bch2_extent_nr_dirty_ptrs(k),
-					  flags)) {
+					  sectors, flags)) {
	case 0:
		break;
	case -ENOSPC:
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
+#include "disk_groups.h"
#include "inode.h"
#include "io.h"
#include "journal_reclaim.h"
		}
		break;
	}
-	case DATA_REWRITE:
+	case DATA_REWRITE: {
+		const union bch_extent_entry *entry;
+		struct extent_ptr_decoded p;
+		unsigned compressed_sectors = 0;
+
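+		/*
+		 * Sum up the compressed pointers being rewritten within the
+		 * target and take a (nofail) disk reservation for them up
+		 * front:
+		 */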
+		extent_for_each_ptr_decode(bkey_s_c_to_extent(k), p, entry)
+			if (!p.ptr.cached &&
+			    p.crc.compression_type != BCH_COMPRESSION_NONE &&
+			    bch2_dev_in_target(c, p.ptr.dev, data_opts.target))
+				compressed_sectors += p.crc.compressed_size;
+
+		if (compressed_sectors) {
+			ret = bch2_disk_reservation_add(c, &m->op.res,
+					compressed_sectors,
+					BCH_DISK_RESERVATION_NOFAIL);
+			if (ret)
+				return ret;
+		}
		break;
+	}
	case DATA_PROMOTE:
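+		/*
+		 * promotes write a cached copy and must not block waiting
+		 * for an allocation:
+		 */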
		m->op.flags |= BCH_WRITE_ALLOC_NOWAIT;
		m->op.flags |= BCH_WRITE_CACHED;