struct bch_reflink_p {
struct bch_val v;
__le64 idx;
- __le64 v2;
-};
+ /*
+ * A reflink pointer might point to an indirect extent which is then
+ * later split (by copygc or rebalance). If we only pointed to part of
+ * the original indirect extent, and then one of the fragments is
+ * outside the range we point to, we'd leak a refcount: so when creating
+ * reflink pointers, we need to store pad values to remember the full
+ * range we were taking a reference on.
+ */
+ __le32 front_pad;
+ __le32 back_pad;
+} __attribute__((packed, aligned(8)));
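(Annotation, not part of the patch: a minimal userspace sketch of the pad arithmetic described in the comment above. Plain stdint types stand in for the on-disk __le64/__le32 fields; the struct name and values are illustrative only.)

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for bch_reflink_p; endian conversions elided. */
struct reflink_p_example {
	uint64_t idx;        /* first sector the file actually uses */
	uint32_t front_pad;  /* extra sectors referenced before idx */
	uint32_t back_pad;   /* extra sectors referenced past idx + key size */
};

int main(void)
{
	/* A pointer using sectors [120, 180) of an indirect extent [100, 200) */
	struct reflink_p_example p = { .idx = 120, .front_pad = 20, .back_pad = 20 };
	uint32_t key_size = 60;	/* p.k->size in the real code */

	uint64_t start   = p.idx - p.front_pad;                            /* 100 */
	uint64_t sectors = (uint64_t) p.front_pad + p.back_pad + key_size; /* 100 */

	/* Prints: refcounted range: [100, 200) */
	printf("refcounted range: [%llu, %llu)\n",
	       (unsigned long long) start,
	       (unsigned long long) (start + sectors));
	return 0;
}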
struct bch_reflink_v {
struct bch_val v;
__le64 refcount;
union bch_extent_entry start[0];
__u64 _data[0];
-};
+} __attribute__((packed, aligned(8)));
struct bch_indirect_inline_data {
struct bch_val v;
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
struct reflink_gc *ref;
size_t l, r, m;
- u64 idx = le64_to_cpu(p.v->idx);
- unsigned sectors = p.k->size;
+ u64 idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
+ u64 sectors = (u64) le32_to_cpu(p.v->front_pad) +
+ le32_to_cpu(p.v->back_pad) +
+ p.k->size;
s64 ret = 0;
BUG_ON((flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)) ==
       (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE));
bch2_fs_inconsistent(c,
"%llu:%llu len %u points to nonexistent indirect extent %llu",
p.k->p.inode, p.k->p.offset, p.k->size, idx);
- bch2_inconsistent_error(c);
ret = -EIO;
goto err;
}
- BUG_ON(!*refcount && (flags & BTREE_TRIGGER_OVERWRITE));
+ if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
+ bch2_fs_inconsistent(c,
+ "%llu:%llu len %u idx %llu indirect extent refcount underflow",
+ p.k->p.inode, p.k->p.offset, p.k->size, idx);
+ ret = -EIO;
+ goto err;
+ }
+
+ if (flags & BTREE_TRIGGER_INSERT) {
+ struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
+ u64 pad;
+
+ pad = max_t(s64, le32_to_cpu(v->front_pad),
+ le64_to_cpu(v->idx) - bkey_start_offset(k.k));
+ BUG_ON(pad > U32_MAX);
+ v->front_pad = cpu_to_le32(pad);
+
+ pad = max_t(s64, le32_to_cpu(v->back_pad),
+ k.k->p.offset - p.k->size - le64_to_cpu(v->idx));
+ BUG_ON(pad > U32_MAX);
+ v->back_pad = cpu_to_le32(pad);
+ }
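(Annotation, not part of the patch: in this hunk k is the indirect extent found by the btree lookup at idx, while p is the reflink pointer being inserted, its pads having just been zeroed by the insert trigger; the max_t() terms grow each pad just enough for the pointer's recorded range to cover the whole indirect extent. The numbers below are an illustrative walk-through.)

/*
 * Suppose the lookup found an indirect extent spanning [100, 200) and
 * the pointer being inserted references [120, 180):
 *
 *   bkey_start_offset(k.k) = 100,  k.k->p.offset = 200
 *   le64_to_cpu(v->idx)    = 120,  p.k->size     = 60
 *
 *   front_pad = max(0, 120 - 100)      = 20
 *   back_pad  = max(0, 200 - 60 - 120) = 20
 *
 * The pointer now records a reference on all of [100, 200), so if the
 * indirect extent is later split, neither fragment's refcount is leaked.
 */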
+
le64_add_cpu(refcount, add);
if (!*refcount) {
struct bkey_s_c k, unsigned flags)
{
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
- u64 idx = le64_to_cpu(p.v->idx);
- unsigned sectors = p.k->size;
+ u64 idx, sectors;
s64 ret = 0;
+ if (flags & BTREE_TRIGGER_INSERT) {
+ struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
+
+ v->front_pad = v->back_pad = 0;
+ }
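/*
 * Note (annotation, not in the patch; one plausible reading): this
 * trigger also fires when an existing reflink_p key is copied or split
 * by the write path, and the copy inherits pads describing the old
 * key's range. Zeroing them here and letting __bch2_trans_mark_reflink_p
 * regrow them via max_t() keeps the pads consistent with the references
 * this insert actually takes.
 */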
+
+ idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
+ sectors = (u64) le32_to_cpu(p.v->front_pad) +
+ le32_to_cpu(p.v->back_pad) +
+ p.k->size;
+
while (sectors) {
ret = __bch2_trans_mark_reflink_p(trans, p, idx, flags);
if (ret < 0)