percpu_ref_put(&c->writes);
}
-static int invalidate_one_bucket(struct btree_trans *trans, struct bch_dev *ca)
+static int invalidate_one_bucket(struct btree_trans *trans, struct bch_dev *ca,
+ struct bpos *bucket_pos, unsigned *cached_sectors)
{
struct bch_fs *c = trans->c;
struct btree_iter lru_iter, alloc_iter = { NULL };
if (ret)
goto out;
- if (!k.k || k.k->p.inode != ca->dev_idx)
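+ /* No more buckets on this device's LRU; ret == 1 tells the caller to stop: */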
+ if (!k.k || k.k->p.inode != ca->dev_idx) {
+ ret = 1;
goto out;
+ }
if (k.k->type != KEY_TYPE_lru) {
prt_printf(&buf, "non lru key in lru btree:\n ");
idx = k.k->p.offset;
bucket = le64_to_cpu(bkey_s_c_to_lru(k).v->idx);
- a = bch2_trans_start_alloc_update(trans, &alloc_iter,
- POS(ca->dev_idx, bucket));
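+ /* Report the bucket we're invalidating back to the caller, for tracing: */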
+ *bucket_pos = POS(ca->dev_idx, bucket);
+
+ a = bch2_trans_start_alloc_update(trans, &alloc_iter, *bucket_pos);
ret = PTR_ERR_OR_ZERO(a);
if (ret)
goto out;
}
}
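+ /* Only buckets that still hold cached data should be on this LRU: */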
+ if (!a->v.cached_sectors)
+ bch_err(c, "invalidating empty bucket, confused");
+
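+ /* Pass back how many cached sectors are being dropped, for the tracepoint: */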
+ *cached_sectors = a->v.cached_sectors;
+
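+ /* Bumping the gen invalidates any pointers still referencing this bucket: */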
SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
a->v.gen++;
a->v.data_type = 0;
BTREE_TRIGGER_BUCKET_INVALIDATE);
if (ret)
goto out;
-
- trace_invalidate_bucket(c, a->k.p.inode, a->k.p.offset);
out:
bch2_trans_iter_exit(trans, &alloc_iter);
bch2_trans_iter_exit(trans, &lru_iter);
struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
struct bch_dev *ca;
struct btree_trans trans;
- unsigned i;
+ struct bpos bucket;
+ unsigned i, sectors;
int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
ret = __bch2_trans_do(&trans, NULL, NULL,
BTREE_INSERT_USE_RESERVE|
BTREE_INSERT_NOFAIL,
- invalidate_one_bucket(&trans, ca));
+ invalidate_one_bucket(&trans, ca, &bucket,
+ &sectors));
if (ret)
break;
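+ /*
+  * Trace after the transaction commits, so restarts don't emit
+  * duplicate events:
+  */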
+ trace_invalidate_bucket(c, bucket.inode, bucket.offset, sectors);
this_cpu_inc(c->counters[BCH_COUNTER_bucket_invalidate]);
}
}
);
TRACE_EVENT(invalidate_bucket,
- TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket),
- TP_ARGS(c, dev, bucket),
+ TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
+ TP_ARGS(c, dev, bucket, sectors),
TP_STRUCT__entry(
__field(dev_t, dev )
__field(u32, dev_idx )
+ __field(u32, sectors )
__field(u64, bucket )
),
TP_fast_assign(
__entry->dev = c->dev;
__entry->dev_idx = dev;
+ __entry->sectors = sectors;
__entry->bucket = bucket;
),
- TP_printk("%d:%d invalidated %u:%llu",
+ TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->dev_idx, __entry->bucket)
+ __entry->dev_idx, __entry->bucket,
+ __entry->sectors)
);
/* Moving IO */