return ret;
}
+/*
+ * Emit the move_extent_finish tracepoint with the key (value included)
+ * rendered as text via bch2_bkey_val_to_text().  The _enabled() check
+ * skips the printbuf formatting entirely when the tracepoint is off.
+ */
+static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
+{
+ if (trace_move_extent_finish_enabled()) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, k);
+ trace_move_extent_finish(c, buf.buf);
+ printbuf_exit(&buf);
+ }
+}
+
static void trace_move_extent_fail2(struct data_update *m,
struct bkey_s_c new,
struct bkey_s_c wrote,
bch2_btree_iter_set_pos(&iter, next_pos);
this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
- trace_move_extent_finish(&new->k);
+ trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i));
}
err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
#include <linux/ioprio.h>
#include <linux/kthread.h>
+/*
+ * Emit the move_extent tracepoint with the key formatted as a string.
+ * Formatting cost is paid only when the tracepoint is enabled.
+ */
+static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k)
+{
+ if (trace_move_extent_enabled()) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, k);
+ trace_move_extent(c, buf.buf);
+ printbuf_exit(&buf);
+ }
+}
+
+/*
+ * Emit the move_extent_read tracepoint with the key formatted as a
+ * string; no-op (and no allocation) when the tracepoint is disabled.
+ */
+static void trace_move_extent_read2(struct bch_fs *c, struct bkey_s_c k)
+{
+ if (trace_move_extent_read_enabled()) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, k);
+ trace_move_extent_read(c, buf.buf);
+ printbuf_exit(&buf);
+ }
+}
+
+/*
+ * Emit the move_extent_alloc_mem_fail tracepoint with the key formatted
+ * as a string.  Counter bump is done separately by the caller (this
+ * replaces the old trace_and_count() call site).
+ */
+static void trace_move_extent_alloc_mem_fail2(struct bch_fs *c, struct bkey_s_c k)
+{
+ if (trace_move_extent_alloc_mem_fail_enabled()) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, k);
+ trace_move_extent_alloc_mem_fail(c, buf.buf);
+ printbuf_exit(&buf);
+ }
+}
+
static void progress_list_add(struct bch_fs *c, struct bch_move_stats *stats)
{
mutex_lock(&c->data_progress_lock);
unsigned sectors = k.k->size, pages;
int ret = -ENOMEM;
+ trace_move_extent2(c, k);
+
bch2_data_update_opts_normalize(k, &data_opts);
if (!data_opts.rewrite_ptrs &&
this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
- trace_move_extent_read(k.k);
-
+ trace_move_extent_read2(c, k);
mutex_lock(&ctxt->lock);
atomic_add(io->read_sectors, &ctxt->read_sectors);
err_free:
kfree(io);
err:
- trace_and_count(c, move_extent_alloc_mem_fail, k.k);
+ this_cpu_inc(c->counters[BCH_COUNTER_move_extent_alloc_mem_fail]);
+ trace_move_extent_alloc_mem_fail2(c, k);
return ret;
}
struct bpos bp_pos = POS_MIN;
int ret = 0;
+ trace_bucket_evacuate(c, &bucket);
+
bch2_bkey_buf_init(&sk);
/*
);
+/*
+ * bkey events now receive the key pre-formatted as a string by the
+ * caller (see the trace_*2() wrappers), instead of decoding struct bkey
+ * fields (inode/offset/size) inside the tracepoint itself.
+ */
DECLARE_EVENT_CLASS(bkey,
- TP_PROTO(const struct bkey *k),
- TP_ARGS(k),
+ TP_PROTO(struct bch_fs *c, const char *k),
+ TP_ARGS(c, k),
TP_STRUCT__entry(
- __field(u64, inode )
- __field(u64, offset )
- __field(u32, size )
+ __string(k, k )
),
TP_fast_assign(
- __entry->inode = k->p.inode;
- __entry->offset = k->p.offset;
- __entry->size = k->size;
+ __assign_str(k, k);
),
- TP_printk("%llu:%llu len %u", __entry->inode,
- __entry->offset, __entry->size)
+ TP_printk("%s", __get_str(k))
);
DECLARE_EVENT_CLASS(btree_node,
/* Moving IO */
+/*
+ * Records the device member index and bucket number (bpos.inode and
+ * bpos.offset of the bucket key) when a bucket evacuation is started.
+ */
+TRACE_EVENT(bucket_evacuate,
+ TP_PROTO(struct bch_fs *c, struct bpos *bucket),
+ TP_ARGS(c, bucket),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(u32, dev_idx )
+ __field(u64, bucket )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = c->dev;
+ __entry->dev_idx = bucket->inode;
+ __entry->bucket = bucket->offset;
+ ),
+
+ TP_printk("%d:%d %u:%llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->dev_idx, __entry->bucket)
+);
+
+/* New event; emitted via trace_move_extent2() with the formatted key. */
+DEFINE_EVENT(bkey, move_extent,
+ TP_PROTO(struct bch_fs *c, const char *k),
+ TP_ARGS(c, k)
+);
+
+/* Prototype updated to the string-taking bkey class. */
DEFINE_EVENT(bkey, move_extent_read,
- TP_PROTO(const struct bkey *k),
- TP_ARGS(k)
+ TP_PROTO(struct bch_fs *c, const char *k),
+ TP_ARGS(c, k)
);
+/*
+ * NOTE(review): prototype changes to (c, const char *k), but no caller
+ * of trace_move_extent_write() is updated in this diff — verify all
+ * call sites were converted, or this will not compile.
+ */
DEFINE_EVENT(bkey, move_extent_write,
- TP_PROTO(const struct bkey *k),
- TP_ARGS(k)
+ TP_PROTO(struct bch_fs *c, const char *k),
+ TP_ARGS(c, k)
);
+/* Prototype updated; call site now goes through trace_move_extent_finish2(). */
DEFINE_EVENT(bkey, move_extent_finish,
- TP_PROTO(const struct bkey *k),
- TP_ARGS(k)
+ TP_PROTO(struct bch_fs *c, const char *k),
+ TP_ARGS(c, k)
);
TRACE_EVENT(move_extent_fail,
);
+/* Prototype updated; call site now goes through trace_move_extent_alloc_mem_fail2(). */
DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
- TP_PROTO(const struct bkey *k),
- TP_ARGS(k)
+ TP_PROTO(struct bch_fs *c, const char *k),
+ TP_ARGS(c, k)
);
TRACE_EVENT(move_data,