Like the normal 'perf lock contention' output, it prints the number of
lost entries for BPF if any exist or if the -v option is passed.
Currently it uses the BROKEN_CONTENDED stat for the lost count (due to
full stack maps).
$ sudo perf lock con -a -b --map-nr-entries 128 sleep 5
...
=== output for debug===
bad: 43, total: 14903
bad rate: 0.29 %
histogram of events caused bad sequence
acquire: 0
acquired: 0
contended: 43
release: 0
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Blake Jones <blakejones@google.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20220802191004.347740-3-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
pr_info(" %10s %s\n\n", "type", "caller");
bad = total = 0;
+ if (use_bpf)
+ bad = bad_hist[BROKEN_CONTENDED];
+
while ((st = pop_from_result())) {
- total++;
+ total += use_bpf ? st->nr_contended : 1;
if (st->broken)
bad++;
lock_contention_stop();
lock_contention_read(&con);
+
+ /* abuse bad hist stats for lost entries */
+ bad_hist[BROKEN_CONTENDED] = con.lost;
} else {
err = perf_session__process_events(session);
if (err)
/* should be same as bpf_skel/lock_contention.bpf.c */
struct lock_contention_key {
- u32 stack_id;
+ s32 stack_id;
};
struct lock_contention_data {
int lock_contention_read(struct lock_contention *con)
{
int fd, stack;
- u32 prev_key, key;
+ s32 prev_key, key;
struct lock_contention_data data;
struct lock_stat *st;
struct machine *machine = con->machine;
fd = bpf_map__fd(skel->maps.lock_stat);
stack = bpf_map__fd(skel->maps.stacks);
+ con->lost = skel->bss->lost;
+
prev_key = 0;
while (!bpf_map_get_next_key(fd, &prev_key, &key)) {
struct map *kmap;
#define MAX_ENTRIES 10240
struct contention_key {
- __u32 stack_id;
+ __s32 stack_id;
};
struct contention_data {
__u64 timestamp;
__u64 lock;
__u32 flags;
- __u32 stack_id;
+ __s32 stack_id;
};
/* callstack storage */
int has_cpu;
int has_task;
+/* error stat */
+unsigned long lost;
+
static inline int can_record(void)
{
if (has_cpu) {
pelem->flags = (__u32)ctx[1];
pelem->stack_id = bpf_get_stackid(ctx, &stacks, BPF_F_FAST_STACK_CMP);
+ if (pelem->stack_id < 0)
+ lost++;
return 0;
}
struct machine *machine;
struct hlist_head *result;
unsigned long map_nr_entries;
+ unsigned long lost;
};
#ifdef HAVE_BPF_SKEL