bpf: add a test case for helper bpf_perf_prog_read_value
authorYonghong Song <yhs@fb.com>
Thu, 5 Oct 2017 16:19:23 +0000 (09:19 -0700)
committerDavid S. Miller <davem@davemloft.net>
Sat, 7 Oct 2017 22:05:57 +0000 (23:05 +0100)
The bpf sample program trace_event is enhanced to use the new
helper to print out enabled/running time.

Signed-off-by: Yonghong Song <yhs@fb.com>
Acked-by: Alexei Starovoitov <ast@fb.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
samples/bpf/trace_event_kern.c
samples/bpf/trace_event_user.c
tools/include/uapi/linux/bpf.h
tools/testing/selftests/bpf/bpf_helpers.h

index 41b6115a32eb1b0c06f13a0736690903e352839e..a77a583d94d42ca375a7519a066ff4f648e05226 100644 (file)
@@ -37,10 +37,14 @@ struct bpf_map_def SEC("maps") stackmap = {
 SEC("perf_event")
 int bpf_prog1(struct bpf_perf_event_data *ctx)
 {
+       char time_fmt1[] = "Time Enabled: %llu, Time Running: %llu";
+       char time_fmt2[] = "Get Time Failed, ErrCode: %d";
        char fmt[] = "CPU-%d period %lld ip %llx";
        u32 cpu = bpf_get_smp_processor_id();
+       struct bpf_perf_event_value value_buf;
        struct key_t key;
        u64 *val, one = 1;
+       int ret;
 
        if (ctx->sample_period < 10000)
                /* ignore warmup */
@@ -54,6 +58,12 @@ int bpf_prog1(struct bpf_perf_event_data *ctx)
                return 0;
        }
 
+       ret = bpf_perf_prog_read_value(ctx, (void *)&value_buf, sizeof(struct bpf_perf_event_value));
+       if (!ret)
+         bpf_trace_printk(time_fmt1, sizeof(time_fmt1), value_buf.enabled, value_buf.running);
+       else
+         bpf_trace_printk(time_fmt2, sizeof(time_fmt2), ret);
+
        val = bpf_map_lookup_elem(&counts, &key);
        if (val)
                (*val)++;
index 7bd827b84a67988fd6fc8a36eec43fc22e623032..bf4f1b6d9a52e0d33e17189be30900f90defd266 100644 (file)
@@ -127,6 +127,9 @@ static void test_perf_event_all_cpu(struct perf_event_attr *attr)
        int *pmu_fd = malloc(nr_cpus * sizeof(int));
        int i, error = 0;
 
+       /* system wide perf event, no need to inherit */
+       attr->inherit = 0;
+
        /* open perf_event on all cpus */
        for (i = 0; i < nr_cpus; i++) {
                pmu_fd[i] = sys_perf_event_open(attr, -1, i, -1, 0);
@@ -154,6 +157,11 @@ static void test_perf_event_task(struct perf_event_attr *attr)
 {
        int pmu_fd;
 
+       /* per task perf event, enable inherit so the "dd ..." command can be traced properly.
+        * Enabling inherit will cause bpf_perf_prog_read_value helper failure.
+        */
+       attr->inherit = 1;
+
        /* open task bound event */
        pmu_fd = sys_perf_event_open(attr, 0, -1, -1, 0);
        if (pmu_fd < 0) {
@@ -175,14 +183,12 @@ static void test_bpf_perf_event(void)
                .freq = 1,
                .type = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
-               .inherit = 1,
        };
        struct perf_event_attr attr_type_sw = {
                .sample_freq = SAMPLE_FREQ,
                .freq = 1,
                .type = PERF_TYPE_SOFTWARE,
                .config = PERF_COUNT_SW_CPU_CLOCK,
-               .inherit = 1,
        };
        struct perf_event_attr attr_hw_cache_l1d = {
                .sample_freq = SAMPLE_FREQ,
@@ -192,7 +198,6 @@ static void test_bpf_perf_event(void)
                        PERF_COUNT_HW_CACHE_L1D |
                        (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                        (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
-               .inherit = 1,
        };
        struct perf_event_attr attr_hw_cache_branch_miss = {
                .sample_freq = SAMPLE_FREQ,
@@ -202,7 +207,6 @@ static void test_bpf_perf_event(void)
                        PERF_COUNT_HW_CACHE_BPU |
                        (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                        (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
-               .inherit = 1,
        };
        struct perf_event_attr attr_type_raw = {
                .sample_freq = SAMPLE_FREQ,
@@ -210,7 +214,6 @@ static void test_bpf_perf_event(void)
                .type = PERF_TYPE_RAW,
                /* Intel Instruction Retired */
                .config = 0xc0,
-               .inherit = 1,
        };
 
        printf("Test HW_CPU_CYCLES\n");
index cdf6c4f50b0f1acb91e2b913e3b596078ba6a4d0..0894fd20b12bd4d4f51c86dcc0af207d6e0b0fb3 100644 (file)
@@ -698,7 +698,8 @@ union bpf_attr {
        FN(sk_redirect_map),            \
        FN(sock_map_update),            \
        FN(xdp_adjust_meta),            \
-       FN(perf_event_read_value),
+       FN(perf_event_read_value),      \
+       FN(perf_prog_read_value),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
index c15ca83dbbd922e71d8ed4231e0d7a6b8dc6ed3a..e25dbf6038cf6d9ea8afb0d2db2fd7acd1b46d0f 100644 (file)
@@ -75,6 +75,9 @@ static int (*bpf_sock_map_update)(void *map, void *key, void *value,
 static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags,
                                        void *buf, unsigned int buf_size) =
        (void *) BPF_FUNC_perf_event_read_value;
+static int (*bpf_perf_prog_read_value)(void *ctx, void *buf,
+                                      unsigned int buf_size) =
+       (void *) BPF_FUNC_perf_prog_read_value;
 
 
 /* llvm builtin functions that eBPF C program may use to