selftests/bpf: Fix some bugs in map_lookup_percpu_elem testcase
Author:     Feng Zhou <zhoufeng.zf@bytedance.com>
AuthorDate: Wed, 18 May 2022 02:50:53 +0000 (10:50 +0800)
Commit:     Andrii Nakryiko <andrii@kernel.org>
CommitDate: Fri, 20 May 2022 22:07:41 +0000 (15:07 -0700)
Address comments from Andrii Nakryiko; details here:
https://lore.kernel.org/lkml/20220511093854.411-1-zhoufeng.zf@bytedance.com/T/

- use /* */ instead of //
- use libbpf_num_possible_cpus() instead of sysconf(_SC_NPROCESSORS_ONLN)
- use 8 bytes for value size
- fix a memory leak (buf was never freed)
- use ASSERT_EQ instead of ASSERT_OK
- add bpf_loop to fetch values on each possible CPU (sketched below)
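
For context, bpf_loop(nr_loops, callback, ctx, flags) calls callback(i, ctx) for
i = 0..nr_loops-1 and stops early if the callback returns nonzero, so looping up
to nr_cpus visits every possible CPU exactly once. A minimal sketch of the
pattern (illustrative names; the real callback is in the diff below, and the
sketch assumes the percpu_array_map and nr_cpus definitions from there):

    struct sum_ctx {
            void *map;                      /* per-CPU map to read */
            __u64 sum;
    };

    static int sum_one_cpu(__u32 cpu, struct sum_ctx *c)
    {
            __u64 key = 0;
            __u64 *v = bpf_map_lookup_percpu_elem(c->map, &key, cpu);

            if (v)
                    c->sum += *v;
            return 0;                       /* 0 = keep looping, nonzero = break */
    }

    SEC("tp/syscalls/sys_enter_getuid")
    int sum_all_cpus(const void *ctx)
    {
            struct sum_ctx c = { .map = &percpu_array_map, .sum = 0 };

            bpf_loop(nr_cpus, sum_one_cpu, &c, 0);  /* flags must be 0 */
            return 0;
    }

Since userspace seeds CPU i with the value i, the expected total is
0 + 1 + ... + (nr_cpus - 1) = nr_cpus * (nr_cpus - 1) / 2, which is exactly what
the test now asserts with ASSERT_EQ.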

Fixes: ed7c13776e20 ("selftests/bpf: add test case for bpf_map_lookup_percpu_elem")
Signed-off-by: Feng Zhou <zhoufeng.zf@bytedance.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20220518025053.20492-1-zhoufeng.zf@bytedance.com
tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c

diff --git a/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
index 58b24c2112b0..bfb1bf3fd427 100644
--- a/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
+++ b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
@@ -1,30 +1,38 @@
 // SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2022 Bytedance
+/* Copyright (c) 2022 Bytedance */
 
 #include <test_progs.h>
-
 #include "test_map_lookup_percpu_elem.skel.h"
 
-#define TEST_VALUE  1
-
 void test_map_lookup_percpu_elem(void)
 {
        struct test_map_lookup_percpu_elem *skel;
-       int key = 0, ret;
-       int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
-       int *buf;
+       __u64 key = 0, sum;
+       int ret, i, nr_cpus = libbpf_num_possible_cpus();
+       __u64 *buf;
 
-       buf = (int *)malloc(nr_cpus*sizeof(int));
+       buf = malloc(nr_cpus*sizeof(__u64));
        if (!ASSERT_OK_PTR(buf, "malloc"))
                return;
-       memset(buf, 0, nr_cpus*sizeof(int));
-       buf[0] = TEST_VALUE;
 
-       skel = test_map_lookup_percpu_elem__open_and_load();
-       if (!ASSERT_OK_PTR(skel, "test_map_lookup_percpu_elem__open_and_load"))
-               return;
+       for (i = 0; i < nr_cpus; i++)
+               buf[i] = i;
+       sum = (nr_cpus - 1) * nr_cpus / 2;
+
+       skel = test_map_lookup_percpu_elem__open();
+       if (!ASSERT_OK_PTR(skel, "test_map_lookup_percpu_elem__open"))
+               goto exit;
+
+       skel->rodata->my_pid = getpid();
+       skel->rodata->nr_cpus = nr_cpus;
+
+       ret = test_map_lookup_percpu_elem__load(skel);
+       if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__load"))
+               goto cleanup;
+
        ret = test_map_lookup_percpu_elem__attach(skel);
-       ASSERT_OK(ret, "test_map_lookup_percpu_elem__attach");
+       if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__attach"))
+               goto cleanup;
 
        ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_array_map), &key, buf, 0);
        ASSERT_OK(ret, "percpu_array_map update");
@@ -37,10 +45,14 @@ void test_map_lookup_percpu_elem(void)
 
        syscall(__NR_getuid);
 
-       ret = skel->bss->percpu_array_elem_val == TEST_VALUE &&
-             skel->bss->percpu_hash_elem_val == TEST_VALUE &&
-             skel->bss->percpu_lru_hash_elem_val == TEST_VALUE;
-       ASSERT_OK(!ret, "bpf_map_lookup_percpu_elem success");
+       test_map_lookup_percpu_elem__detach(skel);
+
+       ASSERT_EQ(skel->bss->percpu_array_elem_sum, sum, "percpu_array lookup percpu elem");
+       ASSERT_EQ(skel->bss->percpu_hash_elem_sum, sum, "percpu_hash lookup percpu elem");
+       ASSERT_EQ(skel->bss->percpu_lru_hash_elem_sum, sum, "percpu_lru_hash lookup percpu elem");
 
+cleanup:
        test_map_lookup_percpu_elem__destroy(skel);
+exit:
+       free(buf);
 }
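
On the userspace side, a per-CPU map element is updated and read through a
single buffer holding one slot per possible CPU, and the kernel rounds each
slot up to 8 bytes, which is why the value type became __u64 and the buffer is
sized with libbpf_num_possible_cpus(). A minimal sketch of that calling
convention (error handling elided; map_fd is assumed to be the fd of a per-CPU
map with a __u64 value):

    #include <stdlib.h>
    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    static void seed_percpu_map(int map_fd)
    {
            int i, nr_cpus = libbpf_num_possible_cpus();    /* possible, not online */
            __u64 key = 0, *vals = calloc(nr_cpus, sizeof(__u64));

            if (!vals)
                    return;
            for (i = 0; i < nr_cpus; i++)
                    vals[i] = i;            /* CPU i gets value i */
            /* one update writes the element's value on every possible CPU */
            bpf_map_update_elem(map_fd, &key, vals, BPF_ANY);
            free(vals);
    }

This is also why sysconf(_SC_NPROCESSORS_ONLN) was a bug: it counts online
CPUs, while the kernel sizes per-CPU value buffers by possible CPUs, and the
two can differ on systems with offline or hot-pluggable CPUs.
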
diff --git a/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c b/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
index 5d4ef86cbf48..ca827b1092da 100644
--- a/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
+++ b/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
@@ -1,52 +1,74 @@
 // SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2022 Bytedance
+/* Copyright (c) 2022 Bytedance */
 
 #include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 
-int percpu_array_elem_val = 0;
-int percpu_hash_elem_val = 0;
-int percpu_lru_hash_elem_val = 0;
+__u64 percpu_array_elem_sum = 0;
+__u64 percpu_hash_elem_sum = 0;
+__u64 percpu_lru_hash_elem_sum = 0;
+const volatile int nr_cpus;
+const volatile int my_pid;
 
 struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
-       __type(value, __u32);
+       __type(value, __u64);
 } percpu_array_map SEC(".maps");
 
 struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
        __uint(max_entries, 1);
-       __type(key, __u32);
-       __type(value, __u32);
+       __type(key, __u64);
+       __type(value, __u64);
 } percpu_hash_map SEC(".maps");
 
 struct {
        __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
        __uint(max_entries, 1);
-       __type(key, __u32);
-       __type(value, __u32);
+       __type(key, __u64);
+       __type(value, __u64);
 } percpu_lru_hash_map SEC(".maps");
 
+struct read_percpu_elem_ctx {
+       void *map;
+       __u64 sum;
+};
+
+static int read_percpu_elem_callback(__u32 index, struct read_percpu_elem_ctx *ctx)
+{
+       __u64 key = 0;
+       __u64 *value;
+
+       value = bpf_map_lookup_percpu_elem(ctx->map, &key, index);
+       if (value)
+               ctx->sum += *value;
+       return 0;
+}
+
 SEC("tp/syscalls/sys_enter_getuid")
 int sysenter_getuid(const void *ctx)
 {
-       __u32 key = 0;
-       __u32 cpu = 0;
-       __u32 *value;
+       struct read_percpu_elem_ctx map_ctx;
 
-       value = bpf_map_lookup_percpu_elem(&percpu_array_map, &key, cpu);
-       if (value)
-               percpu_array_elem_val = *value;
+       if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+               return 0;
 
-       value = bpf_map_lookup_percpu_elem(&percpu_hash_map, &key, cpu);
-       if (value)
-               percpu_hash_elem_val = *value;
+       map_ctx.map = &percpu_array_map;
+       map_ctx.sum = 0;
+       bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+       percpu_array_elem_sum = map_ctx.sum;
 
-       value = bpf_map_lookup_percpu_elem(&percpu_lru_hash_map, &key, cpu);
-       if (value)
-               percpu_lru_hash_elem_val = *value;
+       map_ctx.map = &percpu_hash_map;
+       map_ctx.sum = 0;
+       bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+       percpu_hash_elem_sum = map_ctx.sum;
+
+       map_ctx.map = &percpu_lru_hash_map;
+       map_ctx.sum = 0;
+       bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+       percpu_lru_hash_elem_sum = map_ctx.sum;
 
        return 0;
 }
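
As a concrete check of the arithmetic: with 4 possible CPUs, userspace seeds
{0, 1, 2, 3}, so each per-CPU sum must be (4 - 1) * 4 / 2 = 6. The seeded
values can also be read back directly from userspace, since a lookup on a
per-CPU map fills one 8-byte slot per possible CPU; a hedged sketch reusing
skel, buf and nr_cpus from the test above:

    __u64 check = 0, key = 0;
    int i;

    /* userspace lookup returns the element's value from every possible CPU */
    if (!bpf_map_lookup_elem(bpf_map__fd(skel->maps.percpu_array_map), &key, buf)) {
            for (i = 0; i < nr_cpus; i++)
                    check += buf[i];        /* equals (nr_cpus - 1) * nr_cpus / 2 */
    }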