selftests/bpf: Add some negative tests
author    Yonghong Song <yonghong.song@linux.dev>
Sun, 27 Aug 2023 15:28:32 +0000 (08:28 -0700)
committer Alexei Starovoitov <ast@kernel.org>
Fri, 8 Sep 2023 15:42:18 +0000 (08:42 -0700)
Add a few negative tests for common mistakes when using percpu kptrs,
including (a correct-usage sketch follows the list):
  - store to percpu kptr
  - type mismatch in bpf_kptr_xchg arguments
  - sleepable prog with untrusted arg for bpf_this_cpu_ptr()
  - bpf_percpu_obj_new() paired with bpf_obj_drop(), and bpf_obj_new() paired
    with bpf_percpu_obj_drop()
  - struct with ptr for bpf_percpu_obj_new()
  - struct with special field (e.g., bpf_spin_lock) for bpf_percpu_obj_new()
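
For contrast, a minimal correct-usage sketch (illustrative only, not part of
this patch; it assumes the same "array" map, the "struct elem" value with a
"struct val_t __percpu_kptr *pc" field, and the "ret" global that the new
percpu_alloc_fail.c below declares):

  SEC("?fentry/bpf_fentry_test1")
  int BPF_PROG(percpu_kptr_correct_usage)
  {
          struct val_t __percpu_kptr *p, *old;
          struct val_t *v;
          struct elem *e;
          int index = 0;

          e = bpf_map_lookup_elem(&array, &index);
          if (!e)
                  return 0;

          /* Allocate a percpu object and publish it in the map value;
           * free whatever pointer it replaces.
           */
          p = bpf_percpu_obj_new(struct val_t);
          if (!p)
                  return 0;

          old = bpf_kptr_xchg(&e->pc, p);
          if (old)
                  bpf_percpu_obj_drop(old);

          /* Re-load the kptr; in a non-sleepable prog the loaded pointer is
           * RCU protected, so bpf_this_cpu_ptr() accepts it.
           */
          p = e->pc;
          if (!p)
                  return 0;

          v = bpf_this_cpu_ptr(p);
          ret = v->b;
          return 0;
  }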

Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20230827152832.2002421-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
tools/testing/selftests/bpf/progs/percpu_alloc_fail.c [new file with mode: 0644]

index 41bf784a4bb3caf1cd39d41485e9c314bf6a8edb..9541e9b3a0346e0a5fae29b15eebc8c6764bd471 100644
@@ -2,6 +2,7 @@
 #include <test_progs.h>
 #include "percpu_alloc_array.skel.h"
 #include "percpu_alloc_cgrp_local_storage.skel.h"
+#include "percpu_alloc_fail.skel.h"
 
 static void test_array(void)
 {
@@ -107,6 +108,10 @@ close_fd:
        close(cgroup_fd);
 }
 
+static void test_failure(void) {
+       RUN_TESTS(percpu_alloc_fail);
+}
+
 void test_percpu_alloc(void)
 {
        if (test__start_subtest("array"))
@@ -115,4 +120,6 @@ void test_percpu_alloc(void)
                test_array_sleepable();
        if (test__start_subtest("cgrp_local_storage"))
                test_cgrp_local_storage();
+       if (test__start_subtest("failure_tests"))
+               test_failure();
 }
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
new file mode 100644
index 0000000..1a891d3
--- /dev/null
@@ -0,0 +1,164 @@
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+struct val_t {
+       long b, c, d;
+};
+
+struct val2_t {
+       long b;
+};
+
+struct val_with_ptr_t {
+       char *p;
+};
+
+struct val_with_rb_root_t {
+       struct bpf_spin_lock lock;
+};
+
+struct elem {
+       long sum;
+       struct val_t __percpu_kptr *pc;
+};
+
+struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __uint(max_entries, 1);
+       __type(key, int);
+       __type(value, struct elem);
+} array SEC(".maps");
+
+long ret;
+
+SEC("?fentry/bpf_fentry_test1")
+__failure __msg("store to referenced kptr disallowed")
+int BPF_PROG(test_array_map_1)
+{
+       struct val_t __percpu_kptr *p;
+       struct elem *e;
+       int index = 0;
+
+       e = bpf_map_lookup_elem(&array, &index);
+       if (!e)
+               return 0;
+
+       p = bpf_percpu_obj_new(struct val_t);
+       if (!p)
+               return 0;
+
+       p = bpf_kptr_xchg(&e->pc, p);
+       if (p)
+               bpf_percpu_obj_drop(p);
+
+       e->pc = (struct val_t __percpu_kptr *)ret;
+       return 0;
+}
+
+SEC("?fentry/bpf_fentry_test1")
+__failure __msg("invalid kptr access, R2 type=percpu_ptr_val2_t expected=ptr_val_t")
+int BPF_PROG(test_array_map_2)
+{
+       struct val2_t __percpu_kptr *p2;
+       struct val_t __percpu_kptr *p;
+       struct elem *e;
+       int index = 0;
+
+       e = bpf_map_lookup_elem(&array, &index);
+       if (!e)
+               return 0;
+
+       p2 = bpf_percpu_obj_new(struct val2_t);
+       if (!p2)
+               return 0;
+
+       p = bpf_kptr_xchg(&e->pc, p2);
+       if (p)
+               bpf_percpu_obj_drop(p);
+
+       return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("R1 type=scalar expected=percpu_ptr_, percpu_rcu_ptr_, percpu_trusted_ptr_")
+int BPF_PROG(test_array_map_3)
+{
+       struct val_t __percpu_kptr *p, *p1;
+       struct val_t *v;
+       struct elem *e;
+       int index = 0;
+
+       e = bpf_map_lookup_elem(&array, &index);
+       if (!e)
+               return 0;
+
+       p = bpf_percpu_obj_new(struct val_t);
+       if (!p)
+               return 0;
+
+       p1 = bpf_kptr_xchg(&e->pc, p);
+       if (p1)
+               bpf_percpu_obj_drop(p1);
+
+       v = bpf_this_cpu_ptr(p);
+       ret = v->b;
+       return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("arg#0 expected for bpf_percpu_obj_drop_impl()")
+int BPF_PROG(test_array_map_4)
+{
+       struct val_t __percpu_kptr *p;
+
+       p = bpf_percpu_obj_new(struct val_t);
+       if (!p)
+               return 0;
+
+       bpf_obj_drop(p);
+       return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("arg#0 expected for bpf_obj_drop_impl()")
+int BPF_PROG(test_array_map_5)
+{
+       struct val_t *p;
+
+       p = bpf_obj_new(struct val_t);
+       if (!p)
+               return 0;
+
+       bpf_percpu_obj_drop(p);
+       return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("bpf_percpu_obj_new type ID argument must be of a struct of scalars")
+int BPF_PROG(test_array_map_6)
+{
+       struct val_with_ptr_t __percpu_kptr *p;
+
+       p = bpf_percpu_obj_new(struct val_with_ptr_t);
+       if (!p)
+               return 0;
+
+       bpf_percpu_obj_drop(p);
+       return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("bpf_percpu_obj_new type ID argument must not contain special fields")
+int BPF_PROG(test_array_map_7)
+{
+       struct val_with_rb_root_t __percpu_kptr *p;
+
+       p = bpf_percpu_obj_new(struct val_with_rb_root_t);
+       if (!p)
+               return 0;
+
+       bpf_percpu_obj_drop(p);
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
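
Usage note (not part of the patch): RUN_TESTS() feeds the programs above
through the selftest loader, which expects each __failure-annotated program to
be rejected by the verifier with a log that contains its __msg() string. A
rough manual equivalent using only the libbpf skeleton API, shown here as a
sketch (it only checks that the load fails, not the log text, and the helper
name is made up for illustration):

  #include <bpf/libbpf.h>
  #include "percpu_alloc_fail.skel.h"

  /* Sketch: verify that one negative program is rejected by the verifier. */
  static int check_prog_rejected(void)
  {
          struct percpu_alloc_fail *skel;
          int err;

          skel = percpu_alloc_fail__open();
          if (!skel)
                  return -1;

          /* Programs in "?"-prefixed sections start with autoload disabled;
           * enable just the one the verifier should look at.
           */
          bpf_program__set_autoload(skel->progs.test_array_map_1, true);

          err = percpu_alloc_fail__load(skel);
          percpu_alloc_fail__destroy(skel);

          /* For a negative test, loading is expected to fail. */
          return err ? 0 : -1;
  }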