selftests/bpf: Test bpf_refcount_acquire of node obtained via direct ld
author: Dave Marchevsky <davemarchevsky@fb.com>
Tue, 7 Nov 2023 08:56:39 +0000 (00:56 -0800)
committer: Alexei Starovoitov <ast@kernel.org>
Fri, 10 Nov 2023 03:07:51 +0000 (19:07 -0800)
This patch demonstrates that verifier changes earlier in this series
result in bpf_refcount_acquire(mapval->stashed_kptr) passing
verification. The added test additionally validates that stashing a kptr
in mapval and - in a separate BPF program - refcount_acquiring the kptr
without unstashing works as expected at runtime.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Link: https://lore.kernel.org/r/20231107085639.3016113-7-davemarchevsky@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c
tools/testing/selftests/bpf/progs/local_kptr_stash.c

index b25b870f87ba9b067fa4dcc35ab817ecbc96a384..e6e50a394472c217bffc24c4b137101ff533297e 100644 (file)
@@ -73,6 +73,37 @@ static void test_local_kptr_stash_unstash(void)
        local_kptr_stash__destroy(skel);
 }
 
+/* Run the refcount_acquire_without_unstash BPF prog before and after a
+ * refcounted node has been stashed, validating both the "nothing stashed
+ * yet" path and the acquire-via-direct-map_value-load path at runtime.
+ */
+static void test_refcount_acquire_without_unstash(void)
+{
+       LIBBPF_OPTS(bpf_test_run_opts, opts,
+                   .data_in = &pkt_v4,
+                   .data_size_in = sizeof(pkt_v4),
+                   .repeat = 1,
+       );
+       struct local_kptr_stash *skel;
+       int ret;
+
+       skel = local_kptr_stash__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load"))
+               return;
+
+       /* First run: no node has been stashed yet, so the prog is expected
+        * to take its early-exit path and return 2
+        */
+       ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.refcount_acquire_without_unstash),
+                                    &opts);
+       ASSERT_OK(ret, "refcount_acquire_without_unstash run");
+       ASSERT_EQ(opts.retval, 2, "refcount_acquire_without_unstash retval");
+
+       /* Stash a refcounted node (data = 42) into the map; retval 0 on
+        * success
+        */
+       ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_refcounted_node), &opts);
+       ASSERT_OK(ret, "stash_refcounted_node run");
+       ASSERT_OK(opts.retval, "stash_refcounted_node retval");
+
+       /* Second run: refcount_acquire on the stashed kptr should now
+        * succeed and the prog returns the node's data, 42
+        */
+       ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.refcount_acquire_without_unstash),
+                                    &opts);
+       ASSERT_OK(ret, "refcount_acquire_without_unstash (2) run");
+       ASSERT_EQ(opts.retval, 42, "refcount_acquire_without_unstash (2) retval");
+
+       local_kptr_stash__destroy(skel);
+}
+
 static void test_local_kptr_stash_fail(void)
 {
        RUN_TESTS(local_kptr_stash_fail);
@@ -86,6 +117,8 @@ void test_local_kptr_stash(void)
                test_local_kptr_stash_plain();
        if (test__start_subtest("local_kptr_stash_unstash"))
                test_local_kptr_stash_unstash();
+       if (test__start_subtest("refcount_acquire_without_unstash"))
+               test_refcount_acquire_without_unstash();
        if (test__start_subtest("local_kptr_stash_fail"))
                test_local_kptr_stash_fail();
 }
index b567a666d2b87fd70ca8f15eb47fc48cba0d6500..1769fdff6aeae6a849daa6724bda50e707bd5180 100644 (file)
@@ -14,6 +14,24 @@ struct node_data {
        struct bpf_rb_node node;
 };
 
+/* Node carrying a bpf_refcount so multiple references to it can be held
+ * at once (e.g. a stashed kptr plus a reference acquired by a prog)
+ */
+struct refcounted_node {
+       long data;
+       struct bpf_rb_node rb_node;
+       struct bpf_refcount refcount;
+};
+
+/* Map value type: a slot for stashing one refcounted_node kptr.
+ * NOTE(review): the spin lock is co-located with the stashed kptr but is
+ * not taken by the progs added in this patch — confirm whether it is
+ * needed for other (rbtree) usage.
+ */
+struct stash {
+       struct bpf_spin_lock l;
+       struct refcounted_node __kptr *stashed;
+};
+
+/* Array map of stash slots; the progs below only use key 0 */
+struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __type(key, int);
+       __type(value, struct stash);
+       __uint(max_entries, 10);
+} refcounted_node_stash SEC(".maps");
+
 struct plain_local {
        long key;
        long data;
@@ -38,6 +56,7 @@ struct map_value {
  * Had to do the same w/ bpf_kfunc_call_test_release below
  */
 struct node_data *just_here_because_btf_bug;
+struct refcounted_node *just_here_because_btf_bug2;
 
 struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
@@ -132,4 +151,56 @@ long stash_test_ref_kfunc(void *ctx)
        return 0;
 }
 
+/* Acquire a reference on a stashed node by passing the map_value's kptr
+ * field directly to bpf_refcount_acquire - i.e. without bpf_kptr_xchg'ing
+ * it out first. Returns:
+ *   1  - map lookup failed
+ *   2  - nothing stashed yet (expected before stash_refcounted_node runs)
+ *   3  - bpf_refcount_acquire failed
+ *   -1 - stashed slot read back as NULL after a successful acquire
+ *   otherwise the stashed node's data (42 once stashed)
+ */
+SEC("tc")
+long refcount_acquire_without_unstash(void *ctx)
+{
+       struct refcounted_node *p;
+       struct stash *s;
+       int ret = 0;
+
+       /* ret is 0 here, so this looks up stash slot 0 */
+       s = bpf_map_lookup_elem(&refcounted_node_stash, &ret);
+       if (!s)
+               return 1;
+
+       if (!s->stashed)
+               /* refcount_acquire failure is expected when no refcounted_node
+                * has been stashed before this program executes
+                */
+               return 2;
+
+       p = bpf_refcount_acquire(s->stashed);
+       if (!p)
+               return 3;
+
+       /* Read data through the still-stashed pointer, then release the
+        * reference acquired above
+        */
+       ret = s->stashed ? s->stashed->data : -1;
+       bpf_obj_drop(p);
+       return ret;
+}
+
+/* Helper for refcount_acquire_without_unstash test */
+/* Helper for refcount_acquire_without_unstash test.
+ * Allocates a refcounted_node with data = 42 and stashes it in slot 0 of
+ * refcounted_node_stash. Returns:
+ *   1 - map lookup failed
+ *   2 - bpf_obj_new failed
+ *   3 - slot was already occupied (old node is dropped)
+ *   0 - success
+ */
+SEC("tc")
+long stash_refcounted_node(void *ctx)
+{
+       struct refcounted_node *p;
+       struct stash *s;
+       int key = 0;
+
+       s = bpf_map_lookup_elem(&refcounted_node_stash, &key);
+       if (!s)
+               return 1;
+
+       p = bpf_obj_new(typeof(*p));
+       if (!p)
+               return 2;
+       p->data = 42;
+
+       /* xchg stores the new node and returns the previous occupant;
+        * a non-NULL return means the slot wasn't empty as expected
+        */
+       p = bpf_kptr_xchg(&s->stashed, p);
+       if (p) {
+               bpf_obj_drop(p);
+               return 3;
+       }
+
+       return 0;
+}
+
 char _license[] SEC("license") = "GPL";