selftests/bpf: add BPF object loading tests with explicit token passing
authorAndrii Nakryiko <andrii@kernel.org>
Wed, 13 Dec 2023 19:08:39 +0000 (11:08 -0800)
committerAlexei Starovoitov <ast@kernel.org>
Wed, 13 Dec 2023 23:47:05 +0000 (15:47 -0800)
Add a few tests that attempt to load a BPF object containing a privileged
map, a privileged program, and one requiring mandatory BTF uploading into
the kernel (to validate BPF token FD propagation to the BPF_BTF_LOAD command).

Acked-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20231213190842.3844987-8-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/prog_tests/token.c
tools/testing/selftests/bpf/progs/priv_map.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/priv_prog.c [new file with mode: 0644]

index dc03790c6272be16110549318539e676f2615abf..9812292336c958ce41ad179ad7bac325799c1ca5 100644 (file)
@@ -14,6 +14,9 @@
 #include <sys/socket.h>
 #include <sys/syscall.h>
 #include <sys/un.h>
+#include "priv_map.skel.h"
+#include "priv_prog.skel.h"
+#include "dummy_st_ops_success.skel.h"
 
 static inline int sys_mount(const char *dev_name, const char *dir_name,
                            const char *type, unsigned long flags,
@@ -643,6 +646,123 @@ cleanup:
        return err;
 }
 
+/* Check that a BPF object with a privileged map fails to load from a user
+ * namespace without a token, then succeeds when a token is supplied either
+ * implicitly via bpf_token_path or explicitly via bpf_token_fd.
+ * mnt_fd is an FD of a BPF FS instance with delegation options set up by
+ * the caller; returns 0 on success, -EINVAL on any failed check.
+ */
+static int userns_obj_priv_map(int mnt_fd)
+{
+       LIBBPF_OPTS(bpf_object_open_opts, opts);
+       char buf[256];
+       struct priv_map *skel;
+       int err, token_fd;
+
+       /* without any token, creating the privileged map must fail */
+       skel = priv_map__open_and_load();
+       if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+               priv_map__destroy(skel);
+               return -EINVAL;
+       }
+
+       /* use bpf_token_path to provide BPF FS path */
+       snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+       opts.bpf_token_path = buf;
+       skel = priv_map__open_opts(&opts);
+       if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+               return -EINVAL;
+
+       err = priv_map__load(skel);
+       priv_map__destroy(skel);
+       if (!ASSERT_OK(err, "obj_token_path_load"))
+               return -EINVAL;
+
+       /* create token and pass it through bpf_token_fd */
+       token_fd = bpf_token_create(mnt_fd, NULL);
+       if (!ASSERT_GT(token_fd, 0, "create_token"))
+               return -EINVAL;
+
+       /* bpf_token_fd takes precedence, so clear the path first */
+       opts.bpf_token_path = NULL;
+       opts.bpf_token_fd = token_fd;
+       skel = priv_map__open_opts(&opts);
+       if (!ASSERT_OK_PTR(skel, "obj_token_fd_open"))
+               return -EINVAL;
+
+       /* we can close our token FD, bpf_object owns dup()'ed FD now */
+       close(token_fd);
+
+       err = priv_map__load(skel);
+       priv_map__destroy(skel);
+       if (!ASSERT_OK(err, "obj_token_fd_load"))
+               return -EINVAL;
+
+       return 0;
+}
+
+/* Check that a BPF object with a privileged (kprobe) program fails to load
+ * from a user namespace without a token, then succeeds when the BPF FS path
+ * is supplied via bpf_token_path so libbpf creates a token internally.
+ * Returns 0 on success, -EINVAL on any failed check.
+ */
+static int userns_obj_priv_prog(int mnt_fd)
+{
+       LIBBPF_OPTS(bpf_object_open_opts, opts);
+       char buf[256];
+       struct priv_prog *skel;
+       int err;
+
+       /* without a token, loading the privileged program must fail */
+       skel = priv_prog__open_and_load();
+       if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+               priv_prog__destroy(skel);
+               return -EINVAL;
+       }
+
+       /* use bpf_token_path to provide BPF FS path */
+       snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+       opts.bpf_token_path = buf;
+       skel = priv_prog__open_opts(&opts);
+       if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+               return -EINVAL;
+
+       err = priv_prog__load(skel);
+       priv_prog__destroy(skel);
+       if (!ASSERT_OK(err, "obj_token_path_load"))
+               return -EINVAL;
+
+       return 0;
+}
+
+/* this test is called with BPF FS that doesn't delegate BPF_BTF_LOAD command,
+ * which should cause struct_ops application to fail, as BTF won't be uploaded
+ * into the kernel, even if STRUCT_OPS programs themselves are allowed
+ */
+static int validate_struct_ops_load(int mnt_fd, bool expect_success)
+{
+       LIBBPF_OPTS(bpf_object_open_opts, opts);
+       char buf[256];
+       struct dummy_st_ops_success *skel;
+       int err;
+
+       /* refer to the mount FD through /proc so libbpf can open the BPF FS
+        * instance and derive a token from it
+        */
+       snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+       opts.bpf_token_path = buf;
+       skel = dummy_st_ops_success__open_opts(&opts);
+       if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+               return -EINVAL;
+
+       err = dummy_st_ops_success__load(skel);
+       dummy_st_ops_success__destroy(skel);
+       if (expect_success) {
+               if (!ASSERT_OK(err, "obj_token_path_load"))
+                       return -EINVAL;
+       } else /* expect failure */ {
+               if (!ASSERT_ERR(err, "obj_token_path_load"))
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* struct_ops load must fail when BPF_BTF_LOAD isn't delegated */
+static int userns_obj_priv_btf_fail(int mnt_fd)
+{
+       return validate_struct_ops_load(mnt_fd, false /* should fail */);
+}
+
+/* struct_ops load must succeed once BPF_BTF_LOAD is delegated */
+static int userns_obj_priv_btf_success(int mnt_fd)
+{
+       return validate_struct_ops_load(mnt_fd, true /* should succeed */);
+}
+
+/* single-bit mask helper for building delegate cmds/maps/progs/attachs sets */
+#define bit(n) (1ULL << (n))
+
 void test_token(void)
 {
        if (test__start_subtest("map_token")) {
@@ -669,4 +789,43 @@ void test_token(void)
 
                subtest_userns(&opts, userns_prog_load);
        }
+       if (test__start_subtest("obj_priv_map")) {
+               /* delegate just enough for the privileged QUEUE map */
+               struct bpffs_opts opts = {
+                       .cmds = bit(BPF_MAP_CREATE),
+                       .maps = bit(BPF_MAP_TYPE_QUEUE),
+               };
+
+               subtest_userns(&opts, userns_obj_priv_map);
+       }
+       if (test__start_subtest("obj_priv_prog")) {
+               /* delegate just enough for the privileged kprobe program */
+               struct bpffs_opts opts = {
+                       .cmds = bit(BPF_PROG_LOAD),
+                       .progs = bit(BPF_PROG_TYPE_KPROBE),
+                       .attachs = ~0ULL,
+               };
+
+               subtest_userns(&opts, userns_obj_priv_prog);
+       }
+       if (test__start_subtest("obj_priv_btf_fail")) {
+               struct bpffs_opts opts = {
+                       /* disallow BTF loading */
+                       .cmds = bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+                       .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+                       .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+                       .attachs = ~0ULL,
+               };
+
+               subtest_userns(&opts, userns_obj_priv_btf_fail);
+       }
+       if (test__start_subtest("obj_priv_btf_success")) {
+               struct bpffs_opts opts = {
+                       /* allow BTF loading */
+                       .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+                       .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+                       .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+                       .attachs = ~0ULL,
+               };
+
+               subtest_userns(&opts, userns_obj_priv_btf_success);
+       }
 }
diff --git a/tools/testing/selftests/bpf/progs/priv_map.c b/tools/testing/selftests/bpf/progs/priv_map.c
new file mode 100644 (file)
index 0000000..9085be5
--- /dev/null
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* BPF_MAP_TYPE_QUEUE map used by token selftests; creating it from a user
+ * namespace requires this map type to be explicitly delegated by BPF FS
+ * (see the obj_priv_map subtest in prog_tests/token.c)
+ */
+struct {
+       __uint(type, BPF_MAP_TYPE_QUEUE);
+       __uint(max_entries, 1);
+       __type(value, __u32);
+} priv_map SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/priv_prog.c b/tools/testing/selftests/bpf/progs/priv_prog.c
new file mode 100644 (file)
index 0000000..3c7b2b6
--- /dev/null
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* minimal kprobe program used by token selftests; loading it from a user
+ * namespace requires BPF_PROG_LOAD and BPF_PROG_TYPE_KPROBE to be delegated
+ * (see the obj_priv_prog subtest in prog_tests/token.c)
+ */
+SEC("kprobe")
+int kprobe_prog(void *ctx)
+{
+       return 1;
+}