bpf: Consistently use BPF token throughout BPF verifier logic
Author:     Andrii Nakryiko <andrii@kernel.org>
AuthorDate: Wed, 24 Jan 2024 02:21:05 +0000 (18:21 -0800)
Commit:     Alexei Starovoitov <ast@kernel.org>
CommitDate: Thu, 25 Jan 2024 00:21:01 +0000 (16:21 -0800)
Remove remaining direct queries to perfmon_capable() and bpf_capable()
in BPF verifier logic and instead use BPF token (if available) to make
decisions about privileges.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20240124022127.2379740-9-andrii@kernel.org
include/linux/bpf.h
include/linux/filter.h
kernel/bpf/arraymap.c
kernel/bpf/core.c
kernel/bpf/verifier.c
net/core/filter.c

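The helpers converted below all funnel into bpf_token_capable(). As a conceptual sketch only (the real implementation lives in kernel/bpf/token.c and additionally consults the bpf_token_capable LSM hook plus a CAP_SYS_ADMIN fallback; the token->userns field name is taken from the earlier token patches in this series), the semantics the diff relies on are: with a token, the capability is judged against the user namespace that owns the token; with a NULL token, the check degrades to the old global capable() behaviour, which is why call sites without a program context, such as array_map_alloc(), can simply pass NULL.

static bool bpf_token_capable_sketch(const struct bpf_token *token, int cap)
{
	/* No token: behave like the pre-token code, i.e. a plain
	 * init-namespace capability check.
	 */
	if (!token)
		return capable(cap);

	/* Token present: privileges were delegated through a BPF FS
	 * instance, so check the capability in the token's owning user
	 * namespace instead of requiring it globally.
	 */
	return ns_capable(token->userns, cap);
}
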
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 1325225bf6022b30b3bfc2dffb41cf58c0b8cc27..4e146e9708be3ed7a4502aefd0bb7daef7ebb189 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2261,24 +2261,24 @@ extern int sysctl_unprivileged_bpf_disabled;
 
 bool bpf_token_capable(const struct bpf_token *token, int cap);
 
-static inline bool bpf_allow_ptr_leaks(void)
+static inline bool bpf_allow_ptr_leaks(const struct bpf_token *token)
 {
-       return perfmon_capable();
+       return bpf_token_capable(token, CAP_PERFMON);
 }
 
-static inline bool bpf_allow_uninit_stack(void)
+static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)
 {
-       return perfmon_capable();
+       return bpf_token_capable(token, CAP_PERFMON);
 }
 
-static inline bool bpf_bypass_spec_v1(void)
+static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
 {
-       return cpu_mitigations_off() || perfmon_capable();
+       return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
 }
 
-static inline bool bpf_bypass_spec_v4(void)
+static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
 {
-       return cpu_mitigations_off() || perfmon_capable();
+       return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
 }
 
 int bpf_map_new_fd(struct bpf_map *map, int flags);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 35f067fd3840a6d0f3a0644999f21a0b111f0b31..fee070b9826ee35354e0ff85697f5a01deee0f19 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1140,7 +1140,7 @@ static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
                return false;
        if (!bpf_jit_harden)
                return false;
-       if (bpf_jit_harden == 1 && bpf_capable())
+       if (bpf_jit_harden == 1 && bpf_token_capable(prog->aux->token, CAP_BPF))
                return false;
 
        return true;
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 0bdbbbeab1550746ede893f1a69bc1f4c2a7c0a9..13358675ff2edc723b64adf449f0edcd733428fd 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -82,7 +82,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        int numa_node = bpf_map_attr_numa_node(attr);
        u32 elem_size, index_mask, max_entries;
-       bool bypass_spec_v1 = bpf_bypass_spec_v1();
+       bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL);
        u64 array_size, mask64;
        struct bpf_array *array;
 
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 00dccba2976980b6449b61b8c2fa991c30efd143..71c459a51d9e144b04d343f7de46d2ab8ce01dc7 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -682,7 +682,7 @@ static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
 {
        if (!bpf_prog_kallsyms_candidate(fp) ||
-           !bpf_capable())
+           !bpf_token_capable(fp->aux->token, CAP_BPF))
                return;
 
        bpf_prog_ksym_set_addr(fp);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f31868ba0c2d889676a5af03cebd00b1137f0f7f..fe833e831cb64c82c3263ca57618243189cf88fe 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -20830,7 +20830,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
        env->prog = *prog;
        env->ops = bpf_verifier_ops[env->prog->type];
        env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
-       is_priv = bpf_capable();
+
+       env->allow_ptr_leaks = bpf_allow_ptr_leaks(env->prog->aux->token);
+       env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token);
+       env->bypass_spec_v1 = bpf_bypass_spec_v1(env->prog->aux->token);
+       env->bypass_spec_v4 = bpf_bypass_spec_v4(env->prog->aux->token);
+       env->bpf_capable = is_priv = bpf_token_capable(env->prog->aux->token, CAP_BPF);
 
        bpf_get_btf_vmlinux();
 
@@ -20862,12 +20867,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
        if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
                env->strict_alignment = false;
 
-       env->allow_ptr_leaks = bpf_allow_ptr_leaks();
-       env->allow_uninit_stack = bpf_allow_uninit_stack();
-       env->bypass_spec_v1 = bpf_bypass_spec_v1();
-       env->bypass_spec_v4 = bpf_bypass_spec_v4();
-       env->bpf_capable = bpf_capable();
-
        if (is_priv)
                env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
        env->test_reg_invariants = attr->prog_flags & BPF_F_TEST_REG_INVARIANTS;
diff --git a/net/core/filter.c b/net/core/filter.c
index 521bcd0f5e4d49f922252b04b8c76b6664cdc07e..40121475e8d15627438ba1f3247b87c41651abad 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8580,7 +8580,7 @@ static bool cg_skb_is_valid_access(int off, int size,
                return false;
        case bpf_ctx_range(struct __sk_buff, data):
        case bpf_ctx_range(struct __sk_buff, data_end):
-               if (!bpf_capable())
+               if (!bpf_token_capable(prog->aux->token, CAP_BPF))
                        return false;
                break;
        }
@@ -8592,7 +8592,7 @@ static bool cg_skb_is_valid_access(int off, int size,
                case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
                        break;
                case bpf_ctx_range(struct __sk_buff, tstamp):
-                       if (!bpf_capable())
+                       if (!bpf_token_capable(prog->aux->token, CAP_BPF))
                                return false;
                        break;
                default:
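
For completeness, a hedged userspace sketch of how a token ends up in prog->aux->token in the first place: the loader creates a token from a BPF FS mount that was set up with delegation mount options and passes its fd at BPF_PROG_LOAD time. Field names (token_create.bpffs_fd, prog_token_fd) follow the UAPI added earlier in this series; later UAPI revisions additionally gate the token fd behind an explicit BPF_F_TOKEN_FD flag, so treat the details below as an illustration rather than a reference.

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

/* Create a BPF token from a delegating BPF FS mount and attach it to a
 * prepared BPF_PROG_LOAD attribute.  Sketch only; error handling trimmed.
 */
int prog_load_with_token(const char *bpffs_path, union bpf_attr *load_attr)
{
	union bpf_attr attr = {};
	int bpffs_fd, token_fd, prog_fd;

	bpffs_fd = open(bpffs_path, O_RDONLY);
	if (bpffs_fd < 0)
		return -1;

	attr.token_create.bpffs_fd = bpffs_fd;
	token_fd = sys_bpf(BPF_TOKEN_CREATE, &attr, sizeof(attr));
	close(bpffs_fd);
	if (token_fd < 0)
		return -1;

	/* The verifier then answers bpf_allow_ptr_leaks(), bpf_bypass_spec_v1(),
	 * etc. against this token instead of the loader's own capabilities.
	 */
	load_attr->prog_token_fd = token_fd;
	prog_fd = sys_bpf(BPF_PROG_LOAD, load_attr, sizeof(*load_attr));
	close(token_fd);
	return prog_fd;
}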