bpf: inline bpf_map_lookup_elem() for PERCPU_ARRAY maps
author Andrii Nakryiko <andrii@kernel.org>
Tue, 2 Apr 2024 02:13:04 +0000 (19:13 -0700)
committer Alexei Starovoitov <ast@kernel.org>
Wed, 3 Apr 2024 17:29:56 +0000 (10:29 -0700)
Using the new per-CPU BPF instruction, implement inlining of the per-CPU ARRAY
map lookup helper, when BPF JIT support for that instruction is present.
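
For reference, the helper being inlined boils down to the following C logic
(a simplified sketch of percpu_array_map_lookup_elem(), reconstructed from the
context lines in the diff below; the bounds-check branch is implied by the
emitted JGE instruction):

    static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
    {
            struct bpf_array *array = container_of(map, struct bpf_array, map);
            u32 index = *(u32 *)key;

            /* out-of-range index returns NULL, matching the JGE branch below */
            if (unlikely(index >= array->map.max_entries))
                    return NULL;

            /* resolve the per-CPU base pointer to this CPU's copy */
            return this_cpu_ptr(array->pptrs[index & array->index_mask]);
    }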

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/r/20240402021307.1012571-4-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/arraymap.c

index 13358675ff2edc723b64adf449f0edcd733428fd..8c1e6d7654bb688b762441c7ac870f9350724424 100644
@@ -246,6 +246,38 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
        return this_cpu_ptr(array->pptrs[index & array->index_mask]);
 }
 
+/* emit BPF instructions equivalent to C code of percpu_array_map_lookup_elem() */
+static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       struct bpf_insn *insn = insn_buf;
+
+       if (!bpf_jit_supports_percpu_insn())
+               return -EOPNOTSUPP;
+
+       if (map->map_flags & BPF_F_INNER_MAP)
+               return -EOPNOTSUPP;
+
+       BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0);
+       *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct bpf_array, pptrs));
+
+       *insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0);
+       if (!map->bypass_spec_v1) {
+               *insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6);
+               *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask);
+       } else {
+               *insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5);
+       }
+
+       *insn++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
+       *insn++ = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
+       *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
+       *insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
+       *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+       *insn++ = BPF_MOV64_IMM(BPF_REG_0, 0);
+       return insn - insn_buf;
+}
+
 static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
 {
        struct bpf_array *array = container_of(map, struct bpf_array, map);
@@ -776,6 +808,7 @@ const struct bpf_map_ops percpu_array_map_ops = {
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = percpu_array_map_lookup_elem,
+       .map_gen_lookup = percpu_array_map_gen_lookup,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
        .map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
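
For readers tracing the generated sequence, it can be read back roughly as the
BPF pseudo-assembly below (a sketch: the labels and the percpu_cast() notation
are invented for illustration; map->max_entries and array->index_mask are baked
in as immediates at generation time, and the masking line is emitted only on
the !bypass_spec_v1 path):

    r1 += offsetof(struct bpf_array, pptrs)  /* map == array, map is at offset 0 */
    r0 = *(u32 *)(r2 + 0)                    /* index = *(u32 *)key */
    if r0 >= max_entries goto out_null       /* bounds check */
    r0 &= index_mask                         /* Spectre v1 masking */
    r0 <<= 3                                 /* index * sizeof(void *) */
    r0 += r1                                 /* r0 = &array->pptrs[index] */
    r0 = *(u64 *)(r0 + 0)                    /* load per-CPU base pointer */
    r0 = percpu_cast(r0)                     /* BPF_MOV64_PERCPU_REG: this_cpu_ptr() equivalent */
    goto done
    out_null:
    r0 = 0                                   /* NULL on out-of-range index */
    done:

The JGE branch target accounts for the optional masking instruction (6 insns to
skip with masking, 5 without), landing on the r0 = 0 NULL path, and the JA +1
jumps over that path on success. The function returns insn - insn_buf, the
number of emitted instructions, which the verifier patches in place of the
bpf_map_lookup_elem() helper call.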