bpf: Add support for certain atomics in bpf_arena to x86 JIT
author Alexei Starovoitov <ast@kernel.org>
Fri, 5 Apr 2024 23:11:33 +0000 (16:11 -0700)
committer Martin KaFai Lau <martin.lau@kernel.org>
Tue, 9 Apr 2024 17:24:26 +0000 (10:24 -0700)
Support atomics in bpf_arena that can be JITed as a single x86 instruction.
Instructions that are JITed as loops are not supported at the moment,
since they require more complex extable and loop logic.
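
For illustration only (not part of this patch; the map definition and
the __arena attribute are assumed from the bpf_arena selftests), a
program that now JITs to a single lock-prefixed instruction could look
like:

  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>
  #include "bpf_arena_common.h" /* assumed: provides __arena */

  struct {
          __uint(type, BPF_MAP_TYPE_ARENA);
          __uint(map_flags, BPF_F_MMAPABLE);
          __uint(max_entries, 1); /* one arena page */
  } arena SEC(".maps");

  int __arena counter;

  SEC("syscall")
  int bump(void *ctx)
  {
          /* one BPF_ATOMIC add; with this patch the x86 JIT emits a
           * single "lock add" (or "lock xadd" when the result is
           * used) against the arena base kept in r12
           */
          __sync_fetch_and_add(&counter, 1);
          return 0;
  }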

JITs can choose to do smarter things with bpf_jit_supports_insn().
For example, arm64 may decide to support all BPF atomic instructions
when emit_lse_atomic is available and none in ll_sc mode.
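
A hypothetical arm64 implementation in that spirit might look like the
sketch below (not part of this patch; the capability check mirrors how
arm64's emit_atomic already chooses between LSE and ll_sc):

  bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
  {
          if (!in_arena)
                  return true;
          switch (insn->code) {
          case BPF_STX | BPF_ATOMIC | BPF_W:
          case BPF_STX | BPF_ATOMIC | BPF_DW:
                  /* LSE atomics are single instructions; the ll_sc
                   * fallback is a loop and would need loop-aware
                   * extable handling
                   */
                  return cpus_have_cap(ARM64_HAS_LSE_ATOMICS);
          }
          return true;
  }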

bpf_jit_supports_percpu_insn(), bpf_jit_supports_ptr_xchg() and
other such callbacks can be replaced with bpf_jit_supports_insn()
in the future.
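
For instance, instead of growing the list of feature callbacks, a
caller could probe the JIT for one concrete instruction (illustrative
only, using the existing BPF_ATOMIC_OP() insn macro):

  /* ask about a pointer-sized atomic exchange rather than adding
   * yet another bpf_jit_supports_*() callback
   */
  struct bpf_insn insn =
          BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_2, 0);
  bool can_xchg = bpf_jit_supports_insn(&insn, false);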

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20240405231134.17274-1-alexei.starovoitov@gmail.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
arch/x86/net/bpf_jit_comp.c
include/linux/filter.h
kernel/bpf/core.c
kernel/bpf/verifier.c

arch/x86/net/bpf_jit_comp.c
index 6cf9a5697c0988f4df0c29e4224e70db0e55cb29..2b5a475c4dd0dfc57f83b9ce916e7af9bc64a6e1 100644
@@ -1172,6 +1172,54 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,
        return 0;
 }
 
+static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
+                            u32 dst_reg, u32 src_reg, u32 index_reg, int off)
+{
+       u8 *prog = *pprog;
+
+       EMIT1(0xF0); /* lock prefix */
+       switch (size) {
+       case BPF_W:
+               EMIT1(add_3mod(0x40, dst_reg, src_reg, index_reg));
+               break;
+       case BPF_DW:
+               EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg));
+               break;
+       default:
+               pr_err("bpf_jit: 1 and 2 byte atomics are not supported\n");
+               return -EFAULT;
+       }
+
+       /* emit opcode */
+       switch (atomic_op) {
+       case BPF_ADD:
+       case BPF_AND:
+       case BPF_OR:
+       case BPF_XOR:
+               /* lock *(u32/u64*)(dst_reg + idx_reg + off) <op>= src_reg */
+               EMIT1(simple_alu_opcodes[atomic_op]);
+               break;
+       case BPF_ADD | BPF_FETCH:
+               /* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */
+               EMIT2(0x0F, 0xC1);
+               break;
+       case BPF_XCHG:
+               /* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */
+               EMIT1(0x87);
+               break;
+       case BPF_CMPXCHG:
+               /* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */
+               EMIT2(0x0F, 0xB1);
+               break;
+       default:
+               pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
+               return -EFAULT;
+       }
+       emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
+       *pprog = prog;
+       return 0;
+}
+
 #define DONT_CLEAR 1
 
 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
@@ -1982,6 +2030,15 @@ populate_extable:
                                return err;
                        break;
 
+               case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
+               case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
+                       start_of_ldx = prog;
+                       err = emit_atomic_index(&prog, insn->imm, BPF_SIZE(insn->code),
+                                               dst_reg, src_reg, X86_REG_R12, insn->off);
+                       if (err)
+                               return err;
+                       goto populate_extable;
+
                        /* call */
                case BPF_JMP | BPF_CALL: {
                        int offs;
@@ -3486,6 +3543,21 @@ bool bpf_jit_supports_arena(void)
        return true;
 }
 
+bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
+{
+       if (!in_arena)
+               return true;
+       switch (insn->code) {
+       case BPF_STX | BPF_ATOMIC | BPF_W:
+       case BPF_STX | BPF_ATOMIC | BPF_DW:
+               if (insn->imm == (BPF_AND | BPF_FETCH) ||
+                   insn->imm == (BPF_OR | BPF_FETCH) ||
+                   insn->imm == (BPF_XOR | BPF_FETCH))
+                       return false;
+       }
+       return true;
+}
+
 bool bpf_jit_supports_ptr_xchg(void)
 {
        return true;

include/linux/filter.h
index 161d5f7b64ed1d7a73537699ab361567c5d904d7..7a27f19bf44d08839460105714ab93e044225b26 100644
@@ -75,6 +75,9 @@ struct ctl_table_header;
 /* unused opcode to mark special load instruction. Same as BPF_MSH */
 #define BPF_PROBE_MEM32        0xa0
 
+/* unused opcode to mark special atomic instruction */
+#define BPF_PROBE_ATOMIC 0xe0
+
 /* unused opcode to mark call to interpreter with arguments */
 #define BPF_CALL_ARGS  0xe0
 
@@ -997,6 +1000,7 @@ bool bpf_jit_supports_far_kfunc_call(void);
 bool bpf_jit_supports_exceptions(void);
 bool bpf_jit_supports_ptr_xchg(void);
 bool bpf_jit_supports_arena(void);
+bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
 void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
 bool bpf_helper_changes_pkt_data(void *func);
 

kernel/bpf/core.c
index 7a33a3a7e63cb7d52244d09ad23eb98cd57e2f74..a41718eaeefe79bf4c4f2154091042d225a04e4b 100644
@@ -2965,6 +2965,11 @@ bool __weak bpf_jit_supports_arena(void)
        return false;
 }
 
+bool __weak bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
+{
+       return false;
+}
+
 /* Return TRUE if the JIT backend satisfies the following two conditions:
  * 1) JIT backend supports atomic_xchg() on pointer-sized words.
  * 2) Under the specific arch, the implementation of xchg() is the same

kernel/bpf/verifier.c
index 590db4e4c071cd3c32956811394ac4f606d0b110..2aad6d90550f05780c2450f921a91a1b82a8a17b 100644
@@ -6970,6 +6970,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
        return err;
 }
 
+static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type,
+                            bool allow_trust_missmatch);
+
 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
 {
        int load_reg;
@@ -7030,7 +7033,7 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
            is_pkt_reg(env, insn->dst_reg) ||
            is_flow_key_reg(env, insn->dst_reg) ||
            is_sk_reg(env, insn->dst_reg) ||
-           is_arena_reg(env, insn->dst_reg)) {
+           (is_arena_reg(env, insn->dst_reg) && !bpf_jit_supports_insn(insn, true))) {
                verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
                        insn->dst_reg,
                        reg_type_str(env, reg_state(env, insn->dst_reg)->type));
@@ -7066,6 +7069,11 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
        if (err)
                return err;
 
+       if (is_arena_reg(env, insn->dst_reg)) {
+               err = save_aux_ptr_type(env, PTR_TO_ARENA, false);
+               if (err)
+                       return err;
+       }
        /* Check whether we can write into the same memory. */
        err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                               BPF_SIZE(insn->code), BPF_WRITE, -1, true, false);
@@ -18955,6 +18963,12 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                           insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
                           insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
                        type = BPF_WRITE;
+               } else if ((insn->code == (BPF_STX | BPF_ATOMIC | BPF_W) ||
+                           insn->code == (BPF_STX | BPF_ATOMIC | BPF_DW)) &&
+                          env->insn_aux_data[i + delta].ptr_type == PTR_TO_ARENA) {
+                       insn->code = BPF_STX | BPF_PROBE_ATOMIC | BPF_SIZE(insn->code);
+                       env->prog->aux->num_exentries++;
+                       continue;
                } else {
                        continue;
                }
@@ -19226,6 +19240,9 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                             BPF_CLASS(insn->code) == BPF_ST) &&
                             BPF_MODE(insn->code) == BPF_PROBE_MEM32)
                                num_exentries++;
+                       if (BPF_CLASS(insn->code) == BPF_STX &&
+                            BPF_MODE(insn->code) == BPF_PROBE_ATOMIC)
+                               num_exentries++;
                }
                func[i]->aux->num_exentries = num_exentries;
                func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;