bpf, tests: Add tests for ALU operations implemented with function calls
author     Johan Almbladh <johan.almbladh@anyfinetworks.com>
           Mon, 9 Aug 2021 09:18:23 +0000 (11:18 +0200)
committer  Daniel Borkmann <daniel@iogearbox.net>
           Tue, 10 Aug 2021 09:33:15 +0000 (11:33 +0200)
32-bit JITs may implement complex ALU64 instructions using function calls.
The new tests check that such calls are invisible at the eBPF level: no eBPF
register may be clobbered across the call, and operand registers must be
re-ordered correctly into the call's argument registers (see the illustrative
sketch below).

Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20210809091829.810076-9-johan.almbladh@anyfinetworks.com
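
For reference, the sketch below illustrates the code pattern the new tests
exercise. It is not part of the patch: struct jit_ctx and the emit_*()
routines are hypothetical placeholders for an architecture's real emitter
functions; only div64_u64() (declared in include/linux/math64.h) is an actual
kernel helper that a 32-bit JIT might call for a 64-bit division.

/*
 * Hypothetical sketch of how a 32-bit JIT might lower
 * BPF_ALU64 | BPF_DIV | BPF_X to a helper call.  struct jit_ctx and
 * the emit_*() functions are illustrative only.
 */
static void emit_div64_reg(struct jit_ctx *ctx, u8 dst, u8 src)
{
	/*
	 * Save host registers holding live eBPF registers so the call
	 * is invisible at the eBPF level (the "register clobbering"
	 * tests check exactly this).
	 */
	emit_save_caller_regs(ctx);

	/*
	 * Move the operands into the host calling convention: dividend
	 * in the first argument register pair, divisor in the second,
	 * whichever eBPF registers they currently occupy (the "operand
	 * register permutations" test checks this re-ordering).
	 */
	emit_mov_arg64(ctx, 0, dst);
	emit_mov_arg64(ctx, 1, src);

	/* u64 div64_u64(u64 dividend, u64 divisor) */
	emit_call(ctx, (unsigned long)&div64_u64);

	/* Copy the 64-bit result back into dst, then restore registers. */
	emit_mov_from_ret64(ctx, dst);
	emit_restore_caller_regs(ctx);
}

The clobbering tests below load known values into R0..R9, perform one 64-bit
division, and then compare every register against its expected value, so any
register the call fails to preserve changes the result. The permutation test
repeats the same division across several pairings of source and destination
registers to catch mistakes in the operand re-ordering step.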
lib/test_bpf.c

index 072f9c51bd9bc1d57393a1bf62b26c99be4fdd84..e3c2569630209a02038b11e5e90e5fd4b900e7ce 100644
@@ -1916,6 +1916,147 @@ static struct bpf_test tests[] = {
                { },
                { { 0, -1 } }
        },
+       {
+               /*
+                * Register (non-)clobbering test, in the case where a 32-bit
+                * JIT implements complex ALU64 operations via function calls.
+                * If so, the function call must be invisible in the eBPF
+                * registers. The JIT must then save and restore relevant
+                * registers during the call. The following tests check that
+                * the eBPF registers retain their values after such a call.
+                */
+               "INT: Register clobbering, R1 updated",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 123456789),
+                       BPF_ALU32_IMM(BPF_MOV, R2, 2),
+                       BPF_ALU32_IMM(BPF_MOV, R3, 3),
+                       BPF_ALU32_IMM(BPF_MOV, R4, 4),
+                       BPF_ALU32_IMM(BPF_MOV, R5, 5),
+                       BPF_ALU32_IMM(BPF_MOV, R6, 6),
+                       BPF_ALU32_IMM(BPF_MOV, R7, 7),
+                       BPF_ALU32_IMM(BPF_MOV, R8, 8),
+                       BPF_ALU32_IMM(BPF_MOV, R9, 9),
+                       BPF_ALU64_IMM(BPF_DIV, R1, 123456789),
+                       BPF_JMP_IMM(BPF_JNE, R0, 0, 10),
+                       BPF_JMP_IMM(BPF_JNE, R1, 1, 9),
+                       BPF_JMP_IMM(BPF_JNE, R2, 2, 8),
+                       BPF_JMP_IMM(BPF_JNE, R3, 3, 7),
+                       BPF_JMP_IMM(BPF_JNE, R4, 4, 6),
+                       BPF_JMP_IMM(BPF_JNE, R5, 5, 5),
+                       BPF_JMP_IMM(BPF_JNE, R6, 6, 4),
+                       BPF_JMP_IMM(BPF_JNE, R7, 7, 3),
+                       BPF_JMP_IMM(BPF_JNE, R8, 8, 2),
+                       BPF_JMP_IMM(BPF_JNE, R9, 9, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } }
+       },
+       {
+               "INT: Register clobbering, R2 updated",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R2, 2 * 123456789),
+                       BPF_ALU32_IMM(BPF_MOV, R3, 3),
+                       BPF_ALU32_IMM(BPF_MOV, R4, 4),
+                       BPF_ALU32_IMM(BPF_MOV, R5, 5),
+                       BPF_ALU32_IMM(BPF_MOV, R6, 6),
+                       BPF_ALU32_IMM(BPF_MOV, R7, 7),
+                       BPF_ALU32_IMM(BPF_MOV, R8, 8),
+                       BPF_ALU32_IMM(BPF_MOV, R9, 9),
+                       BPF_ALU64_IMM(BPF_DIV, R2, 123456789),
+                       BPF_JMP_IMM(BPF_JNE, R0, 0, 10),
+                       BPF_JMP_IMM(BPF_JNE, R1, 1, 9),
+                       BPF_JMP_IMM(BPF_JNE, R2, 2, 8),
+                       BPF_JMP_IMM(BPF_JNE, R3, 3, 7),
+                       BPF_JMP_IMM(BPF_JNE, R4, 4, 6),
+                       BPF_JMP_IMM(BPF_JNE, R5, 5, 5),
+                       BPF_JMP_IMM(BPF_JNE, R6, 6, 4),
+                       BPF_JMP_IMM(BPF_JNE, R7, 7, 3),
+                       BPF_JMP_IMM(BPF_JNE, R8, 8, 2),
+                       BPF_JMP_IMM(BPF_JNE, R9, 9, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } }
+       },
+       {
+               /*
+                * Test 32-bit JITs that implement complex ALU64 operations as
+                * function calls R0 = f(R1, R2), and must re-arrange operands.
+                */
+#define NUMER 0xfedcba9876543210ULL
+#define DENOM 0x0123456789abcdefULL
+               "ALU64_DIV X: Operand register permutations",
+               .u.insns_int = {
+                       /* R0 / R2 */
+                       BPF_LD_IMM64(R0, NUMER),
+                       BPF_LD_IMM64(R2, DENOM),
+                       BPF_ALU64_REG(BPF_DIV, R0, R2),
+                       BPF_JMP_IMM(BPF_JEQ, R0, NUMER / DENOM, 1),
+                       BPF_EXIT_INSN(),
+                       /* R1 / R0 */
+                       BPF_LD_IMM64(R1, NUMER),
+                       BPF_LD_IMM64(R0, DENOM),
+                       BPF_ALU64_REG(BPF_DIV, R1, R0),
+                       BPF_JMP_IMM(BPF_JEQ, R1, NUMER / DENOM, 1),
+                       BPF_EXIT_INSN(),
+                       /* R0 / R1 */
+                       BPF_LD_IMM64(R0, NUMER),
+                       BPF_LD_IMM64(R1, DENOM),
+                       BPF_ALU64_REG(BPF_DIV, R0, R1),
+                       BPF_JMP_IMM(BPF_JEQ, R0, NUMER / DENOM, 1),
+                       BPF_EXIT_INSN(),
+                       /* R2 / R0 */
+                       BPF_LD_IMM64(R2, NUMER),
+                       BPF_LD_IMM64(R0, DENOM),
+                       BPF_ALU64_REG(BPF_DIV, R2, R0),
+                       BPF_JMP_IMM(BPF_JEQ, R2, NUMER / DENOM, 1),
+                       BPF_EXIT_INSN(),
+                       /* R2 / R1 */
+                       BPF_LD_IMM64(R2, NUMER),
+                       BPF_LD_IMM64(R1, DENOM),
+                       BPF_ALU64_REG(BPF_DIV, R2, R1),
+                       BPF_JMP_IMM(BPF_JEQ, R2, NUMER / DENOM, 1),
+                       BPF_EXIT_INSN(),
+                       /* R1 / R2 */
+                       BPF_LD_IMM64(R1, NUMER),
+                       BPF_LD_IMM64(R2, DENOM),
+                       BPF_ALU64_REG(BPF_DIV, R1, R2),
+                       BPF_JMP_IMM(BPF_JEQ, R1, NUMER / DENOM, 1),
+                       BPF_EXIT_INSN(),
+                       /* R1 / R1 */
+                       BPF_LD_IMM64(R1, NUMER),
+                       BPF_ALU64_REG(BPF_DIV, R1, R1),
+                       BPF_JMP_IMM(BPF_JEQ, R1, 1, 1),
+                       BPF_EXIT_INSN(),
+                       /* R2 / R2 */
+                       BPF_LD_IMM64(R2, DENOM),
+                       BPF_ALU64_REG(BPF_DIV, R2, R2),
+                       BPF_JMP_IMM(BPF_JEQ, R2, 1, 1),
+                       BPF_EXIT_INSN(),
+                       /* R3 / R4 */
+                       BPF_LD_IMM64(R3, NUMER),
+                       BPF_LD_IMM64(R4, DENOM),
+                       BPF_ALU64_REG(BPF_DIV, R3, R4),
+                       BPF_JMP_IMM(BPF_JEQ, R3, NUMER / DENOM, 1),
+                       BPF_EXIT_INSN(),
+                       /* Successful return */
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+#undef NUMER
+#undef DENOM
+       },
        {
                "check: missing ret",
                .u.insns = {