bpf: enhance BPF_JEQ/BPF_JNE is_branch_taken logic
author Andrii Nakryiko <andrii@kernel.org>
Sun, 12 Nov 2023 01:05:59 +0000 (17:05 -0800)
committer Alexei Starovoitov <ast@kernel.org>
Wed, 15 Nov 2023 20:03:42 +0000 (12:03 -0800)
Use 32-bit subranges to prune some 64-bit BPF_JEQ/BPF_JNE conditions
that would otherwise be "inconclusive" (i.e., is_branch_taken() would
return -1). This can happen, for example, when registers are initialized
as 64-bit u64/s64 values, then compared for inequality as 32-bit
subregisters, and then followed by a 64-bit equality/inequality check.
That 32-bit inequality can establish some pattern for the lower 32 bits
of a register (e.g., an s< 0 condition determines whether bit #31 is
zero or not), while the overall 64-bit value could still be anything
(according to its value range representation).
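
As a rough standalone sketch (plain C, independent of the verifier; the
struct and function name below are made up for illustration), the idea
is that two 64-bit values can only be equal if their low 32 bits are
equal, so disjoint 32-bit subranges are enough to settle a 64-bit
BPF_JEQ/BPF_JNE:

  #include <stdbool.h>
  #include <stdint.h>

  /* 32-bit subrange knowledge about a register's low 32 bits */
  struct sub32 {
          uint32_t u32_min, u32_max;  /* unsigned subrange */
          int32_t  s32_min, s32_max;  /* signed subrange */
  };

  /* true means a 64-bit JEQ between the two registers can never be
   * taken (and, equivalently, a 64-bit JNE is always taken): equal
   * 64-bit values must have equal low 32 bits, which is impossible
   * when the subranges don't intersect
   */
  bool jeq64_never_taken(const struct sub32 *r1, const struct sub32 *r2)
  {
          if (r1->u32_min > r2->u32_max || r1->u32_max < r2->u32_min)
                  return true;
          if (r1->s32_min > r2->s32_max || r1->s32_max < r2->s32_min)
                  return true;
          return false;
  }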

This is not a fancy quirky special case, but actually handling that's
necessary to prevent a correctness issue with the BPF verifier's range
tracking: set_range_min_max() assumes that register ranges are
non-overlapping, and if that condition is not guaranteed by
is_branch_taken(), we can end up with invalid ranges where min > max.
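
To see how an invalid range could arise, consider a made-up example (a
minimal sketch with hypothetical numbers; the real set_range_min_max()
logic is more involved): the two 64-bit ranges overlap, but the low
32-bit subranges don't, and narrowing them on the "equal" branch leaves
min > max:

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          /* hypothetical subranges: bit #31 is known set for r1's low
           * 32 bits, while r2's low 32 bits are known small
           */
          uint32_t r1_min = 0x80000000u, r1_max = 0xffffffffu;
          uint32_t r2_min = 0x00000000u, r2_max = 0x0000007fu;

          /* narrowing on the "equal" branch amounts to intersecting */
          uint32_t new_min = r1_min > r2_min ? r1_min : r2_min;
          uint32_t new_max = r1_max < r2_max ? r1_max : r2_max;

          /* prints min=0x80000000 max=0x7f, i.e. min > max: a range
           * that no value can satisfy
           */
          printf("min=%#" PRIx32 " max=%#" PRIx32 "\n", new_min, new_max);
          return 0;
  }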

  [0] https://lore.kernel.org/bpf/CACkBjsY2q1_fUohD7hRmKGqv1MV=eP2f6XK8kjkYNw7BaiF8iQ@mail.gmail.com/

Acked-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20231112010609.848406-4-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/verifier.c

index f459ad99256e12bca1b84f33ce287c106b851d5c..65570eedfe8882978ee0e91c4399d35ae12679bf 100644 (file)
@@ -14283,6 +14283,18 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
                        return 0;
                if (smin1 > smax2 || smax1 < smin2)
                        return 0;
+               if (!is_jmp32) {
+                       /* if 64-bit ranges are inconclusive, see if we can
+                        * utilize 32-bit subrange knowledge to eliminate
+                        * branches that can't be taken a priori
+                        */
+                       if (reg1->u32_min_value > reg2->u32_max_value ||
+                           reg1->u32_max_value < reg2->u32_min_value)
+                               return 0;
+                       if (reg1->s32_min_value > reg2->s32_max_value ||
+                           reg1->s32_max_value < reg2->s32_min_value)
+                               return 0;
+               }
                break;
        case BPF_JNE:
                /* constants, umin/umax and smin/smax checks would be
@@ -14295,6 +14307,18 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
                        return 1;
                if (smin1 > smax2 || smax1 < smin2)
                        return 1;
+               if (!is_jmp32) {
+                       /* if 64-bit ranges are inconclusive, see if we can
+                        * utilize 32-bit subrange knowledge to eliminate
+                        * branches that can't be taken a priori
+                        */
+                       if (reg1->u32_min_value > reg2->u32_max_value ||
+                           reg1->u32_max_value < reg2->u32_min_value)
+                               return 1;
+                       if (reg1->s32_min_value > reg2->s32_max_value ||
+                           reg1->s32_max_value < reg2->s32_min_value)
+                               return 1;
+               }
                break;
        case BPF_JSET:
                if (!is_reg_const(reg2, is_jmp32)) {