bpf: ensure state checkpointing at iter_next() call sites
author Andrii Nakryiko <andrii@kernel.org>
Fri, 10 Mar 2023 06:01:49 +0000 (22:01 -0800)
committer Alexei Starovoitov <ast@kernel.org>
Fri, 10 Mar 2023 16:31:42 +0000 (08:31 -0800)
The state equivalence check and checkpointing performed in is_state_visited()
employ certain heuristics to try to save memory by avoiding state checkpoints
if not enough jumps and instructions happened since the last checkpoint. This
makes it unpredictable whether a particular instruction will be checkpointed
and how regularly. While normally this does not cause many problems (except
inconveniences for predictable verifier tests, which we overcome with the
BPF_F_TEST_STATE_FREQ flag), it turns out that this is not the case for
open-coded iterators.
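
For context, the heuristic in question boils down to the following condition
in is_state_visited() (a simplified sketch; the exact lines appear in the
second verifier.c hunk below):

	/* skip saving a new checkpoint unless enough jumps and
	 * instructions were processed since the last saved state
	 */
	if (env->jmps_processed - env->prev_jmps_processed < 20 &&
	    env->insn_processed - env->prev_insn_processed < 100)
		add_new_state = false;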

Checking and saving state checkpoints at iter_next() call sites is crucial
for fast convergence of open-coded iterator loop logic, so we need to force
it. If we don't, is_state_visited() might skip saving a checkpoint, causing
an unnecessarily long sequence of non-checkpointed instructions and jumps,
leading to exhaustion of the jump history buffer and potentially other
undesired outcomes. It is expected that with correct open-coded iterators
convergence will happen quickly, so we don't run the risk of exhausting
memory.
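
To illustrate, here is a hypothetical BPF program fragment using the
open-coded numbers iterator (bpf_iter_num_*); every bpf_iter_num_next() call
in it is an iter_next() call site in the sense used above:

	/* hypothetical fragment: iterate over the numbers [0, 100) */
	struct bpf_iter_num it;
	int *v, sum = 0;

	bpf_iter_num_new(&it, 0, 100);
	/* each loop iteration goes through this iter_next() call site,
	 * which we now always checkpoint
	 */
	while ((v = bpf_iter_num_next(&it)))
		sum += *v;
	bpf_iter_num_destroy(&it);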

This patch adds, in addition to the existing prune point and jump point
instruction marks, a "forced checkpoint" mark, and makes sure that any
iter_next() call instruction is marked as such.
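
Pieced together from the hunks below, the resulting flow is roughly:

	/* CFG walk (visit_insn()): iter_next() kfunc calls get both marks */
	mark_prune_point(env, t);
	mark_force_checkpoint(env, t);

	/* verification (is_state_visited()): the mark, like the
	 * BPF_F_TEST_STATE_FREQ flag, bypasses the memory-saving heuristic
	 */
	force_new_state = env->test_state_freq ||
			  is_force_checkpoint(env, insn_idx);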

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20230310060149.625887-1-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf_verifier.h
kernel/bpf/verifier.c

index 0c052bc799401205f7c18b5890c7949f8b540322..81d525d057c7b366167f74e59e1204e23423e7fa 100644
@@ -477,8 +477,12 @@ struct bpf_insn_aux_data {
 
        /* below fields are initialized once */
        unsigned int orig_idx; /* original instruction index */
-       bool prune_point;
        bool jmp_point;
+       bool prune_point;
+       /* ensure we check state equivalence and save state checkpoint at
+        * this instruction, regardless of any heuristics
+        */
+       bool force_checkpoint;
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
index 45a0822844648973dc822fdee71ab854addf13f9..13fd4c893f3b0d206fc5b80e44bdc666b715e4c1 100644
@@ -13865,6 +13865,17 @@ static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx)
        return env->insn_aux_data[insn_idx].prune_point;
 }
 
+static void mark_force_checkpoint(struct bpf_verifier_env *env, int idx)
+{
+       env->insn_aux_data[idx].force_checkpoint = true;
+}
+
+static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx)
+{
+       return env->insn_aux_data[insn_idx].force_checkpoint;
+}
+
 enum {
        DONE_EXPLORING = 0,
        KEEP_EXPLORING = 1,
@@ -13984,8 +13995,21 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
                        struct bpf_kfunc_call_arg_meta meta;
 
                        ret = fetch_kfunc_meta(env, insn, &meta, NULL);
-                       if (ret == 0 && is_iter_next_kfunc(&meta))
+                       if (ret == 0 && is_iter_next_kfunc(&meta)) {
                                mark_prune_point(env, t);
+                               /* Checking and saving state checkpoints at iter_next() call
+                                * sites is crucial for fast convergence of open-coded
+                                * iterator loop logic, so we need to force it. If we don't
+                                * do that, is_state_visited() might skip saving a
+                                * checkpoint, causing an unnecessarily long sequence of
+                                * non-checkpointed instructions and jumps, leading to
+                                * exhaustion of the jump history buffer and potentially
+                                * other undesired outcomes. It is expected that with
+                                * correct open-coded iterators convergence will happen
+                                * quickly, so we don't run the risk of exhausting memory.
+                                */
+                               mark_force_checkpoint(env, t);
+                       }
                }
                return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL);
 
@@ -15172,7 +15196,8 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
        struct bpf_verifier_state_list *sl, **pprev;
        struct bpf_verifier_state *cur = env->cur_state, *new;
        int i, j, err, states_cnt = 0;
-       bool add_new_state = env->test_state_freq ? true : false;
+       bool force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx);
+       bool add_new_state = force_new_state;
 
        /* bpf progs typically have pruning point every 4 instructions
         * http://vger.kernel.org/bpfconf2019.html#session-1
@@ -15269,7 +15294,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
                         * at the end of the loop are likely to be useful in pruning.
                         */
 skip_inf_loop_check:
-                       if (!env->test_state_freq &&
+                       if (!force_new_state &&
                            env->jmps_processed - env->prev_jmps_processed < 20 &&
                            env->insn_processed - env->prev_insn_processed < 100)
                                add_new_state = false;