return bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm);
 }
 
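+/*
+ * may_goto is encoded as BPF_JMP | BPF_JCOND with src_reg == BPF_MAY_GOTO;
+ * dst_reg and imm must be zero and insn->off carries the jump target.
+ * The insn normally falls through, but the kernel may instead take the
+ * jump to bound execution: the verifier explores both outcomes, and
+ * do_misc_fixups() lowers the insn into a decrement-and-test on a hidden
+ * per-subprog loop counter.
+ */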
+static bool is_may_goto_insn(struct bpf_insn *insn)
+{
+       return insn->code == (BPF_JMP | BPF_JCOND) && insn->src_reg == BPF_MAY_GOTO;
+}
+
+static bool is_may_goto_insn_at(struct bpf_verifier_env *env, int insn_idx)
+{
+       return is_may_goto_insn(&env->prog->insnsi[insn_idx]);
+}
+
 static bool is_storage_get_function(enum bpf_func_id func_id)
 {
        return func_id == BPF_FUNC_sk_storage_get ||
        dst_state->dfs_depth = src->dfs_depth;
        dst_state->callback_unroll_depth = src->callback_unroll_depth;
        dst_state->used_as_loop_entry = src->used_as_loop_entry;
+       dst_state->may_goto_depth = src->may_goto_depth;
        for (i = 0; i <= src->curframe; i++) {
                dst = dst_state->frame[i];
                if (!dst) {
        int err;
 
        /* Only conditional jumps are expected to reach here. */
-       if (opcode == BPF_JA || opcode > BPF_JSLE) {
+       if (opcode == BPF_JA || opcode > BPF_JCOND) {
                verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
                return -EINVAL;
        }
 
+       if (opcode == BPF_JCOND) {
+               struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st;
+               int idx = *insn_idx;
+
+               if (insn->code != (BPF_JMP | BPF_JCOND) ||
+                   insn->src_reg != BPF_MAY_GOTO ||
+                   insn->dst_reg || insn->imm || insn->off == 0) {
+                       verbose(env, "invalid may_goto off %d imm %d\n",
+                               insn->off, insn->imm);
+                       return -EINVAL;
+               }
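+               /* Find the previous checkpoint at this insn so scalars in
+                * the queued state below can be widened, letting the loop
+                * states converge instead of enumerating every counter value.
+                */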
+               prev_st = find_prev_entry(env, cur_st->parent, idx);
+
+               /* branch out 'fallthrough' insn as a new state to explore */
+               queued_st = push_stack(env, idx + 1, idx, false);
+               if (!queued_st)
+                       return -ENOMEM;
+
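+               /* The queued fallthrough state stays in the loop: bump its
+                * depth, which is_state_visited() uses to tell iterations
+                * apart.  The current state takes the branch instead.
+                */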
+               queued_st->may_goto_depth++;
+               if (prev_st)
+                       widen_imprecise_scalars(env, prev_st, queued_st);
+               *insn_idx += insn->off;
+               return 0;
+       }
+
        /* check src2 operand */
        err = check_reg_arg(env, insn->dst_reg, SRC_OP);
        if (err)
        default:
                /* conditional jump with two edges */
                mark_prune_point(env, t);
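+               /* Force a checkpoint at every may_goto so is_state_visited()
+                * can attempt loop convergence exactly at this insn.
+                */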
+               if (is_may_goto_insn(insn))
+                       mark_force_checkpoint(env, t);
 
                ret = push_insn(t, t + 1, FALLTHROUGH, env);
                if (ret)
                                }
                                goto skip_inf_loop_check;
                        }
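+                       /* For may_goto, an equivalent old state means the
+                        * loop has converged; otherwise skip the infinite
+                        * loop check, since the runtime counter bounds the
+                        * iteration count anyway.
+                        */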
+                       if (is_may_goto_insn_at(env, insn_idx)) {
+                               if (states_equal(env, &sl->state, cur, true)) {
+                                       update_loop_entry(cur, &sl->state);
+                                       goto hit;
+                               }
+                               goto skip_inf_loop_check;
+                       }
                        if (calls_callback(env, insn_idx)) {
                                if (states_equal(env, &sl->state, cur, true))
                                        goto hit;
                        if (states_maybe_looping(&sl->state, cur) &&
                            states_equal(env, &sl->state, cur, true) &&
                            !iter_active_depths_differ(&sl->state, cur) &&
+                           sl->state.may_goto_depth == cur->may_goto_depth &&
                            sl->state.callback_unroll_depth == cur->callback_unroll_depth) {
                                verbose_linfo(env, insn_idx, "; ");
                                verbose(env, "infinite loop detected at insn %d\n", insn_idx);
        struct bpf_insn insn_buf[16];
        struct bpf_prog *new_prog;
        struct bpf_map *map_ptr;
-       int i, ret, cnt, delta = 0;
+       int i, ret, cnt, delta = 0, cur_subprog = 0;
+       struct bpf_subprog_info *subprogs = env->subprog_info;
+       u16 stack_depth = subprogs[cur_subprog].stack_depth;
+       u16 stack_depth_extra = 0;
 
        if (env->seen_exception && !env->exception_callback_subprog) {
                struct bpf_insn patch[] = {
                mark_subprog_exc_cb(env, env->exception_callback_subprog);
        }
 
-       for (i = 0; i < insn_cnt; i++, insn++) {
+       for (i = 0; i < insn_cnt;) {
                /* Make divide-by-zero exceptions impossible. */
                if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
                    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
                        delta    += cnt - 1;
                        env->prog = prog = new_prog;
                        insn      = new_prog->insnsi + i + delta;
-                       continue;
+                       goto next_insn;
                }
 
                /* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
                        delta    += cnt - 1;
                        env->prog = prog = new_prog;
                        insn      = new_prog->insnsi + i + delta;
-                       continue;
+                       goto next_insn;
                }
 
                /* Rewrite pointer arithmetic to mitigate speculation attacks. */
                        aux = &env->insn_aux_data[i + delta];
                        if (!aux->alu_state ||
                            aux->alu_state == BPF_ALU_NON_POINTER)
-                               continue;
+                               goto next_insn;
 
                        isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
                        issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
                        delta    += cnt - 1;
                        env->prog = prog = new_prog;
                        insn      = new_prog->insnsi + i + delta;
-                       continue;
+                       goto next_insn;
+               }
+
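+               /* Lower may_goto into a counter kept in a hidden 8-byte
+                * slot just below the subprog stack (initialized in the
+                * prologue patch at the end of this function):
+                *
+                *   BPF_REG_AX = *(u64 *)(r10 - stack_depth - 8)
+                *   if BPF_REG_AX == 0 goto <may_goto target>
+                *   BPF_REG_AX -= 1
+                *   *(u64 *)(r10 - stack_depth - 8) = BPF_REG_AX
+                *
+                * The branch uses insn->off + 2 to hop over the two insns
+                * that follow it.  BPF_REG_AX is the verifier's scratch
+                * register, so no program-visible register is clobbered.
+                */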
+               if (is_may_goto_insn(insn)) {
+                       int stack_off = -stack_depth - 8;
+
+                       stack_depth_extra = 8;
+                       insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_AX, BPF_REG_10, stack_off);
+                       insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off + 2);
+                       insn_buf[2] = BPF_ALU64_IMM(BPF_SUB, BPF_REG_AX, 1);
+                       insn_buf[3] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_AX, stack_off);
+                       cnt = 4;
+
+                       new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+                       if (!new_prog)
+                               return -ENOMEM;
+
+                       delta += cnt - 1;
+                       env->prog = prog = new_prog;
+                       insn = new_prog->insnsi + i + delta;
+                       goto next_insn;
                }
 
                if (insn->code != (BPF_JMP | BPF_CALL))
-                       continue;
+                       goto next_insn;
                if (insn->src_reg == BPF_PSEUDO_CALL)
-                       continue;
+                       goto next_insn;
                if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
                        ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt);
                        if (ret)
                                return ret;
                        if (cnt == 0)
-                               continue;
+                               goto next_insn;
 
                        new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
                        if (!new_prog)
                        delta    += cnt - 1;
                        env->prog = prog = new_prog;
                        insn      = new_prog->insnsi + i + delta;
-                       continue;
+                       goto next_insn;
                }
 
                if (insn->imm == BPF_FUNC_get_route_realm)
                                }
 
                                insn->imm = ret + 1;
-                               continue;
+                               goto next_insn;
                        }
 
                        if (!bpf_map_ptr_unpriv(aux))
-                               continue;
+                               goto next_insn;
 
                        /* instead of changing every JIT dealing with tail_call
                         * emit two extra insns:
                        delta    += cnt - 1;
                        env->prog = prog = new_prog;
                        insn      = new_prog->insnsi + i + delta;
-                       continue;
+                       goto next_insn;
                }
 
                if (insn->imm == BPF_FUNC_timer_set_callback) {
                                delta    += cnt - 1;
                                env->prog = prog = new_prog;
                                insn      = new_prog->insnsi + i + delta;
-                               continue;
+                               goto next_insn;
                        }
 
                        BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
                        switch (insn->imm) {
                        case BPF_FUNC_map_lookup_elem:
                                insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
-                               continue;
+                               goto next_insn;
                        case BPF_FUNC_map_update_elem:
                                insn->imm = BPF_CALL_IMM(ops->map_update_elem);
-                               continue;
+                               goto next_insn;
                        case BPF_FUNC_map_delete_elem:
                                insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
-                               continue;
+                               goto next_insn;
                        case BPF_FUNC_map_push_elem:
                                insn->imm = BPF_CALL_IMM(ops->map_push_elem);
-                               continue;
+                               goto next_insn;
                        case BPF_FUNC_map_pop_elem:
                                insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
-                               continue;
+                               goto next_insn;
                        case BPF_FUNC_map_peek_elem:
                                insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
-                               continue;
+                               goto next_insn;
                        case BPF_FUNC_redirect_map:
                                insn->imm = BPF_CALL_IMM(ops->map_redirect);
-                               continue;
+                               goto next_insn;
                        case BPF_FUNC_for_each_map_elem:
                                insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
-                               continue;
+                               goto next_insn;
                        case BPF_FUNC_map_lookup_percpu_elem:
                                insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem);
-                               continue;
+                               goto next_insn;
                        }
 
                        goto patch_call_imm;
                        delta    += cnt - 1;
                        env->prog = prog = new_prog;
                        insn      = new_prog->insnsi + i + delta;
-                       continue;
+                       goto next_insn;
                }
 
                /* Implement bpf_get_func_arg inline. */
                        delta    += cnt - 1;
                        env->prog = prog = new_prog;
                        insn      = new_prog->insnsi + i + delta;
-                       continue;
+                       goto next_insn;
                }
 
                /* Implement bpf_get_func_ret inline. */
                        delta    += cnt - 1;
                        env->prog = prog = new_prog;
                        insn      = new_prog->insnsi + i + delta;
-                       continue;
+                       goto next_insn;
                }
 
                /* Implement get_func_arg_cnt inline. */
 
                        env->prog = prog = new_prog;
                        insn      = new_prog->insnsi + i + delta;
-                       continue;
+                       goto next_insn;
                }
 
                /* Implement bpf_get_func_ip inline. */
 
                        env->prog = prog = new_prog;
                        insn      = new_prog->insnsi + i + delta;
-                       continue;
+                       goto next_insn;
                }
 
                /* Implement bpf_kptr_xchg inline */
                        delta    += cnt - 1;
                        env->prog = prog = new_prog;
                        insn      = new_prog->insnsi + i + delta;
-                       continue;
+                       goto next_insn;
                }
 patch_call_imm:
                fn = env->ops->get_func_proto(insn->imm, env->prog);
                        return -EFAULT;
                }
                insn->imm = fn->func - __bpf_call_base;
+next_insn:
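+               /* On a subprog boundary, fold any hidden stack reserved by
+                * this pass into the subprog's depth and reset the tracking
+                * for the next subprog.
+                */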
+               if (subprogs[cur_subprog + 1].start == i + delta + 1) {
+                       subprogs[cur_subprog].stack_depth += stack_depth_extra;
+                       subprogs[cur_subprog].stack_extra = stack_depth_extra;
+                       cur_subprog++;
+                       stack_depth = subprogs[cur_subprog].stack_depth;
+                       stack_depth_extra = 0;
+               }
+               i++;
+               insn++;
+       }
+
+       env->prog->aux->stack_depth = subprogs[0].stack_depth;
+       for (i = 0; i < env->subprog_cnt; i++) {
+               int subprog_start = subprogs[i].start;
+               int stack_slots = subprogs[i].stack_extra / 8;
+
+               if (!stack_slots)
+                       continue;
+               if (stack_slots > 1) {
+                       verbose(env, "verifier bug: stack_slots supports may_goto only\n");
+                       return -EFAULT;
+               }
+
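+               /* The counter starts at BPF_MAX_LOOPS (the same bound
+                * bpf_loop() enforces); all may_goto insns in a subprog
+                * share this one slot, so together they can fall through
+                * at most that many times at runtime.
+                */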
+               /* Add ST insn to subprog prologue to init extra stack */
+               insn_buf[0] = BPF_ST_MEM(BPF_DW, BPF_REG_FP,
+                                        -subprogs[i].stack_depth, BPF_MAX_LOOPS);
+               /* Copy first actual insn to preserve it */
+               insn_buf[1] = env->prog->insnsi[subprog_start];
+
+               new_prog = bpf_patch_insn_data(env, subprog_start, insn_buf, 2);
+               if (!new_prog)
+                       return -ENOMEM;
+               env->prog = prog = new_prog;
        }
 
        /* Since poke tab is now finalized, publish aux to tracker. */