        return 0;
 }
 
-static int bpf_size_to_bytes(int bpf_size)
-{
-       if (bpf_size == BPF_W)
-               return 4;
-       else if (bpf_size == BPF_H)
-               return 2;
-       else if (bpf_size == BPF_B)
-               return 1;
-       else if (bpf_size == BPF_DW)
-               return 8;
-       else
-               return -EINVAL;
-}
-
 static bool is_spillable_regtype(enum bpf_reg_type type)
 {
        switch (type) {
 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
                            enum bpf_access_type t, enum bpf_reg_type *reg_type)
 {
-       struct bpf_insn_access_aux info = { .reg_type = *reg_type };
+       struct bpf_insn_access_aux info = {
+               .reg_type = *reg_type,
+       };
 
        /* for analyzer ctx accesses are already validated and converted */
        if (env->analyzer_ops)
                return 0;

        if (env->prog->aux->ops->is_valid_access &&
            env->prog->aux->ops->is_valid_access(off, size, t, &info)) {
-               /* a non zero info.ctx_field_size indicates:
-                * . For this field, the prog type specific ctx conversion algorithm
-                *   only supports whole field access.
-                * . This ctx access is a candiate for later verifier transformation
-                *   to load the whole field and then apply a mask to get correct result.
-                * a non zero info.converted_op_size indicates perceived actual converted
-                * value width in convert_ctx_access.
+               /* A non zero info.ctx_field_size indicates that this field is a
+                * candidate for later verifier transformation to load the whole
+                * field and then apply a mask when accessed with a narrower
+                * access than actual ctx access size. A zero info.ctx_field_size
+                * will only allow for whole field access and rejects any other
+                * type of narrower access.
                 */
-               if ((info.ctx_field_size && !info.converted_op_size) ||
-                   (!info.ctx_field_size &&  info.converted_op_size)) {
-                       verbose("verifier bug in is_valid_access prog type=%u off=%d size=%d\n",
-                               env->prog->type, off, size);
-                       return -EACCES;
-               }
-
-               if (info.ctx_field_size) {
-                       env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
-                       env->insn_aux_data[insn_idx].converted_op_size = info.converted_op_size;
-               }
+               env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
                *reg_type = info.reg_type;
 
                /* remember the offset of last byte accessed in ctx */
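
To illustrate the contract described above: a program type's is_valid_access() callback opts a field into the narrow-load rewrite by recording the field's real width, as the filter.c hunks further below do. A minimal sketch, assuming a read-only 4-byte field; the callback name is hypothetical, the two helpers are the ones this patch uses:

static bool example_is_valid_access(int off, int size,
                                    enum bpf_access_type type,
                                    struct bpf_insn_access_aux *info)
{
        const int size_default = sizeof(__u32);

        if (type == BPF_WRITE)
                return size == size_default;

        /* Non-zero ctx_field_size: narrow reads of this field may later
         * be widened by the verifier and masked back down to 'size' bytes.
         */
        bpf_ctx_record_field_size(info, size_default);
        return bpf_ctx_narrow_access_ok(off, size, size_default);
}
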
 static int convert_ctx_accesses(struct bpf_verifier_env *env)
 {
        const struct bpf_verifier_ops *ops = env->prog->aux->ops;
+       int i, cnt, size, ctx_field_size, delta = 0;
        const int insn_cnt = env->prog->len;
        struct bpf_insn insn_buf[16], *insn;
        struct bpf_prog *new_prog;
        enum bpf_access_type type;
-       int i, cnt, off, size, ctx_field_size, converted_op_size, is_narrower_load, delta = 0;
+       bool is_narrower_load;
+       u32 target_size;
 
        if (ops->gen_prologue) {
                cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
                if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
                        continue;
 
-               off = insn->off;
-               size = bpf_size_to_bytes(BPF_SIZE(insn->code));
                ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
-               converted_op_size = env->insn_aux_data[i + delta].converted_op_size;
-               is_narrower_load = type == BPF_READ && size < ctx_field_size;
+               size = BPF_LDST_BYTES(insn);
 
                /* If the read access is a narrower load of the field,
                 * convert to a 4/8-byte load, to minimum program type specific
                 * convert_ctx_access changes. If conversion is successful,
                 * we will apply proper mask to the result.
                 */
+               is_narrower_load = size < ctx_field_size;
                if (is_narrower_load) {
-                       int size_code = BPF_H;
+                       u32 off = insn->off;
+                       u8 size_code;
+
+                       if (type == BPF_WRITE) {
+                               verbose("bpf verifier narrow ctx access misconfigured\n");
+                               return -EINVAL;
+                       }
 
+                       size_code = BPF_H;
                        if (ctx_field_size == 4)
                                size_code = BPF_W;
                        else if (ctx_field_size == 8)
                                size_code = BPF_DW;
+
                        insn->off = off & ~(ctx_field_size - 1);
                        insn->code = BPF_LDX | BPF_MEM | size_code;
                }
-               cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog);
-               if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
+
+               target_size = 0;
+               cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog,
+                                             &target_size);
+               if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
+                   (ctx_field_size && !target_size)) {
                        verbose("bpf verifier is misconfigured\n");
                        return -EINVAL;
                }
-               if (is_narrower_load && size < converted_op_size) {
+
+               if (is_narrower_load && size < target_size) {
                        if (ctx_field_size <= 4)
                                insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
-                                                       (1 << size * 8) - 1);
+                                                               (1 << size * 8) - 1);
                        else
                                insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
-                                                       (1 << size * 8) - 1);
+                                                               (1 << size * 8) - 1);
                }
 
                new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 
        }
 }
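
A worked sketch of the arithmetic used in the narrowing path above, pulled out of the verifier for clarity; the offsets and sizes are hypothetical:

/* Align a narrow access down to the start of its ctx field and compute
 * the mask applied after the widened load, mirroring the code above.
 */
static inline unsigned int example_narrow_off(unsigned int off,
                                              unsigned int ctx_field_size)
{
        /* e.g. off = 0x52, ctx_field_size = 4  ->  0x50 */
        return off & ~(ctx_field_size - 1);
}

static inline unsigned long long example_narrow_mask(unsigned int size)
{
        /* e.g. size = 1  ->  0xff, size = 2  ->  0xffff */
        return size < 8 ? (1ULL << size * 8) - 1 : ~0ULL;
}

The mask is only appended when the requested size is still smaller than the target_size reported back by convert_ctx_access(); for a field such as pkt_type, which the rewrite below loads as a single byte and reports with *target_size = 1, a 1-byte program access needs no extra masking.
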
 
-static void __set_access_aux_info(int off, struct bpf_insn_access_aux *info)
+static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
+                                   struct bpf_insn_access_aux *info)
 {
-       info->ctx_field_size = 4;
-       switch (off) {
-       case offsetof(struct __sk_buff, pkt_type) ...
-            offsetof(struct __sk_buff, pkt_type) + sizeof(__u32) - 1:
-       case offsetof(struct __sk_buff, vlan_present) ...
-            offsetof(struct __sk_buff, vlan_present) + sizeof(__u32) - 1:
-               info->converted_op_size = 1;
-               break;
-       case offsetof(struct __sk_buff, queue_mapping) ...
-            offsetof(struct __sk_buff, queue_mapping) + sizeof(__u32) - 1:
-       case offsetof(struct __sk_buff, protocol) ...
-            offsetof(struct __sk_buff, protocol) + sizeof(__u32) - 1:
-       case offsetof(struct __sk_buff, vlan_tci) ...
-            offsetof(struct __sk_buff, vlan_tci) + sizeof(__u32) - 1:
-       case offsetof(struct __sk_buff, vlan_proto) ...
-            offsetof(struct __sk_buff, vlan_proto) + sizeof(__u32) - 1:
-       case offsetof(struct __sk_buff, tc_index) ...
-            offsetof(struct __sk_buff, tc_index) + sizeof(__u32) - 1:
-       case offsetof(struct __sk_buff, tc_classid) ...
-            offsetof(struct __sk_buff, tc_classid) + sizeof(__u32) - 1:
-               info->converted_op_size = 2;
-               break;
-       default:
-               info->converted_op_size = 4;
-       }
-}
+       const int size_default = sizeof(__u32);
 
-static bool __is_valid_access(int off, int size, enum bpf_access_type type,
-                             struct bpf_insn_access_aux *info)
-{
        if (off < 0 || off >= sizeof(struct __sk_buff))
                return false;
 
        /* The verifier guarantees that size > 0. */
        if (off % size != 0)
                return false;
 
        switch (off) {
-       case offsetof(struct __sk_buff, cb[0]) ...
-            offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
-               if (off + size >
-                   offsetof(struct __sk_buff, cb[4]) + sizeof(__u32))
+       case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
+               if (off + size > offsetofend(struct __sk_buff, cb[4]))
                        return false;
                break;
-       case offsetof(struct __sk_buff, data) ...
-            offsetof(struct __sk_buff, data) + sizeof(__u32) - 1:
-               if (size != sizeof(__u32))
+       case bpf_ctx_range(struct __sk_buff, data):
+       case bpf_ctx_range(struct __sk_buff, data_end):
+               if (size != size_default)
                        return false;
-               info->reg_type = PTR_TO_PACKET;
-               break;
-       case offsetof(struct __sk_buff, data_end) ...
-            offsetof(struct __sk_buff, data_end) + sizeof(__u32) - 1:
-               if (size != sizeof(__u32))
-                       return false;
-               info->reg_type = PTR_TO_PACKET_END;
                break;
        default:
+               /* Only narrow read access allowed for now. */
                if (type == BPF_WRITE) {
-                       if (size != sizeof(__u32))
+                       if (size != size_default)
                                return false;
                } else {
-                       int allowed;
-
-                       /* permit narrower load for not cb/data/data_end fields */
-#ifdef __LITTLE_ENDIAN
-                       allowed = (off & 0x3) == 0 && size <= 4 && (size & (size - 1)) == 0;
-#else
-                       allowed = (off & 0x3) + size == 4 && size <= 4 && (size & (size - 1)) == 0;
-#endif
-                       if (!allowed)
+                       bpf_ctx_record_field_size(info, size_default);
+                       if (!bpf_ctx_narrow_access_ok(off, size, size_default))
                                return false;
-                       __set_access_aux_info(off, info);
                }
        }
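
The bpf_ctx_range()/bpf_ctx_range_till() helpers used in these switches presumably expand to GCC case ranges covering every byte of the member(s), built on offsetofend(); a sketch under that assumption, with illustrative example_* names:

/* First byte past MEMBER, as provided by the kernel headers. */
#define example_offsetofend(TYPE, MEMBER)                               \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

#define example_ctx_range(TYPE, MEMBER)                                 \
        offsetof(TYPE, MEMBER) ... example_offsetofend(TYPE, MEMBER) - 1

#define example_ctx_range_till(TYPE, MEMBER1, MEMBER2)                  \
        offsetof(TYPE, MEMBER1) ... example_offsetofend(TYPE, MEMBER2) - 1
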
 
                                      struct bpf_insn_access_aux *info)
 {
        switch (off) {
-       case offsetof(struct __sk_buff, tc_classid) ...
-            offsetof(struct __sk_buff, tc_classid) + sizeof(__u32) - 1:
-       case offsetof(struct __sk_buff, data) ...
-            offsetof(struct __sk_buff, data) + sizeof(__u32) - 1:
-       case offsetof(struct __sk_buff, data_end) ...
-            offsetof(struct __sk_buff, data_end) + sizeof(__u32) - 1:
+       case bpf_ctx_range(struct __sk_buff, tc_classid):
+       case bpf_ctx_range(struct __sk_buff, data):
+       case bpf_ctx_range(struct __sk_buff, data_end):
                return false;
        }
 
        if (type == BPF_WRITE) {
                switch (off) {
-               case offsetof(struct __sk_buff, cb[0]) ...
-                    offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
+               case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
                        break;
                default:
                        return false;
                }
        }
 
-       return __is_valid_access(off, size, type, info);
+       return bpf_skb_is_valid_access(off, size, type, info);
 }
 
 static bool lwt_is_valid_access(int off, int size,
                                struct bpf_insn_access_aux *info)
 {
        switch (off) {
-       case offsetof(struct __sk_buff, tc_classid) ...
-            offsetof(struct __sk_buff, tc_classid) + sizeof(__u32) - 1:
+       case bpf_ctx_range(struct __sk_buff, tc_classid):
                return false;
        }
 
        if (type == BPF_WRITE) {
                switch (off) {
-               case offsetof(struct __sk_buff, mark):
-               case offsetof(struct __sk_buff, priority):
-               case offsetof(struct __sk_buff, cb[0]) ...
-                    offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
+               case bpf_ctx_range(struct __sk_buff, mark):
+               case bpf_ctx_range(struct __sk_buff, priority):
+               case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
                        break;
                default:
                        return false;
                }
        }
 
-       return __is_valid_access(off, size, type, info);
+       switch (off) {
+       case bpf_ctx_range(struct __sk_buff, data):
+               info->reg_type = PTR_TO_PACKET;
+               break;
+       case bpf_ctx_range(struct __sk_buff, data_end):
+               info->reg_type = PTR_TO_PACKET_END;
+               break;
+       }
+
+       return bpf_skb_is_valid_access(off, size, type, info);
 }
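
With data/data_end typed as PTR_TO_PACKET/PTR_TO_PACKET_END, a program is expected to bounds check against data_end before dereferencing packet memory; a minimal sketch of that pattern (hypothetical function, assumes the UAPI struct __sk_buff and struct ethhdr from <linux/if_ether.h>):

static inline int example_parse_eth(struct __sk_buff *skb)
{
        void *data     = (void *)(long)skb->data;
        void *data_end = (void *)(long)skb->data_end;
        struct ethhdr *eth = data;

        /* Without this check the verifier rejects the load below. */
        if (data + sizeof(*eth) > data_end)
                return 0;

        return eth->h_proto;
}
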
 
 static bool tc_cls_act_is_valid_access(int off, int size,
 {
        if (type == BPF_WRITE) {
                switch (off) {
-               case offsetof(struct __sk_buff, mark):
-               case offsetof(struct __sk_buff, tc_index):
-               case offsetof(struct __sk_buff, priority):
-               case offsetof(struct __sk_buff, cb[0]) ...
-                    offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
-               case offsetof(struct __sk_buff, tc_classid):
+               case bpf_ctx_range(struct __sk_buff, mark):
+               case bpf_ctx_range(struct __sk_buff, tc_index):
+               case bpf_ctx_range(struct __sk_buff, priority):
+               case bpf_ctx_range(struct __sk_buff, tc_classid):
+               case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
                        break;
                default:
                        return false;
                }
        }
 
-       return __is_valid_access(off, size, type, info);
+       switch (off) {
+       case bpf_ctx_range(struct __sk_buff, data):
+               info->reg_type = PTR_TO_PACKET;
+               break;
+       case bpf_ctx_range(struct __sk_buff, data_end):
+               info->reg_type = PTR_TO_PACKET_END;
+               break;
+       }
+
+       return bpf_skb_is_valid_access(off, size, type, info);
 }
 
 static bool __is_valid_xdp_access(int off, int size)
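
The bpf_target_off() helper used throughout the conversion hunks below presumably asserts the kernel-side field width at build time, reports it through *target_size and evaluates to the field offset; a sketch under that assumption (illustrative name):

#define example_target_off(TYPE, MEMBER, SIZE, PTR_SIZE)                \
        ({                                                              \
                BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE));     \
                *(PTR_SIZE) = (SIZE);                                   \
                offsetof(TYPE, MEMBER);                                 \
        })

convert_ctx_accesses() then compares the reported *target_size against the size requested by the program to decide whether the masking step still has to be emitted.
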
 static u32 bpf_convert_ctx_access(enum bpf_access_type type,
                                  const struct bpf_insn *si,
                                  struct bpf_insn *insn_buf,
-                                 struct bpf_prog *prog)
+                                 struct bpf_prog *prog, u32 *target_size)
 {
        struct bpf_insn *insn = insn_buf;
        int off;
 
        switch (si->off) {
        case offsetof(struct __sk_buff, len):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
-
                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
-                                     offsetof(struct sk_buff, len));
+                                     bpf_target_off(struct sk_buff, len, 4,
+                                                    target_size));
                break;
 
        case offsetof(struct __sk_buff, protocol):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
-
                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
-                                     offsetof(struct sk_buff, protocol));
+                                     bpf_target_off(struct sk_buff, protocol, 2,
+                                                    target_size));
                break;
 
        case offsetof(struct __sk_buff, vlan_proto):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);
-
                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
-                                     offsetof(struct sk_buff, vlan_proto));
+                                     bpf_target_off(struct sk_buff, vlan_proto, 2,
+                                                    target_size));
                break;
 
        case offsetof(struct __sk_buff, priority):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);
-
                if (type == BPF_WRITE)
                        *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
-                                             offsetof(struct sk_buff, priority));
+                                             bpf_target_off(struct sk_buff, priority, 4,
+                                                            target_size));
                else
                        *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
-                                             offsetof(struct sk_buff, priority));
+                                             bpf_target_off(struct sk_buff, priority, 4,
+                                                            target_size));
                break;
 
        case offsetof(struct __sk_buff, ingress_ifindex):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4);
-
                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
-                                     offsetof(struct sk_buff, skb_iif));
+                                     bpf_target_off(struct sk_buff, skb_iif, 4,
+                                                    target_size));
                break;
 
        case offsetof(struct __sk_buff, ifindex):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
-
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct sk_buff, dev));
                *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
-                                     offsetof(struct net_device, ifindex));
+                                     bpf_target_off(struct net_device, ifindex, 4,
+                                                    target_size));
                break;
 
        case offsetof(struct __sk_buff, hash):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
-
                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
-                                     offsetof(struct sk_buff, hash));
+                                     bpf_target_off(struct sk_buff, hash, 4,
+                                                    target_size));
                break;
 
        case offsetof(struct __sk_buff, mark):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
-
                if (type == BPF_WRITE)
                        *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
-                                             offsetof(struct sk_buff, mark));
+                                             bpf_target_off(struct sk_buff, mark, 4,
+                                                            target_size));
                else
                        *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
-                                             offsetof(struct sk_buff, mark));
+                                             bpf_target_off(struct sk_buff, mark, 4,
+                                                            target_size));
                break;
 
        case offsetof(struct __sk_buff, pkt_type):
-               return convert_skb_access(SKF_AD_PKTTYPE, si->dst_reg,
-                                         si->src_reg, insn);
+               *target_size = 1;
+               *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
+                                     PKT_TYPE_OFFSET());
+               *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
+#ifdef __BIG_ENDIAN_BITFIELD
+               *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
+#endif
+               break;
 
        case offsetof(struct __sk_buff, queue_mapping):
-               return convert_skb_access(SKF_AD_QUEUE, si->dst_reg,
-                                         si->src_reg, insn);
+               *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
+                                     bpf_target_off(struct sk_buff, queue_mapping, 2,
+                                                    target_size));
+               break;
 
        case offsetof(struct __sk_buff, vlan_present):
-               return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
-                                         si->dst_reg, si->src_reg, insn);
-
        case offsetof(struct __sk_buff, vlan_tci):
-               return convert_skb_access(SKF_AD_VLAN_TAG,
-                                         si->dst_reg, si->src_reg, insn);
+               BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+
+               *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
+                                     bpf_target_off(struct sk_buff, vlan_tci, 2,
+                                                    target_size));
+               if (si->off == offsetof(struct __sk_buff, vlan_tci)) {
+                       *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg,
+                                               ~VLAN_TAG_PRESENT);
+               } else {
+                       *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 12);
+                       *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
+               }
+               break;
 
        case offsetof(struct __sk_buff, cb[0]) ...
-            offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
+            offsetofend(struct __sk_buff, cb[4]) - 1:
                BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
                BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
                              offsetof(struct qdisc_skb_cb, data)) %
                off -= offsetof(struct __sk_buff, tc_classid);
                off += offsetof(struct sk_buff, cb);
                off += offsetof(struct qdisc_skb_cb, tc_classid);
+               *target_size = 2;
                if (type == BPF_WRITE)
                        *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
                                              si->src_reg, off);
 
        case offsetof(struct __sk_buff, tc_index):
 #ifdef CONFIG_NET_SCHED
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);
-
                if (type == BPF_WRITE)
                        *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
-                                             offsetof(struct sk_buff, tc_index));
+                                             bpf_target_off(struct sk_buff, tc_index, 2,
+                                                            target_size));
                else
                        *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
-                                             offsetof(struct sk_buff, tc_index));
+                                             bpf_target_off(struct sk_buff, tc_index, 2,
+                                                            target_size));
 #else
                if (type == BPF_WRITE)
                        *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
 
        case offsetof(struct __sk_buff, napi_id):
 #if defined(CONFIG_NET_RX_BUSY_POLL)
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, napi_id) != 4);
-
                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
-                                     offsetof(struct sk_buff, napi_id));
+                                     bpf_target_off(struct sk_buff, napi_id, 4,
+                                                    target_size));
                *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
                *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
 #else
 static u32 sock_filter_convert_ctx_access(enum bpf_access_type type,
                                          const struct bpf_insn *si,
                                          struct bpf_insn *insn_buf,
-                                         struct bpf_prog *prog)
+                                         struct bpf_prog *prog, u32 *target_size)
 {
        struct bpf_insn *insn = insn_buf;
 
 static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
                                         const struct bpf_insn *si,
                                         struct bpf_insn *insn_buf,
-                                        struct bpf_prog *prog)
+                                        struct bpf_prog *prog, u32 *target_size)
 {
        struct bpf_insn *insn = insn_buf;
 
        switch (si->off) {
        case offsetof(struct __sk_buff, ifindex):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
-
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct sk_buff, dev));
                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
-                                     offsetof(struct net_device, ifindex));
+                                     bpf_target_off(struct net_device, ifindex, 4,
+                                                    target_size));
                break;
        default:
-               return bpf_convert_ctx_access(type, si, insn_buf, prog);
+               return bpf_convert_ctx_access(type, si, insn_buf, prog,
+                                             target_size);
        }
 
        return insn - insn_buf;
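
The vlan_present/vlan_tci rewrite above folds both __sk_buff members onto the same sk_buff vlan_tci load; a worked sketch of the bit manipulation, assuming VLAN_TAG_PRESENT == 0x1000 as the BUILD_BUG_ON above enforces:

/* e.g. raw vlan_tci = 0x1063: tag present, VID 0x063 */
static inline unsigned int example_vlan_tci(unsigned int raw)
{
        return raw & ~0x1000u;          /* -> 0x0063 */
}

static inline unsigned int example_vlan_present(unsigned int raw)
{
        return (raw >> 12) & 1;         /* -> 1 */
}
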
 static u32 xdp_convert_ctx_access(enum bpf_access_type type,
                                  const struct bpf_insn *si,
                                  struct bpf_insn *insn_buf,
-                                 struct bpf_prog *prog)
+                                 struct bpf_prog *prog, u32 *target_size)
 {
        struct bpf_insn *insn = insn_buf;
 
 static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
                                       const struct bpf_insn *si,
                                       struct bpf_insn *insn_buf,
-                                      struct bpf_prog *prog)
+                                      struct bpf_prog *prog,
+                                      u32 *target_size)
 {
        struct bpf_insn *insn = insn_buf;
        int off;