int ret;
 
        rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
-       xdp->data = page_address(rx_info->page) + rx_info->page_offset;
-       xdp_set_data_meta_invalid(xdp);
-       xdp->data_hard_start = page_address(rx_info->page);
-       xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
+       xdp_prepare_buff(xdp, page_address(rx_info->page),
+                        rx_info->page_offset,
+                        rx_ring->ena_bufs[0].len, false);
        /* If for some reason we received a bigger packet than
         * we expect, then we simply drop it
         */
 
        txr = rxr->bnapi->tx_ring;
        /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
        xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
-       xdp.data_hard_start = *data_ptr - offset;
-       xdp.data = *data_ptr;
-       xdp_set_data_meta_invalid(&xdp);
-       xdp.data_end = *data_ptr + *len;
+       xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
        orig_data = xdp.data;
 
        rcu_read_lock();
 
                                struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
                                struct rcv_queue *rq, struct sk_buff **skb)
 {
+       unsigned char *hard_start, *data;
        struct xdp_buff xdp;
        struct page *page;
        u32 action;
 
        xdp_init_buff(&xdp, RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
                      &rq->xdp_rxq);
-       xdp.data_hard_start = page_address(page);
-       xdp.data = (void *)cpu_addr;
-       xdp_set_data_meta_invalid(&xdp);
-       xdp.data_end = xdp.data + len;
+       hard_start = page_address(page);
+       data = (unsigned char *)cpu_addr;
+       xdp_prepare_buff(&xdp, hard_start, data - hard_start, len, false);
        orig_data = xdp.data;
 
        rcu_read_lock();
 
 
        xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE,
                      &dpaa_fq->xdp_rxq);
-       xdp.data = vaddr + fd_off;
-       xdp.data_meta = xdp.data;
-       xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
-       xdp.data_end = xdp.data + qm_fd_get_length(fd);
+       xdp_prepare_buff(&xdp, vaddr + fd_off - XDP_PACKET_HEADROOM,
+                        XDP_PACKET_HEADROOM, qm_fd_get_length(fd), true);
 
        /* We reserve a fixed headroom of 256 bytes under the erratum and we
         * offer it all to XDP programs to use. If no room is left for the
 
        struct bpf_prog *xdp_prog;
        struct xdp_buff xdp;
        u32 xdp_act = XDP_PASS;
-       int err;
+       int err, offset;
 
        rcu_read_lock();
 
        if (!xdp_prog)
                goto out;
 
-       xdp_init_buff(&xdp,
-                     DPAA2_ETH_RX_BUF_RAW_SIZE -
-                     (dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM),
-                     &ch->xdp_rxq);
-       xdp.data = vaddr + dpaa2_fd_get_offset(fd);
-       xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
-       xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
-       xdp_set_data_meta_invalid(&xdp);
+       offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
+       xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
+       xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
+                        dpaa2_fd_get_len(fd), false);
 
        xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
 
 
                /* retrieve a buffer from the ring */
                if (!skb) {
-                       xdp.data = page_address(rx_buffer->page) +
-                                  rx_buffer->page_offset;
-                       xdp.data_meta = xdp.data;
-                       xdp.data_hard_start = xdp.data -
-                                             i40e_rx_offset(rx_ring);
-                       xdp.data_end = xdp.data + size;
+                       unsigned int offset = i40e_rx_offset(rx_ring);
+                       unsigned char *hard_start;
+
+                       hard_start = page_address(rx_buffer->page) +
+                                    rx_buffer->page_offset - offset;
+                       xdp_prepare_buff(&xdp, hard_start, offset, size, true);
 #if (PAGE_SIZE > 4096)
                        /* At larger PAGE_SIZE, frame_sz depend on len size */
                        xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
 
 
        /* start the loop to process Rx packets bounded by 'budget' */
        while (likely(total_rx_pkts < (unsigned int)budget)) {
+               unsigned int offset = ice_rx_offset(rx_ring);
                union ice_32b_rx_flex_desc *rx_desc;
                struct ice_rx_buf *rx_buf;
+               unsigned char *hard_start;
                struct sk_buff *skb;
                unsigned int size;
                u16 stat_err_bits;
                        goto construct_skb;
                }
 
-               xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
-               xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
-               xdp.data_meta = xdp.data;
-               xdp.data_end = xdp.data + size;
+               hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
+                            offset;
+               xdp_prepare_buff(&xdp, hard_start, offset, size, true);
 #if (PAGE_SIZE > 4096)
                /* At larger PAGE_SIZE, frame_sz depend on len size */
                xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
 
 
                /* retrieve a buffer from the ring */
                if (!skb) {
-                       xdp.data = page_address(rx_buffer->page) +
-                                  rx_buffer->page_offset;
-                       xdp.data_meta = xdp.data;
-                       xdp.data_hard_start = xdp.data -
-                                             igb_rx_offset(rx_ring);
-                       xdp.data_end = xdp.data + size;
+                       unsigned int offset = igb_rx_offset(rx_ring);
+                       unsigned char *hard_start;
+
+                       hard_start = page_address(rx_buffer->page) +
+                                    rx_buffer->page_offset - offset;
+                       xdp_prepare_buff(&xdp, hard_start, offset, size, true);
 #if (PAGE_SIZE > 4096)
                        /* At larger PAGE_SIZE, frame_sz depend on len size */
                        xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
 
 
                /* retrieve a buffer from the ring */
                if (!skb) {
-                       xdp.data = page_address(rx_buffer->page) +
-                                  rx_buffer->page_offset;
-                       xdp.data_meta = xdp.data;
-                       xdp.data_hard_start = xdp.data -
-                                             ixgbe_rx_offset(rx_ring);
-                       xdp.data_end = xdp.data + size;
+                       unsigned int offset = ixgbe_rx_offset(rx_ring);
+                       unsigned char *hard_start;
+
+                       hard_start = page_address(rx_buffer->page) +
+                                    rx_buffer->page_offset - offset;
+                       xdp_prepare_buff(&xdp, hard_start, offset, size, true);
 #if (PAGE_SIZE > 4096)
                        /* At larger PAGE_SIZE, frame_sz depend on len size */
                        xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
 
 
                /* retrieve a buffer from the ring */
                if (!skb) {
-                       xdp.data = page_address(rx_buffer->page) +
-                                  rx_buffer->page_offset;
-                       xdp.data_meta = xdp.data;
-                       xdp.data_hard_start = xdp.data -
-                                             ixgbevf_rx_offset(rx_ring);
-                       xdp.data_end = xdp.data + size;
+                       unsigned int offset = ixgbevf_rx_offset(rx_ring);
+                       unsigned char *hard_start;
+
+                       hard_start = page_address(rx_buffer->page) +
+                                    rx_buffer->page_offset - offset;
+                       xdp_prepare_buff(&xdp, hard_start, offset, size, true);
 #if (PAGE_SIZE > 4096)
                        /* At larger PAGE_SIZE, frame_sz depend on len size */
                        xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
 
 
        /* Prefetch header */
        prefetch(data);
-
-       xdp->data_hard_start = data;
-       xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE;
-       xdp->data_end = xdp->data + data_len;
-       xdp_set_data_meta_invalid(xdp);
+       xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
+                        data_len, false);
 
        sinfo = xdp_get_shared_info_from_buff(xdp);
        sinfo->nr_frags = 0;
 
                if (xdp_prog) {
                        struct xdp_rxq_info *xdp_rxq;
 
-                       xdp.data_hard_start = data;
-                       xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM;
-                       xdp.data_end = xdp.data + rx_bytes;
-
                        if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
                                xdp_rxq = &rxq->xdp_rxq_short;
                        else
                                xdp_rxq = &rxq->xdp_rxq_long;
 
                        xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
-                       xdp_set_data_meta_invalid(&xdp);
+                       xdp_prepare_buff(&xdp, data,
+                                        MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
+                                        rx_bytes, false);
 
                        ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps);
 
 
                                                priv->frag_info[0].frag_size,
                                                DMA_FROM_DEVICE);
 
-                       xdp.data_hard_start = va - frags[0].page_offset;
-                       xdp.data = va;
-                       xdp_set_data_meta_invalid(&xdp);
-                       xdp.data_end = xdp.data + length;
+                       xdp_prepare_buff(&xdp, va - frags[0].page_offset,
+                                        frags[0].page_offset, length, false);
                        orig_data = xdp.data;
 
                        act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
                                u32 len, struct xdp_buff *xdp)
 {
        xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
-       xdp->data_hard_start = va;
-       xdp->data = va + headroom;
-       xdp_set_data_meta_invalid(xdp);
-       xdp->data_end = xdp->data + len;
+       xdp_prepare_buff(xdp, va, headroom, len, false);
 }
 
 static struct sk_buff *
 
                        unsigned int dma_off;
                        int act;
 
-                       xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
-                       xdp.data = orig_data;
-                       xdp.data_meta = orig_data;
-                       xdp.data_end = orig_data + pkt_len;
+                       xdp_prepare_buff(&xdp,
+                                        rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
+                                        pkt_off - NFP_NET_RX_BUF_HEADROOM,
+                                        pkt_len, true);
 
                        act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
 
        enum xdp_action act;
 
        xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq);
-       xdp.data_hard_start = page_address(bd->data);
-       xdp.data = xdp.data_hard_start + *data_offset;
-       xdp_set_data_meta_invalid(&xdp);
-       xdp.data_end = xdp.data + *len;
+       xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset,
+                        *len, false);
 
        /* Queues always have a full reset currently, so for the time
         * being until there's atomic program replace just mark read
 
               efx->rx_prefix_size);
 
        xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info);
-       xdp.data = *ehp;
-       xdp.data_hard_start = xdp.data - EFX_XDP_HEADROOM;
-
        /* No support yet for XDP metadata */
-       xdp_set_data_meta_invalid(&xdp);
-       xdp.data_end = xdp.data + rx_buf->len;
+       xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM,
+                        rx_buf->len, false);
 
        xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
        rcu_read_unlock();
 
                                        dma_dir);
                prefetch(desc->addr);
 
-               xdp.data_hard_start = desc->addr;
-               xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
-               xdp_set_data_meta_invalid(&xdp);
-               xdp.data_end = xdp.data + pkt_len;
+               xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM,
+                                pkt_len, false);
 
                if (xdp_prog) {
                        xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
 
        }
 
        if (priv->xdp_prog) {
-               xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
+               int headroom = CPSW_HEADROOM, size = len;
 
+               xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
                if (status & CPDMA_RX_VLAN_ENCAP) {
-                       xdp.data = pa + CPSW_HEADROOM +
-                                  CPSW_RX_VLAN_ENCAP_HDR_SIZE;
-                       xdp.data_end = xdp.data + len -
-                                      CPSW_RX_VLAN_ENCAP_HDR_SIZE;
-               } else {
-                       xdp.data = pa + CPSW_HEADROOM;
-                       xdp.data_end = xdp.data + len;
+                       headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+                       size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
                }
 
-               xdp_set_data_meta_invalid(&xdp);
-
-               xdp.data_hard_start = pa;
+               xdp_prepare_buff(&xdp, pa, headroom, size, false);
 
                port = priv->emac_port + cpsw->data.dual_emac;
                ret = cpsw_run_xdp(priv, ch, &xdp, page, port);
 
        }
 
        if (priv->xdp_prog) {
-               xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
+               int headroom = CPSW_HEADROOM, size = len;
 
+               xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
                if (status & CPDMA_RX_VLAN_ENCAP) {
-                       xdp.data = pa + CPSW_HEADROOM +
-                                  CPSW_RX_VLAN_ENCAP_HDR_SIZE;
-                       xdp.data_end = xdp.data + len -
-                                      CPSW_RX_VLAN_ENCAP_HDR_SIZE;
-               } else {
-                       xdp.data = pa + CPSW_HEADROOM;
-                       xdp.data_end = xdp.data + len;
+                       headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+                       size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
                }
 
-               xdp_set_data_meta_invalid(&xdp);
-
-               xdp.data_hard_start = pa;
+               xdp_prepare_buff(&xdp, pa, headroom, size, false);
 
                ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port);
                if (ret != CPSW_XDP_PASS)
 
        }
 
        xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);
-       xdp->data_hard_start = page_address(page);
-       xdp->data = xdp->data_hard_start + NETVSC_XDP_HDRM;
-       xdp_set_data_meta_invalid(xdp);
-       xdp->data_end = xdp->data + len;
+       xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false);
 
        memcpy(xdp->data, data, len);
 
 
                u32 act;
 
                xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
-               xdp.data_hard_start = buf;
-               xdp.data = buf + pad;
-               xdp_set_data_meta_invalid(&xdp);
-               xdp.data_end = xdp.data + len;
+               xdp_prepare_buff(&xdp, buf, pad, len, false);
 
                act = bpf_prog_run_xdp(xdp_prog, &xdp);
                if (act == XDP_REDIRECT || act == XDP_TX) {
 
                skb = nskb;
        }
 
-       xdp.data_hard_start = skb->head;
-       xdp.data = skb_mac_header(skb);
-       xdp.data_end = xdp.data + pktlen;
-       xdp.data_meta = xdp.data;
-
        /* SKB "head" area always have tailroom for skb_shared_info */
-       frame_sz = (void *)skb_end_pointer(skb) - xdp.data_hard_start;
+       frame_sz = skb_end_pointer(skb) - skb->head;
        frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
+       xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);
 
        orig_data = xdp.data;
        orig_data_end = xdp.data_end;
 
                }
 
                xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
-               xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
-               xdp.data = xdp.data_hard_start + xdp_headroom;
-               xdp.data_end = xdp.data + len;
-               xdp.data_meta = xdp.data;
+               xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
+                                xdp_headroom, len, true);
                orig_data = xdp.data;
                act = bpf_prog_run_xdp(xdp_prog, &xdp);
                stats->xdp_packets++;
                 */
                data = page_address(xdp_page) + offset;
                xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq);
-               xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
-               xdp.data = data + vi->hdr_len;
-               xdp.data_end = xdp.data + (len - vi->hdr_len);
-               xdp.data_meta = xdp.data;
+               xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len,
+                                VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true);
 
                act = bpf_prog_run_xdp(xdp_prog, &xdp);
                stats->xdp_packets++;
 
 
        xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
                      &queue->xdp_rxq);
-       xdp->data_hard_start = page_address(pdata);
-       xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
-       xdp_set_data_meta_invalid(xdp);
-       xdp->data_end = xdp->data + len;
+       xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM,
+                        len, false);
 
        act = bpf_prog_run_xdp(prog, xdp);
        switch (act) {
 
        xdp->rxq = rxq;
 }
 
+/**
+ * xdp_prepare_buff - populate per-packet pointers of an initialized xdp_buff
+ * @xdp: buffer to fill in; xdp_init_buff() must already have set frame_sz/rxq
+ * @hard_start: start of the buffer, i.e. the beginning of the headroom
+ * @headroom: offset in bytes from @hard_start to the packet data
+ * @data_len: length in bytes of the packet data
+ * @meta_valid: true if the metadata area ending at the packet data is usable
+ *
+ * Replaces the open-coded data_hard_start/data/data_end/data_meta assignment
+ * sequence used by drivers.  When @meta_valid is false, data_meta is set to
+ * data + 1, the same sentinel previously produced by
+ * xdp_set_data_meta_invalid().
+ */
+static __always_inline void
+xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
+                int headroom, int data_len, const bool meta_valid)
+{
+       unsigned char *data = hard_start + headroom;
+
+       xdp->data_hard_start = hard_start;
+       xdp->data = data;
+       xdp->data_end = data + data_len;
+       xdp->data_meta = meta_valid ? data : data + 1;
+}
+
 /* Reserve memory area at end-of data area.
  *
  * This macro reserves tailroom in the XDP buffer by limiting the
 
        if (IS_ERR(data))
                return PTR_ERR(data);
 
-       xdp.data_hard_start = data;
-       xdp.data = data + headroom;
-       xdp.data_meta = xdp.data;
-       xdp.data_end = xdp.data + size;
-
        rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
        xdp_init_buff(&xdp, headroom + max_data_sz + tailroom,
                      &rxqueue->xdp_rxq);
+       xdp_prepare_buff(&xdp, data, headroom, size, true);
+
        bpf_prog_change_xdp(NULL, prog);
        ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
        if (ret)
 
                                     struct xdp_buff *xdp,
                                     struct bpf_prog *xdp_prog)
 {
+       void *orig_data, *orig_data_end, *hard_start;
        struct netdev_rx_queue *rxqueue;
-       void *orig_data, *orig_data_end;
        u32 metalen, act = XDP_DROP;
        u32 mac_len, frame_sz;
        __be16 orig_eth_type;
        struct ethhdr *eth;
        bool orig_bcast;
-       int hlen, off;
+       int off;
 
        /* Reinjected packets coming from act_mirred or similar should
         * not get XDP generic processing.
         * header.
         */
        mac_len = skb->data - skb_mac_header(skb);
-       hlen = skb_headlen(skb) + mac_len;
-       xdp->data = skb->data - mac_len;
-       xdp->data_meta = xdp->data;
-       xdp->data_end = xdp->data + hlen;
-       xdp->data_hard_start = skb->data - skb_headroom(skb);
+       hard_start = skb->data - skb_headroom(skb);
 
        /* SKB "head" area always have tailroom for skb_shared_info */
-       frame_sz = (void *)skb_end_pointer(skb) - xdp->data_hard_start;
+       frame_sz = (void *)skb_end_pointer(skb) - hard_start;
        frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
+       rxqueue = netif_get_rxqueue(skb);
+       xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
+       xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
+                        skb_headlen(skb) + mac_len, true);
+
        orig_data_end = xdp->data_end;
        orig_data = xdp->data;
        eth = (struct ethhdr *)xdp->data;
        orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
        orig_eth_type = eth->h_proto;
 
-       rxqueue = netif_get_rxqueue(skb);
-       xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
-
        act = bpf_prog_run_xdp(xdp_prog, xdp);
 
        /* check if bpf_xdp_adjust_head was used */