ixgbe: add xdp frags support to ndo_xdp_xmit
author    Lorenzo Bianconi <lorenzo@kernel.org>
          Thu, 12 May 2022 21:26:21 +0000 (14:26 -0700)
committer Jakub Kicinski <kuba@kernel.org>
          Sat, 14 May 2022 00:05:41 +0000 (17:05 -0700)
Add the capability to map non-linear xdp frames in the XDP_TX action and
the ndo_xdp_xmit callback.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Tested-by: Sandeep Penigalapati <sandeep.penigalapati@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Link: https://lore.kernel.org/r/20220512212621.3746140-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
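
For context: a multi-buffer ("frags") xdp_frame keeps its first buffer in
xdpf->data/xdpf->len and any additional buffers as skb_frag_t entries in a
skb_shared_info block reachable via xdp_get_shared_info_from_frame();
xdp_get_frame_len() returns the total length across all buffers, which the
patch uses for bytecount. A minimal sketch of walking such a frame with only
the helpers this patch relies on (consume() is a hypothetical placeholder
for per-buffer work):

    /* Sketch only: visit every buffer of a possibly non-linear xdp_frame.
     * consume() is hypothetical; the helpers are the ones the patch uses.
     */
    static void walk_xdp_frame(struct xdp_frame *xdpf)
    {
            struct skb_shared_info *sinfo = NULL;
            u32 i, nr_frags = 0;

            if (xdp_frame_has_frags(xdpf)) {
                    sinfo = xdp_get_shared_info_from_frame(xdpf);
                    nr_frags = sinfo->nr_frags;
            }

            /* linear part */
            consume(xdpf->data, xdpf->len);

            /* additional buffers: page fragments, as in the xmit loop below */
            for (i = 0; i < nr_frags; i++)
                    consume(skb_frag_address(&sinfo->frags[i]),
                            skb_frag_size(&sinfo->frags[i]));
    }
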
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

index 19cde928d9b7065e06aafd65b9853033960a66cb..77c2e70b0860d538a17942c6fe51ce8902c21a47 100644
@@ -2344,6 +2344,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                        hard_start = page_address(rx_buffer->page) +
                                     rx_buffer->page_offset - offset;
                        xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+                       xdp_buff_clear_frags_flag(&xdp);
 #if (PAGE_SIZE > 4096)
                        /* At larger PAGE_SIZE, frame_sz depend on len size */
                        xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
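
Note on the hunk above: xdp_init_buff()/xdp_prepare_buff() do not initialize
xdp_buff::flags, and ixgbe reuses one on-stack xdp_buff for every frame in
the NAPI loop, so the frags flag has to be cleared explicitly before each
frame; a stale flag would otherwise be propagated into the xdp_frame on
XDP_TX/XDP_REDIRECT and the xmit path would read a garbage skb_shared_info.
For reference, the flag helpers in include/net/xdp.h look roughly like:

    /* Roughly as defined in include/net/xdp.h at the time of this patch;
     * XDP_FLAGS_HAS_FRAGS is a bit in xdp_buff::flags.
     */
    static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
    {
            return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);
    }

    static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
    {
            xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
    }
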
@@ -8571,57 +8572,83 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring,
                        struct xdp_frame *xdpf)
 {
-       struct ixgbe_tx_buffer *tx_buffer;
-       union ixgbe_adv_tx_desc *tx_desc;
-       u32 len, cmd_type;
-       dma_addr_t dma;
-       u16 i;
-
-       len = xdpf->len;
+       struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+       u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+       u16 i = 0, index = ring->next_to_use;
+       struct ixgbe_tx_buffer *tx_head = &ring->tx_buffer_info[index];
+       struct ixgbe_tx_buffer *tx_buff = tx_head;
+       union ixgbe_adv_tx_desc *tx_desc = IXGBE_TX_DESC(ring, index);
+       u32 cmd_type, len = xdpf->len;
+       void *data = xdpf->data;
 
-       if (unlikely(!ixgbe_desc_unused(ring)))
+       if (unlikely(ixgbe_desc_unused(ring) < 1 + nr_frags))
                return IXGBE_XDP_CONSUMED;
 
-       dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE);
-       if (dma_mapping_error(ring->dev, dma))
-               return IXGBE_XDP_CONSUMED;
+       tx_head->bytecount = xdp_get_frame_len(xdpf);
+       tx_head->gso_segs = 1;
+       tx_head->xdpf = xdpf;
 
-       /* record the location of the first descriptor for this packet */
-       tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
-       tx_buffer->bytecount = len;
-       tx_buffer->gso_segs = 1;
-       tx_buffer->protocol = 0;
+       tx_desc->read.olinfo_status =
+               cpu_to_le32(tx_head->bytecount << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
-       i = ring->next_to_use;
-       tx_desc = IXGBE_TX_DESC(ring, i);
+       for (;;) {
+               dma_addr_t dma;
 
-       dma_unmap_len_set(tx_buffer, len, len);
-       dma_unmap_addr_set(tx_buffer, dma, dma);
-       tx_buffer->xdpf = xdpf;
+               dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
+               if (dma_mapping_error(ring->dev, dma))
+                       goto unmap;
 
-       tx_desc->read.buffer_addr = cpu_to_le64(dma);
+               dma_unmap_len_set(tx_buff, len, len);
+               dma_unmap_addr_set(tx_buff, dma, dma);
+
+               cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT |
+                          IXGBE_ADVTXD_DCMD_IFCS | len;
+               tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+               tx_desc->read.buffer_addr = cpu_to_le64(dma);
+               tx_buff->protocol = 0;
+
+               if (++index == ring->count)
+                       index = 0;
+
+               if (i == nr_frags)
+                       break;
+
+               tx_buff = &ring->tx_buffer_info[index];
+               tx_desc = IXGBE_TX_DESC(ring, index);
+               tx_desc->read.olinfo_status = 0;
 
+               data = skb_frag_address(&sinfo->frags[i]);
+               len = skb_frag_size(&sinfo->frags[i]);
+               i++;
+       }
        /* put descriptor type bits */
-       cmd_type = IXGBE_ADVTXD_DTYP_DATA |
-                  IXGBE_ADVTXD_DCMD_DEXT |
-                  IXGBE_ADVTXD_DCMD_IFCS;
-       cmd_type |= len | IXGBE_TXD_CMD;
-       tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
-       tx_desc->read.olinfo_status =
-               cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+       tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
 
        /* Avoid any potential race with xdp_xmit and cleanup */
        smp_wmb();
 
-       /* set next_to_watch value indicating a packet is present */
-       i++;
-       if (i == ring->count)
-               i = 0;
-
-       tx_buffer->next_to_watch = tx_desc;
-       ring->next_to_use = i;
+       tx_head->next_to_watch = tx_desc;
+       ring->next_to_use = index;
 
        return IXGBE_XDP_TX;
+
+unmap:
+       for (;;) {
+               tx_buff = &ring->tx_buffer_info[index];
+               if (dma_unmap_len(tx_buff, len))
+                       dma_unmap_page(ring->dev, dma_unmap_addr(tx_buff, dma),
+                                      dma_unmap_len(tx_buff, len),
+                                      DMA_TO_DEVICE);
+               dma_unmap_len_set(tx_buff, len, 0);
+               if (tx_buff == tx_head)
+                       break;
+
+               if (!index)
+                       index += ring->count;
+               index--;
+       }
+
+       return IXGBE_XDP_CONSUMED;
 }
 
 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
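
A few notes on the rewritten ixgbe_xmit_xdp_ring(): a frame now consumes one
descriptor for the linear part plus one per fragment, hence the
"ixgbe_desc_unused(ring) < 1 + nr_frags" check up front; only the head
tx_buffer_info entry carries bytecount, gso_segs and the xdpf pointer, and
IXGBE_TXD_CMD (EOP/RS) is OR-ed into the last descriptor only, so hardware
treats the chain as a single packet; on a DMA mapping failure the unmap:
loop walks backwards from the failed slot to the head, undoing every mapping
already made. The function is called once per frame; a simplified sketch of
an ndo_xdp_xmit-style caller (this follows the driver's conventions but is
not the file's verbatim ixgbe_xdp_xmit()):

    /* Simplified, hypothetical ndo_xdp_xmit-style caller; ring selection
     * and locking omitted. Returns the number of frames actually queued,
     * as the ndo_xdp_xmit contract requires; the core frees the rest.
     */
    static int xdp_xmit_sketch(struct ixgbe_ring *ring, int n,
                               struct xdp_frame **frames, u32 flags)
    {
            int i, nxmit = 0;

            if (unlikely(flags & ~XDP_XMIT_FLUSH))
                    return -EINVAL;

            for (i = 0; i < n; i++) {
                    if (ixgbe_xmit_xdp_ring(ring, frames[i]) != IXGBE_XDP_TX)
                            break;
                    nxmit++;
            }

            if (flags & XDP_XMIT_FLUSH)
                    ixgbe_xdp_ring_update_tail(ring); /* single tail bump */

            return nxmit;
    }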