net: pass net_device argument to eth_get_headlen
author		Stanislav Fomichev <sdf@google.com>
		Mon, 22 Apr 2019 15:55:48 +0000 (08:55 -0700)
committer	Daniel Borkmann <daniel@iogearbox.net>
		Tue, 23 Apr 2019 16:36:34 +0000 (18:36 +0200)
Update all users of eth_get_headlen to pass the network device, fetch
the network namespace from it, and pass it down to the flow dissector.
This commit is a no-op until an administrator attaches a BPF flow
dissector program.
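
For reference, a call site under the new convention looks roughly like
the sketch below (illustrative only; the helper example_pull_headers and
the names va/rx_hdr_size are stand-ins mirroring the per-driver code
touched here, not part of this patch):

#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Illustrative sketch: copy at most rx_hdr_size bytes of protocol headers
 * from the receive buffer "va" into the skb head. Passing skb->dev lets
 * eth_get_headlen() hand the device's network namespace to an attached
 * BPF flow dissector; previously no namespace was available to it.
 */
static unsigned int example_pull_headers(struct sk_buff *skb, void *va,
					 unsigned int size,
					 unsigned int rx_hdr_size)
{
	unsigned int headlen = size;

	if (headlen > rx_hdr_size)
		headlen = eth_get_headlen(skb->dev, va, rx_hdr_size);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	return headlen;
}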

Cc: Maxim Krasnyansky <maxk@qti.qualcomm.com>
Cc: Saeed Mahameed <saeedm@mellanox.com>
Cc: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Cc: intel-wired-lan@lists.osuosl.org
Cc: Yisen Zhuang <yisen.zhuang@huawei.com>
Cc: Salil Mehta <salil.mehta@huawei.com>
Cc: Michael Chan <michael.chan@broadcom.com>
Cc: Igor Russkikh <igor.russkikh@aquantia.com>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
16 files changed:
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/iavf/iavf_txrx.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/tun.c
include/linux/etherdevice.h
net/ethernet/eth.c

diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index c64e2fb5a4f10d4b6a5468782a08ebcbf9f140e4..350e385528fddd8697c3c1349336c57360e16418 100644
@@ -354,7 +354,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 
                        hdr_len = buff->len;
                        if (hdr_len > AQ_CFG_RX_HDR_SIZE)
-                               hdr_len = eth_get_headlen(aq_buf_vaddr(&buff->rxdata),
+                               hdr_len = eth_get_headlen(skb->dev,
+                                                         aq_buf_vaddr(&buff->rxdata),
                                                          AQ_CFG_RX_HDR_SIZE);
 
                        memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 6528a597367bd4ba7862b13964dd5960b685a165..526f36dcb2042ae067a713bc0a616657ecb69ef7 100644
@@ -899,7 +899,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                             DMA_ATTR_WEAK_ORDERING);
 
        if (unlikely(!payload))
-               payload = eth_get_headlen(data_ptr, len);
+               payload = eth_get_headlen(bp->dev, data_ptr, len);
 
        skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
        if (!skb) {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 297b95c1b3c1f5780d4c71915d380c46f83c8489..65b985acae38595d66a90bcb9eaea6423b72f909 100644
@@ -598,7 +598,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
        } else {
                ring->stats.seg_pkt_cnt++;
 
-               pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE);
+               pull_len = eth_get_headlen(ndev, va, HNS_RX_HEAD_SIZE);
                memcpy(__skb_put(skb, pull_len), va,
                       ALIGN(pull_len, sizeof(long)));
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 176d4b965709f7ed773ee48a7bdfc801eca5efe6..5f7b51c6ee918b0fd76ee03e5ed7dfbfbb588caa 100644
@@ -2580,7 +2580,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
        ring->stats.seg_pkt_cnt++;
        u64_stats_update_end(&ring->syncp);
 
-       ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+       ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
        __skb_put(skb, ring->pull_len);
        hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
                            desc_cb);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 2325cee76211364f6d378f41172a8b341b7f5d9a..b4d970e44163fe0a96f89aedf2d594a23d76f9d0 100644
@@ -280,7 +280,7 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
        /* we need the header to contain the greater of either ETH_HLEN or
         * 60 bytes if the skb->len is less than 60 for skb_pad.
         */
-       pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
+       pull_len = eth_get_headlen(skb->dev, va, FM10K_RX_HDR_LEN);
 
        /* align pull length to size of long to optimize memcpy performance */
        memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 1a95223c9f99af3738641b85c46f2b09ac88f384..e1931701cd7e926c5beccd76902199570be501ff 100644
@@ -2035,7 +2035,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
        /* Determine available headroom for copy */
        headlen = size;
        if (headlen > I40E_RX_HDR_SIZE)
-               headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
+               headlen = eth_get_headlen(skb->dev, xdp->data,
+                                         I40E_RX_HDR_SIZE);
 
        /* align pull length to size of long to optimize memcpy performance */
        memcpy(__skb_put(skb, headlen), xdp->data,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index b64187753ad67ce6a6fdf3efcd020d16fdea3872..cf8be63a8a4f588f8866baee213af8446a087b0d 100644
@@ -1315,7 +1315,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
        /* Determine available headroom for copy */
        headlen = size;
        if (headlen > IAVF_RX_HDR_SIZE)
-               headlen = eth_get_headlen(va, IAVF_RX_HDR_SIZE);
+               headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);
 
        /* align pull length to size of long to optimize memcpy performance */
        memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 79043fec01872202a6e997353f4b0191fe3147bd..259f118c7d8b97c9a2a6dc100eabfe4bb1db0c6a 100644
@@ -699,7 +699,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
        /* Determine available headroom for copy */
        headlen = size;
        if (headlen > ICE_RX_HDR_SIZE)
-               headlen = eth_get_headlen(va, ICE_RX_HDR_SIZE);
+               headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);
 
        /* align pull length to size of long to optimize memcpy performance */
        memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index acbb5b4f333db909416750e1391f112bcc635bcb..9b8a4bb253278beaa049236f0b2aa99971ef1945 100644
@@ -8051,7 +8051,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
        /* Determine available headroom for copy */
        headlen = size;
        if (headlen > IGB_RX_HDR_LEN)
-               headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);
+               headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN);
 
        /* align pull length to size of long to optimize memcpy performance */
        memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index f79728381e8a8255e4be5d4b77d4ad5c8b3242cf..e58a6e0dc4d9d6dc523ed0f0ad6ededcc00959a1 100644
@@ -1199,7 +1199,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
        /* Determine available headroom for copy */
        headlen = size;
        if (headlen > IGC_RX_HDR_LEN)
-               headlen = eth_get_headlen(va, IGC_RX_HDR_LEN);
+               headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
 
        /* align pull length to size of long to optimize memcpy performance */
        memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 60cec3540dd783e6e079451a719a9d71fb14898e..7b903206b534b0298c82dec84812f88f477f4d7e 100644
@@ -1800,7 +1800,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
         * we need the header to contain the greater of either ETH_HLEN or
         * 60 bytes if the skb->len is less than 60 for skb_pad.
         */
-       pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
+       pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);
 
        /* align pull length to size of long to optimize memcpy performance */
        skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 49e23afa05a2e58cfc1f6d9d296a23b2c1a71676..d189ed24766524676f2a1baa3651e53412bd1ad0 100644
@@ -895,7 +895,8 @@ struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
        /* Determine available headroom for copy */
        headlen = size;
        if (headlen > IXGBEVF_RX_HDR_SIZE)
-               headlen = eth_get_headlen(xdp->data, IXGBEVF_RX_HDR_SIZE);
+               headlen = eth_get_headlen(skb->dev, xdp->data,
+                                         IXGBEVF_RX_HDR_SIZE);
 
        /* align pull length to size of long to optimize memcpy performance */
        memcpy(__skb_put(skb, headlen), xdp->data,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 40f3f98aa279c7c8c1dd96abf4778d0c1bd694bf..7b61126fcec9741f999dde262077f59e0f65f7ee 100644
@@ -163,7 +163,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
        case MLX5_INLINE_MODE_NONE:
                return 0;
        case MLX5_INLINE_MODE_TCP_UDP:
-               hlen = eth_get_headlen(skb->data, skb_headlen(skb));
+               hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
                if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
                        hlen += VLAN_HLEN;
                break;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 24d0220b9ba00724ebad94fbc58858a4abffb207..9d72f8c76c158322cb54f67d8adefb6343a9283a 100644
@@ -1965,7 +1965,8 @@ drop:
 
        if (frags) {
                /* Exercise flow dissector code path. */
-               u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
+               u32 headlen = eth_get_headlen(tun->dev, skb->data,
+                                             skb_headlen(skb));
 
                if (unlikely(headlen > skb_headlen(skb))) {
                        this_cpu_inc(tun->pcpu_stats->rx_dropped);
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index e2f3b21cd72a28d16cf2324d308e13dc64c86f59..c6c1930e28a0c8e59d1ed6b866cb7d67823d0725 100644
@@ -33,7 +33,7 @@ struct device;
 int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
 unsigned char *arch_get_platform_mac_address(void);
 int nvmem_get_mac_address(struct device *dev, void *addrbuf);
-u32 eth_get_headlen(void *data, unsigned int max_len);
+u32 eth_get_headlen(const struct net_device *dev, void *data, unsigned int len);
 __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 extern const struct header_ops eth_header_ops;
 
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 1e439549c419b74aaee296e231f76eb016037dca..0f9863dc4d44acac146666a93b941014aa282ee9 100644
@@ -119,13 +119,14 @@ EXPORT_SYMBOL(eth_header);
 
 /**
  * eth_get_headlen - determine the length of header for an ethernet frame
+ * @dev: pointer to network device
  * @data: pointer to start of frame
  * @len: total length of frame
  *
  * Make a best effort attempt to pull the length for all of the headers for
  * a given frame in a linear buffer.
  */
-u32 eth_get_headlen(void *data, unsigned int len)
+u32 eth_get_headlen(const struct net_device *dev, void *data, unsigned int len)
 {
        const unsigned int flags = FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
        const struct ethhdr *eth = (const struct ethhdr *)data;
@@ -136,7 +137,7 @@ u32 eth_get_headlen(void *data, unsigned int len)
                return len;
 
        /* parse any remaining L2/L3 headers, check for L4 */
-       if (!skb_flow_dissect_flow_keys_basic(NULL, NULL, &keys, data,
+       if (!skb_flow_dissect_flow_keys_basic(dev_net(dev), NULL, &keys, data,
                                              eth->h_proto, sizeof(*eth),
                                              len, flags))
                return max_t(u32, keys.control.thoff, sizeof(*eth));
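
As noted in the commit message, the behaviour above only changes once a
BPF flow dissector program is attached to the relevant network namespace.
A minimal userspace sketch of that attach step, assuming prog_fd is an
already loaded BPF_PROG_TYPE_FLOW_DISSECTOR program obtained via libbpf
(names here are illustrative, not part of this patch):

#include <errno.h>
#include <bpf/bpf.h>

/* Sketch only: attach a loaded flow dissector program to the caller's
 * network namespace. target_fd is unused for BPF_FLOW_DISSECTOR, so 0 is
 * passed. Once attached, eth_get_headlen() on devices in this namespace
 * consults the program via skb_flow_dissect_flow_keys_basic().
 */
static int attach_flow_dissector(int prog_fd)
{
	if (bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0))
		return -errno;
	return 0;
}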