net: mana: Add new MANA VF performance counters for easier troubleshooting
Author: Shradha Gupta <shradhagupta@linux.microsoft.com>
Wed, 15 Mar 2023 11:55:13 +0000 (04:55 -0700)
Committer: David S. Miller <davem@davemloft.net>
Fri, 17 Mar 2023 07:58:18 +0000 (07:58 +0000)
Extend the performance counter stats reported in 'ethtool -S <interface>'
output for the MANA VF to facilitate troubleshooting.

New per-queue TX counters (reported as tx_<q>_<name>): tso_packets,
tso_bytes, tso_inner_packets, tso_inner_bytes, long_pkt_fmt,
short_pkt_fmt, csum_partial and mana_map_err. New port-level counters:
tx_cqes, tx_cqe_err, tx_cqe_unknown_type, rx_cqes, rx_coalesced_err and
rx_cqe_unknown_type.

Tested-on: Ubuntu22
Signed-off-by: Shradha Gupta <shradhagupta@linux.microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/microsoft/mana/mana_en.c
drivers/net/ethernet/microsoft/mana/mana_ethtool.c
include/net/mana/mana.h

diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 6120f2b6684fb15a94ff371319dcc4067523e1ce..492474b4d8aac06337535a71e0901d5bd41aed45 100644
@@ -156,6 +156,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        struct mana_txq *txq;
        struct mana_cq *cq;
        int err, len;
+       u16 ihs;
 
        if (unlikely(!apc->port_is_up))
                goto tx_drop;
@@ -166,6 +167,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        txq = &apc->tx_qp[txq_idx].txq;
        gdma_sq = txq->gdma_sq;
        cq = &apc->tx_qp[txq_idx].tx_cq;
+       tx_stats = &txq->stats;
 
        pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
        pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
@@ -179,10 +181,17 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
 
-       if (pkt_fmt == MANA_SHORT_PKT_FMT)
+       if (pkt_fmt == MANA_SHORT_PKT_FMT) {
                pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
-       else
+               u64_stats_update_begin(&tx_stats->syncp);
+               tx_stats->short_pkt_fmt++;
+               u64_stats_update_end(&tx_stats->syncp);
+       } else {
                pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
+               u64_stats_update_begin(&tx_stats->syncp);
+               tx_stats->long_pkt_fmt++;
+               u64_stats_update_end(&tx_stats->syncp);
+       }
 
        pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
        pkg.wqe_req.flags = 0;
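
Each new per-queue counter above is bumped under the queue's
u64_stats_sync, matching the driver's existing packets/bytes accounting.
A minimal standalone sketch of the writer side of that pattern (struct
and function names here are illustrative, not from the driver):

	#include <linux/u64_stats_sync.h>

	/* Illustrative counters, shaped like mana_stats_tx. */
	struct demo_tx_stats {
		u64 short_pkt_fmt;
		u64 long_pkt_fmt;
		struct u64_stats_sync syncp;
	};

	/* Writer side: on 64-bit kernels begin/end compile away; on
	 * 32-bit SMP they take a seqcount so readers never observe a
	 * torn 64-bit value.
	 */
	static void demo_count_pkt_fmt(struct demo_tx_stats *s, bool is_short)
	{
		u64_stats_update_begin(&s->syncp);
		if (is_short)
			s->short_pkt_fmt++;
		else
			s->long_pkt_fmt++;
		u64_stats_update_end(&s->syncp);
	}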
@@ -232,9 +241,35 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                                                 &ipv6_hdr(skb)->daddr, 0,
                                                 IPPROTO_TCP, 0);
                }
+
+               if (skb->encapsulation) {
+                       ihs = skb_inner_tcp_all_headers(skb);
+                       u64_stats_update_begin(&tx_stats->syncp);
+                       tx_stats->tso_inner_packets++;
+                       tx_stats->tso_inner_bytes += skb->len - ihs;
+                       u64_stats_update_end(&tx_stats->syncp);
+               } else {
+                       if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+                               ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
+                       } else {
+                               ihs = skb_tcp_all_headers(skb);
+                               if (ipv6_has_hopopt_jumbo(skb))
+                                       ihs -= sizeof(struct hop_jumbo_hdr);
+                       }
+
+                       u64_stats_update_begin(&tx_stats->syncp);
+                       tx_stats->tso_packets++;
+                       tx_stats->tso_bytes += skb->len - ihs;
+                       u64_stats_update_end(&tx_stats->syncp);
+               }
+
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                csum_type = mana_checksum_info(skb);
 
+               u64_stats_update_begin(&tx_stats->syncp);
+               tx_stats->csum_partial++;
+               u64_stats_update_end(&tx_stats->syncp);
+
                if (csum_type == IPPROTO_TCP) {
                        pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
                        pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
@@ -254,8 +289,12 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                }
        }
 
-       if (mana_map_skb(skb, apc, &pkg))
+       if (mana_map_skb(skb, apc, &pkg)) {
+               u64_stats_update_begin(&tx_stats->syncp);
+               tx_stats->mana_map_err++;
+               u64_stats_update_end(&tx_stats->syncp);
                goto free_sgl_ptr;
+       }
 
        skb_queue_tail(&txq->pending_skbs, skb);
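
For GSO packets, only the payload is charged to tso_bytes and
tso_inner_bytes: the header bytes replicated into every segment (ihs)
are subtracted from skb->len, and the header size depends on
encapsulation and GSO type as above. The same selection logic, factored
into a hypothetical helper for readability (the helper name is not part
of the patch; it uses the same skb helpers the hunk does):

	/* Header bytes replicated per GSO segment. */
	static u16 demo_gso_header_size(const struct sk_buff *skb)
	{
		u16 ihs;

		if (skb->encapsulation)
			return skb_inner_tcp_all_headers(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
			return skb_transport_offset(skb) + sizeof(struct udphdr);

		ihs = skb_tcp_all_headers(skb);
		/* BIG TCP: don't count the HBH jumbo option as header. */
		if (ipv6_has_hopopt_jumbo(skb))
			ihs -= sizeof(struct hop_jumbo_hdr);

		return ihs;
	}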
 
@@ -1038,6 +1077,8 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
        if (comp_read < 1)
                return;
 
+       apc->eth_stats.tx_cqes = comp_read;
+
        for (i = 0; i < comp_read; i++) {
                struct mana_tx_comp_oob *cqe_oob;
 
@@ -1064,6 +1105,7 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
                case CQE_TX_VLAN_TAGGING_VIOLATION:
                        WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
                                  cqe_oob->cqe_hdr.cqe_type);
+                       apc->eth_stats.tx_cqe_err++;
                        break;
 
                default:
@@ -1072,6 +1114,7 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
                         */
                        WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
                                  cqe_oob->cqe_hdr.cqe_type);
+                       apc->eth_stats.tx_cqe_unknown_type++;
                        return;
                }
 
@@ -1118,6 +1161,8 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
                WARN_ON_ONCE(1);
 
        cq->work_done = pkt_transmitted;
+
+       apc->eth_stats.tx_cqes -= pkt_transmitted;
 }
 
 static void mana_post_pkt_rxq(struct mana_rxq *rxq)
@@ -1252,12 +1297,15 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
        struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
        struct net_device *ndev = rxq->ndev;
        struct mana_recv_buf_oob *rxbuf_oob;
+       struct mana_port_context *apc;
        struct device *dev = gc->dev;
        void *new_buf, *old_buf;
        struct page *new_page;
        u32 curr, pktlen;
        dma_addr_t da;
 
+       apc = netdev_priv(ndev);
+
        switch (oob->cqe_hdr.cqe_type) {
        case CQE_RX_OKAY:
                break;
@@ -1270,6 +1318,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
 
        case CQE_RX_COALESCED_4:
                netdev_err(ndev, "RX coalescing is unsupported\n");
+               apc->eth_stats.rx_coalesced_err++;
                return;
 
        case CQE_RX_OBJECT_FENCE:
@@ -1279,6 +1328,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
        default:
                netdev_err(ndev, "Unknown RX CQE type = %d\n",
                           oob->cqe_hdr.cqe_type);
+               apc->eth_stats.rx_cqe_unknown_type++;
                return;
        }
 
@@ -1341,11 +1391,15 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
 {
        struct gdma_comp *comp = cq->gdma_comp_buf;
        struct mana_rxq *rxq = cq->rxq;
+       struct mana_port_context *apc;
        int comp_read, i;
 
+       apc = netdev_priv(rxq->ndev);
+
        comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
        WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
 
+       apc->eth_stats.rx_cqes = comp_read;
        rxq->xdp_flush = false;
 
        for (i = 0; i < comp_read; i++) {
@@ -1357,6 +1411,8 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
                        return;
 
                mana_process_rx_cqe(rxq, cq, &comp[i]);
+
+               apc->eth_stats.rx_cqes--;
        }
 
        if (rxq->xdp_flush)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index 5b776a33a81773f23b3a47bd93e43a8a3d5f8dc6..a64c81410dc14cfcd44d263f0e79cc3337805ca4 100644
@@ -13,6 +13,15 @@ static const struct {
 } mana_eth_stats[] = {
        {"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
        {"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
+       {"tx_cqes", offsetof(struct mana_ethtool_stats, tx_cqes)},
+       {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
+       {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+                                       tx_cqe_unknown_type)},
+       {"rx_cqes", offsetof(struct mana_ethtool_stats, rx_cqes)},
+       {"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
+                                       rx_coalesced_err)},
+       {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+                                       rx_cqe_unknown_type)},
 };
 
 static int mana_get_sset_count(struct net_device *ndev, int stringset)
@@ -23,7 +32,8 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
        if (stringset != ETH_SS_STATS)
                return -EINVAL;
 
-       return ARRAY_SIZE(mana_eth_stats) + num_queues * 8;
+       return ARRAY_SIZE(mana_eth_stats) + num_queues *
+                               (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
 }
 
 static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
@@ -61,6 +71,22 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
                p += ETH_GSTRING_LEN;
                sprintf(p, "tx_%d_xdp_xmit", i);
                p += ETH_GSTRING_LEN;
+               sprintf(p, "tx_%d_tso_packets", i);
+               p += ETH_GSTRING_LEN;
+               sprintf(p, "tx_%d_tso_bytes", i);
+               p += ETH_GSTRING_LEN;
+               sprintf(p, "tx_%d_tso_inner_packets", i);
+               p += ETH_GSTRING_LEN;
+               sprintf(p, "tx_%d_tso_inner_bytes", i);
+               p += ETH_GSTRING_LEN;
+               sprintf(p, "tx_%d_long_pkt_fmt", i);
+               p += ETH_GSTRING_LEN;
+               sprintf(p, "tx_%d_short_pkt_fmt", i);
+               p += ETH_GSTRING_LEN;
+               sprintf(p, "tx_%d_csum_partial", i);
+               p += ETH_GSTRING_LEN;
+               sprintf(p, "tx_%d_mana_map_err", i);
+               p += ETH_GSTRING_LEN;
        }
 }
 
@@ -78,6 +104,14 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
        u64 xdp_xmit;
        u64 xdp_drop;
        u64 xdp_tx;
+       u64 tso_packets;
+       u64 tso_bytes;
+       u64 tso_inner_packets;
+       u64 tso_inner_bytes;
+       u64 long_pkt_fmt;
+       u64 short_pkt_fmt;
+       u64 csum_partial;
+       u64 mana_map_err;
        int q, i = 0;
 
        if (!apc->port_is_up)
@@ -113,11 +147,27 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
                        packets = tx_stats->packets;
                        bytes = tx_stats->bytes;
                        xdp_xmit = tx_stats->xdp_xmit;
+                       tso_packets = tx_stats->tso_packets;
+                       tso_bytes = tx_stats->tso_bytes;
+                       tso_inner_packets = tx_stats->tso_inner_packets;
+                       tso_inner_bytes = tx_stats->tso_inner_bytes;
+                       long_pkt_fmt = tx_stats->long_pkt_fmt;
+                       short_pkt_fmt = tx_stats->short_pkt_fmt;
+                       csum_partial = tx_stats->csum_partial;
+                       mana_map_err = tx_stats->mana_map_err;
                } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
 
                data[i++] = packets;
                data[i++] = bytes;
                data[i++] = xdp_xmit;
+               data[i++] = tso_packets;
+               data[i++] = tso_bytes;
+               data[i++] = tso_inner_packets;
+               data[i++] = tso_inner_bytes;
+               data[i++] = long_pkt_fmt;
+               data[i++] = short_pkt_fmt;
+               data[i++] = csum_partial;
+               data[i++] = mana_map_err;
        }
 }
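
ethtool requires get_sset_count(), get_strings() and
get_ethtool_stats() to agree on both the number and the order of
entries; that is why the per-queue multiplier above became
MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT and why the string list and
the data[i++] sequence match one-for-one. The snapshot loop pairs
u64_stats_fetch_begin() with u64_stats_fetch_retry() so a concurrent
writer forces a re-read. A standalone sketch of that reader side,
reusing the illustrative struct from the earlier sketch:

	/* Reader side: retry the snapshot if a writer raced with us. */
	static void demo_read_pkt_fmt(struct demo_tx_stats *s,
				      u64 *short_fmt, u64 *long_fmt)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			*short_fmt = s->short_pkt_fmt;
			*long_fmt = s->long_pkt_fmt;
		} while (u64_stats_fetch_retry(&s->syncp, start));
	}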
 
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 3bb579962a14aad1cf7be2c16b0e2e3451c45df6..bb11a6535d80648739aa6302bea52b7a67164c7b 100644
@@ -48,6 +48,10 @@ enum TRI_STATE {
 
 #define MAX_PORTS_IN_MANA_DEV 256
 
+/* Update this count whenever the respective structures are changed */
+#define MANA_STATS_RX_COUNT 5
+#define MANA_STATS_TX_COUNT 11
+
 struct mana_stats_rx {
        u64 packets;
        u64 bytes;
@@ -61,6 +65,14 @@ struct mana_stats_tx {
        u64 packets;
        u64 bytes;
        u64 xdp_xmit;
+       u64 tso_packets;
+       u64 tso_bytes;
+       u64 tso_inner_packets;
+       u64 tso_inner_bytes;
+       u64 short_pkt_fmt;
+       u64 long_pkt_fmt;
+       u64 csum_partial;
+       u64 mana_map_err;
        struct u64_stats_sync syncp;
 };
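
MANA_STATS_TX_COUNT (and its RX counterpart) must be bumped by hand
whenever a counter is added here, per the comment in the hunk above.
Because every u64 counter sits contiguously ahead of syncp, a
compile-time guard could catch a missed update. This check is a
suggestion, not part of the patch, and assumes the counters remain
consecutive, naturally aligned u64 fields with no padding:

	#include <linux/build_bug.h>

	/* Hypothetical guard: the offset of syncp equals the number of
	 * u64 counters times sizeof(u64), so a stale count breaks the
	 * build.
	 */
	static_assert(offsetof(struct mana_stats_tx, syncp) ==
		      MANA_STATS_TX_COUNT * sizeof(u64),
		      "MANA_STATS_TX_COUNT out of sync with struct mana_stats_tx");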
 
@@ -331,6 +343,12 @@ struct mana_tx_qp {
 struct mana_ethtool_stats {
        u64 stop_queue;
        u64 wake_queue;
+       u64 tx_cqes;
+       u64 tx_cqe_err;
+       u64 tx_cqe_unknown_type;
+       u64 rx_cqes;
+       u64 rx_coalesced_err;
+       u64 rx_cqe_unknown_type;
 };
 
 struct mana_context {