net: ethernet: mtk_eth_soc: remove unnecessary TX queue stops
author Felix Fietkau <nbd@nbd.name>
Fri, 23 Apr 2021 05:20:59 +0000 (22:20 -0700)
committer David S. Miller <davem@davemloft.net>
Fri, 23 Apr 2021 20:31:58 +0000 (13:31 -0700)
When running short on descriptors, only stop the queue for the netdev that
TX was attempted on. By the time something tries to send on the other
netdev, the ring may already have more room.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mediatek/mtk_eth_soc.c

index d992d4f1f40059a0773eb0c1b94ee5db8422075c..e6f832dde9a6aa368938b0dbf6846776cde7317a 100644
@@ -1131,17 +1131,6 @@ static void mtk_wake_queue(struct mtk_eth *eth)
        }
 }
 
-static void mtk_stop_queue(struct mtk_eth *eth)
-{
-       int i;
-
-       for (i = 0; i < MTK_MAC_COUNT; i++) {
-               if (!eth->netdev[i])
-                       continue;
-               netif_stop_queue(eth->netdev[i]);
-       }
-}
-
 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct mtk_mac *mac = netdev_priv(dev);
@@ -1162,7 +1151,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        tx_num = mtk_cal_txd_req(skb);
        if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
-               mtk_stop_queue(eth);
+               netif_stop_queue(dev);
                netif_err(eth, tx_queued, dev,
                          "Tx Ring full when queue awake!\n");
                spin_unlock(&eth->page_lock);
@@ -1188,7 +1177,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
                goto drop;
 
        if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
-               mtk_stop_queue(eth);
+               netif_stop_queue(dev);
 
        spin_unlock(&eth->page_lock);
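
The wake side of this pairing is the mtk_wake_queue() whose closing braces appear as context in the first hunk. Its body is elided by the diff, but a minimal sketch, assuming it simply mirrors the removed mtk_stop_queue() with netif_wake_queue(), would be:

/*
 * Sketch of the existing wake path (body not shown in the diff above).
 * Assumption: it mirrors the removed mtk_stop_queue(), calling
 * netif_wake_queue() for every registered netdev.
 */
static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

Leaving the wake path broadcast should be fine even with per-device stops: both MACs share the one TX ring, so reclaimed descriptors help either sender, and netif_wake_queue() on a queue that was never stopped is effectively a no-op.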