From: Felix Fietkau
Date: Wed, 26 Aug 2020 16:55:54 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: fix unnecessary tx queue stops

When running short on descriptors, only stop the queue for the netdev
that tx was attempted for. By the time something tries to send on the
other netdev, the ring might already have more room.

Signed-off-by: Felix Fietkau
---
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1147,17 +1147,6 @@ static void mtk_wake_queue(struct mtk_et
 	}
 }
 
-static void mtk_stop_queue(struct mtk_eth *eth)
-{
-	int i;
-
-	for (i = 0; i < MTK_MAC_COUNT; i++) {
-		if (!eth->netdev[i])
-			continue;
-		netif_stop_queue(eth->netdev[i]);
-	}
-}
-
 static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mtk_mac *mac = netdev_priv(dev);
@@ -1178,7 +1167,7 @@ static int mtk_start_xmit(struct sk_buff
 
 	tx_num = mtk_cal_txd_req(skb);
 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
-		mtk_stop_queue(eth);
+		netif_stop_queue(dev);
 		netif_err(eth, tx_queued, dev,
 			  "Tx Ring full when queue awake!\n");
 		spin_unlock(&eth->page_lock);
@@ -1204,7 +1193,7 @@ static int mtk_start_xmit(struct sk_buff
 		goto drop;
 
 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
-		mtk_stop_queue(eth);
+		netif_stop_queue(dev);
 
 	spin_unlock(&eth->page_lock);
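
Note for readers unfamiliar with the netdev queue API: the change relies on the
standard pairing of netif_stop_queue() in the xmit path with netif_wake_queue()
in the TX-completion path, applied per net_device instead of to every netdev
sharing the ring. The sketch below is illustrative only and is not part of the
patch; the example_* names and struct are made up, while netif_stop_queue(),
netif_queue_stopped(), netif_wake_queue() and atomic_read() are the real kernel
interfaces.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/atomic.h>

/* Hypothetical ring shared by two netdevs, as in this driver. */
struct example_ring {
	atomic_t free_count;	/* free TX descriptors */
	int thresh;		/* low-water mark */
};

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev,
				struct example_ring *ring)
{
	/* descriptor setup for skb omitted */

	/* Stop only this netdev's queue when the shared ring runs low;
	 * the other netdev keeps sending until it hits the limit itself.
	 */
	if (atomic_read(&ring->free_count) <= ring->thresh)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}

/* TX completion path: once descriptors have been freed, wake the queue of
 * the netdev that was stopped, if enough room is available again.
 */
static void example_tx_done(struct net_device *dev, struct example_ring *ring)
{
	if (netif_queue_stopped(dev) &&
	    atomic_read(&ring->free_count) > ring->thresh)
		netif_wake_queue(dev);
}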