target/linux/generic/pending-5.4/770-03-net-ethernet-mtk_eth_soc-fix-unnecessary-tx-queue-st.patch
From: Felix Fietkau <nbd@nbd.name>
Date: Wed, 26 Aug 2020 16:55:54 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: fix unnecessary tx queue
 stops

When running short on descriptors, only stop the queue for the netdev that tx
was attempted for. By the time something tries to send on the other netdev,
the ring might have some more room already.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
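
For context, the transmit path after this change reads roughly as follows.
This is a condensed sketch reconstructed from the hunks below, not verbatim
driver code: the mtk_tx_map() call that actually queues the skb, statistics,
and the drop path are elided.

	static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct mtk_mac *mac = netdev_priv(dev);
		struct mtk_eth *eth = mac->hw;
		struct mtk_tx_ring *ring = &eth->tx_ring;
		int tx_num;

		spin_lock(&eth->page_lock);

		tx_num = mtk_cal_txd_req(skb);
		if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
			/* Stop only this netdev's queue; the other MAC shares
			 * the ring but may find room by the time it sends. */
			netif_stop_queue(dev);
			netif_err(eth, tx_queued, dev,
				  "Tx Ring full when queue awake!\n");
			spin_unlock(&eth->page_lock);
			return NETDEV_TX_BUSY;
		}

		/* ... map and queue the skb on the shared TX ring ... */

		/* Running low after queuing: again stop only this queue. */
		if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
			netif_stop_queue(dev);

		spin_unlock(&eth->page_lock);
		return NETDEV_TX_OK;
	}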

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1147,17 +1147,6 @@ static void mtk_wake_queue(struct mtk_et
 	}
 }
 
-static void mtk_stop_queue(struct mtk_eth *eth)
-{
-	int i;
-
-	for (i = 0; i < MTK_MAC_COUNT; i++) {
-		if (!eth->netdev[i])
-			continue;
-		netif_stop_queue(eth->netdev[i]);
-	}
-}
-
 static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mtk_mac *mac = netdev_priv(dev);
@@ -1178,7 +1167,7 @@ static int mtk_start_xmit(struct sk_buff
 
 	tx_num = mtk_cal_txd_req(skb);
 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
-		mtk_stop_queue(eth);
+		netif_stop_queue(dev);
 		netif_err(eth, tx_queued, dev,
 			  "Tx Ring full when queue awake!\n");
 		spin_unlock(&eth->page_lock);
@@ -1204,7 +1193,7 @@ static int mtk_start_xmit(struct sk_buff
 		goto drop;
 
 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
-		mtk_stop_queue(eth);
+		netif_stop_queue(dev);
 
 	spin_unlock(&eth->page_lock);
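
The wake side is left untouched: once TX completions free enough descriptors
on the shared ring, the driver still wakes every MAC's queue. Judging by the
retained mtk_wake_queue() tail in the first hunk's context and by symmetry
with the removed mtk_stop_queue(), that helper presumably reads:

	static void mtk_wake_queue(struct mtk_eth *eth)
	{
		int i;

		for (i = 0; i < MTK_MAC_COUNT; i++) {
			if (!eth->netdev[i])
				continue;
			netif_wake_queue(eth->netdev[i]);
		}
	}

Waking a queue that was never stopped is a no-op, so waking all MACs remains
correct now that stopping has become per-device.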