Diffstat (limited to 'target/linux/mediatek/patches-4.4/0065-net-mediatek-fix-stop-and-wakeup-of-queue.patch')
-rw-r--r--  target/linux/mediatek/patches-4.4/0065-net-mediatek-fix-stop-and-wakeup-of-queue.patch | 89
1 file changed, 89 insertions, 0 deletions
diff --git a/target/linux/mediatek/patches-4.4/0065-net-mediatek-fix-stop-and-wakeup-of-queue.patch b/target/linux/mediatek/patches-4.4/0065-net-mediatek-fix-stop-and-wakeup-of-queue.patch
new file mode 100644
index 0000000000..43b083d5a6
--- /dev/null
+++ b/target/linux/mediatek/patches-4.4/0065-net-mediatek-fix-stop-and-wakeup-of-queue.patch
@@ -0,0 +1,89 @@
+From afc838dde560ab584d3fb0e4b011e4a6770dab3d Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Tue, 29 Mar 2016 16:41:07 +0200
+Subject: [PATCH 065/102] net: mediatek: fix stop and wakeup of queue
+
+The driver supports 2 MACs. Both run on the same DMA ring. If we go
+above/below the TX ring's threshold value, we always need to wake/stop
+the queue of both devices. Not doing so can cause TX stalls and packet
+drops on one of the devices.
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 37 +++++++++++++++++++--------
+ 1 file changed, 27 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 293ea59..04bdb9d 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -684,6 +684,28 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
+ return nfrags;
+ }
+
++static void mtk_wake_queue(struct mtk_eth *eth)
++{
++ int i;
++
++ for (i = 0; i < MTK_MAC_COUNT; i++) {
++ if (!eth->netdev[i])
++ continue;
++ netif_wake_queue(eth->netdev[i]);
++ }
++}
++
++static void mtk_stop_queue(struct mtk_eth *eth)
++{
++ int i;
++
++ for (i = 0; i < MTK_MAC_COUNT; i++) {
++ if (!eth->netdev[i])
++ continue;
++ netif_stop_queue(eth->netdev[i]);
++ }
++}
++
+ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct mtk_mac *mac = netdev_priv(dev);
+@@ -695,7 +717,7 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ tx_num = mtk_cal_txd_req(skb);
+ if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
+- netif_stop_queue(dev);
++ mtk_stop_queue(eth);
+ netif_err(eth, tx_queued, dev,
+ "Tx Ring full when queue awake!\n");
+ return NETDEV_TX_BUSY;
+@@ -720,10 +742,10 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ goto drop;
+
+ if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
+- netif_stop_queue(dev);
++ mtk_stop_queue(eth);
+ if (unlikely(atomic_read(&ring->free_count) >
+ ring->thresh))
+- netif_wake_queue(dev);
++ mtk_wake_queue(eth);
+ }
+
+ return NETDEV_TX_OK;
+@@ -897,13 +919,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
+ if (!total)
+ return 0;
+
+- for (i = 0; i < MTK_MAC_COUNT; i++) {
+- if (!eth->netdev[i] ||
+- unlikely(!netif_queue_stopped(eth->netdev[i])))
+- continue;
+- if (atomic_read(&ring->free_count) > ring->thresh)
+- netif_wake_queue(eth->netdev[i]);
+- }
++ if (atomic_read(&ring->free_count) > ring->thresh)
++ mtk_wake_queue(eth);
+
+ return total;
+ }
+--
+1.7.10.4
+
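For context beyond the patch itself: because both MACs transmit over a single shared TX DMA ring, any stop/wake decision has to be applied to every netdev attached to that ring, not only the one that queued the packet. The following is a minimal, user-space sketch of that pattern under simplified assumptions; struct netdev, struct shared_ring, and the helper names are illustrative stand-ins, not the kernel structures or the driver's API.

/*
 * Sketch: two interfaces feed one TX ring, so stop/wake must be
 * fanned out to every interface attached to the ring.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAC_COUNT 2

struct netdev {
	const char *name;
	bool stopped;
};

struct shared_ring {
	int free_count;                   /* free TX descriptors */
	int thresh;                       /* stop/wake threshold */
	struct netdev *netdev[MAC_COUNT]; /* all MACs feeding this ring */
};

static void ring_stop_all(struct shared_ring *ring)
{
	for (int i = 0; i < MAC_COUNT; i++)
		if (ring->netdev[i])
			ring->netdev[i]->stopped = true;
}

static void ring_wake_all(struct shared_ring *ring)
{
	for (int i = 0; i < MAC_COUNT; i++)
		if (ring->netdev[i])
			ring->netdev[i]->stopped = false;
}

/* Called after queueing a packet or reclaiming descriptors. */
static void ring_update_queues(struct shared_ring *ring)
{
	if (ring->free_count <= ring->thresh)
		ring_stop_all(ring);   /* stop both, not just the sender */
	else
		ring_wake_all(ring);   /* wake both once space is back */
}

int main(void)
{
	struct netdev d0 = { "eth0", false }, d1 = { "eth1", false };
	struct shared_ring ring = { .free_count = 1, .thresh = 4,
				    .netdev = { &d0, &d1 } };

	ring_update_queues(&ring);
	printf("%s stopped=%d, %s stopped=%d\n",
	       d0.name, d0.stopped, d1.name, d1.stopped);

	ring.free_count = 16;   /* TX completion freed descriptors */
	ring_update_queues(&ring);
	printf("%s stopped=%d, %s stopped=%d\n",
	       d0.name, d0.stopped, d1.name, d1.stopped);
	return 0;
}

Run as a plain C program, the first update stops both queues because free_count is below the threshold, and the second wakes both once descriptors are reclaimed, mirroring the mtk_stop_queue()/mtk_wake_queue() fan-out the patch introduces.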