| field | value | date |
|---|---|---|
| author | John Crispin <john@openwrt.org> | 2016-04-01 07:11:18 +0000 |
| committer | John Crispin <john@openwrt.org> | 2016-04-01 07:11:18 +0000 |
| commit | 41ba4b04c84a80a441b30e167eab0d45d3b6c009 (patch) | |
| tree | ce7011c8604b5052af66008a7cb361470af41058 /target/linux/mediatek/patches-4.4/0071-net-mediatek-fix-TX-locking.patch | |
| parent | 53a74644b0ec68015abb13ff1e5bd4fe6844bd6e (diff) | |
| download | upstream-41ba4b04c84a80a441b30e167eab0d45d3b6c009.tar.gz, upstream-41ba4b04c84a80a441b30e167eab0d45d3b6c009.tar.bz2, upstream-41ba4b04c84a80a441b30e167eab0d45d3b6c009.zip | |
mediatek: update patches
add fixes for:
* ethernet
* cpufreq
* nand
* a7-timer
Signed-off-by: John Crispin <blogic@openwrt.org>
SVN-Revision: 49098
Diffstat (limited to 'target/linux/mediatek/patches-4.4/0071-net-mediatek-fix-TX-locking.patch')
| mode | path | insertions |
|---|---|---|
| -rw-r--r-- | target/linux/mediatek/patches-4.4/0071-net-mediatek-fix-TX-locking.patch | 98 |

1 file changed, 98 insertions, 0 deletions
diff --git a/target/linux/mediatek/patches-4.4/0071-net-mediatek-fix-TX-locking.patch b/target/linux/mediatek/patches-4.4/0071-net-mediatek-fix-TX-locking.patch
new file mode 100644
index 0000000000..011fad2e61
--- /dev/null
+++ b/target/linux/mediatek/patches-4.4/0071-net-mediatek-fix-TX-locking.patch
@@ -0,0 +1,98 @@
+From b9df14f712866925856c0ffb2d899511c21e1b8a Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Tue, 29 Mar 2016 17:20:01 +0200
+Subject: [PATCH 71/78] net: mediatek: fix TX locking
+
+Inside the TX path there is a lock inside the tx_map function. This is
+however too late. The patch moves the lock to the start of the xmit
+function right before the free count check of the DMA ring happens.
+If we do not do this, the code becomes racy leading to TX stalls and
+dropped packets. This happens as there are 2 netdevs running on the
+same physical DMA ring.
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 26eeb1a..67b18f9 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -536,7 +536,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
+ 	struct mtk_eth *eth = mac->hw;
+ 	struct mtk_tx_dma *itxd, *txd;
+ 	struct mtk_tx_buf *tx_buf;
+-	unsigned long flags;
+ 	dma_addr_t mapped_addr;
+ 	unsigned int nr_frags;
+ 	int i, n_desc = 1;
+@@ -568,11 +567,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
+ 	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+ 		return -ENOMEM;
+ 
+-	/* normally we can rely on the stack not calling this more than once,
+-	 * however we have 2 queues running ont he same ring so we need to lock
+-	 * the ring access
+-	 */
+-	spin_lock_irqsave(&eth->page_lock, flags);
+ 	WRITE_ONCE(itxd->txd1, mapped_addr);
+ 	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+ 	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+@@ -632,8 +626,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
+ 	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
+ 				(!nr_frags * TX_DMA_LS0)));
+ 
+-	spin_unlock_irqrestore(&eth->page_lock, flags);
+-
+ 	netdev_sent_queue(dev, skb->len);
+ 	skb_tx_timestamp(skb);
+ 
+@@ -661,8 +653,6 @@ err_dma:
+ 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
+ 	} while (itxd != txd);
+ 
+-	spin_unlock_irqrestore(&eth->page_lock, flags);
+-
+ 	return -ENOMEM;
+ }
+ 
+@@ -712,14 +702,22 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	struct mtk_eth *eth = mac->hw;
+ 	struct mtk_tx_ring *ring = &eth->tx_ring;
+ 	struct net_device_stats *stats = &dev->stats;
++	unsigned long flags;
+ 	bool gso = false;
+ 	int tx_num;
+ 
++	/* normally we can rely on the stack not calling this more than once,
++	 * however we have 2 queues running ont he same ring so we need to lock
++	 * the ring access
++	 */
++	spin_lock_irqsave(&eth->page_lock, flags);
++
+ 	tx_num = mtk_cal_txd_req(skb);
+ 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
+ 		mtk_stop_queue(eth);
+ 		netif_err(eth, tx_queued, dev,
+ 			  "Tx Ring full when queue awake!\n");
++		spin_unlock_irqrestore(&eth->page_lock, flags);
+ 		return NETDEV_TX_BUSY;
+ 	}
+ 
+@@ -747,10 +745,12 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 				 ring->thresh))
+ 			mtk_wake_queue(eth);
+ 	}
++	spin_unlock_irqrestore(&eth->page_lock, flags);
+ 
+ 	return NETDEV_TX_OK;
+ 
+ drop:
++	spin_unlock_irqrestore(&eth->page_lock, flags);
+ 	stats->tx_dropped++;
+ 	dev_kfree_skb(skb);
+ 	return NETDEV_TX_OK;
+-- 
+1.7.10.4
+
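The changelog above describes a classic check-then-reserve race: the free-count check in mtk_start_xmit() and the descriptor reservation in mtk_tx_map() were not covered by the same lock, so two netdevs feeding one physical DMA ring could both pass the check and oversubscribe it. Below is a minimal user-space sketch of the two locking shapes, not the driver code itself: fake_ring, xmit_racy(), xmit_fixed(), and the descriptor counts are all invented for illustration, with a pthread mutex standing in for the driver's eth->page_lock spinlock and two threads standing in for the two netdevs. xmit_racy() mirrors the pre-patch shape (check before lock); xmit_fixed() mirrors the patched mtk_start_xmit(), which takes the lock before the check and releases it on every exit path.

```c
/* race_sketch.c -- build with: cc -pthread race_sketch.c
 * Hypothetical analogue of the locking fix; no kernel APIs are used. */
#include <pthread.h>
#include <stdio.h>

#define RING_SIZE      4   /* deliberately tiny to widen the race window */
#define DESC_PER_FRAME 3   /* descriptors one "frame" consumes */
#define FRAMES         100000

struct fake_ring {
	pthread_mutex_t lock;   /* stands in for eth->page_lock */
	int free_count;         /* free TX descriptors */
};

static struct fake_ring ring = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.free_count = RING_SIZE,
};

static int overruns;        /* times the ring was oversubscribed */

/* Broken shape (pre-patch): the free-count read below is unlocked, so
 * both senders can see enough room and reserve past the ring's end. */
static int xmit_racy(void)
{
	if (ring.free_count < DESC_PER_FRAME)   /* unlocked check: the bug */
		return -1;                      /* NETDEV_TX_BUSY analogue */
	pthread_mutex_lock(&ring.lock);         /* lock taken too late */
	ring.free_count -= DESC_PER_FRAME;
	if (ring.free_count < 0)                /* descriptor overrun */
		overruns++;
	pthread_mutex_unlock(&ring.lock);
	return 0;
}

/* Fixed shape (this patch): lock first, then check, and unlock on
 * every exit path -- busy and success alike. */
static int xmit_fixed(void)
{
	pthread_mutex_lock(&ring.lock);
	if (ring.free_count < DESC_PER_FRAME) {
		pthread_mutex_unlock(&ring.lock);
		return -1;
	}
	ring.free_count -= DESC_PER_FRAME;
	pthread_mutex_unlock(&ring.lock);
	return 0;
}

/* Swap in xmit_racy to watch overruns accumulate. */
static int (*xmit)(void) = xmit_fixed;

static void *sender(void *unused)
{
	(void)unused;
	for (int i = 0; i < FRAMES; i++) {
		if (xmit() == 0) {
			/* pretend the hardware completed the frame */
			pthread_mutex_lock(&ring.lock);
			ring.free_count += DESC_PER_FRAME;
			pthread_mutex_unlock(&ring.lock);
		}
	}
	return NULL;
}

int main(void)
{
	/* two threads model the two netdevs sharing one physical ring */
	pthread_t a, b;
	pthread_create(&a, NULL, sender, NULL);
	pthread_create(&b, NULL, sender, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("overruns: %d, free descriptors at exit: %d (ring size %d)\n",
	       overruns, ring.free_count, RING_SIZE);
	return 0;
}
```

With xmit_fixed the overrun count stays at zero; switching the xmit pointer to xmit_racy typically makes the counter climb within a single run, which is the user-space analogue of the TX stalls and dropped packets the patch fixes. The fix costs a slightly longer lock hold time in the hot path, which is why the driver keeps the critical section to the check plus the ring writes.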