Diffstat (limited to 'target/linux/mediatek/patches-4.4/0088-net-next-mediatek-add-support-for-IRQ-grouping.patch')
-rw-r--r-- | target/linux/mediatek/patches-4.4/0088-net-next-mediatek-add-support-for-IRQ-grouping.patch | 403
1 file changed, 403 insertions(+), 0 deletions(-)
diff --git a/target/linux/mediatek/patches-4.4/0088-net-next-mediatek-add-support-for-IRQ-grouping.patch b/target/linux/mediatek/patches-4.4/0088-net-next-mediatek-add-support-for-IRQ-grouping.patch
new file mode 100644
index 0000000000..95fad36e19
--- /dev/null
+++ b/target/linux/mediatek/patches-4.4/0088-net-next-mediatek-add-support-for-IRQ-grouping.patch
@@ -0,0 +1,403 @@
+From f43e3aaaacaaf0482f0aaa6fbad03572f3a0c614 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Wed, 23 Mar 2016 18:31:48 +0100
+Subject: [PATCH 88/91] net-next: mediatek: add support for IRQ grouping
+
+The ethernet core has 3 IRQs. Using the IRQ grouping registers we are able
+to separate TX and RX IRQs, which allows us to service them on separate
+cores. This patch splits the IRQ handler into 2 separate functions, one for
+TX and another for RX. The TX housekeeping is split out of the RX NAPI
+handler and serviced by a dedicated TX NAPI instance instead.
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 165 ++++++++++++++++++---------
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 16 ++-
+ 2 files changed, 125 insertions(+), 56 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index f821820..b5e364c 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -789,7 +789,7 @@ drop:
+ }
+
+ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+- struct mtk_eth *eth, u32 rx_intr)
++ struct mtk_eth *eth)
+ {
+ struct mtk_rx_ring *ring = &eth->rx_ring;
+ int idx = ring->calc_idx;
+@@ -877,19 +877,18 @@ release_desc:
+ }
+
+ if (done < budget)
+- mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS);
++ mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
+
+ return done;
+ }
+
+-static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
++static int mtk_poll_tx(struct mtk_eth *eth, int budget)
+ {
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_tx_dma *desc;
+ struct sk_buff *skb;
+ struct mtk_tx_buf *tx_buf;
+- int total = 0, done = 0;
+- unsigned int bytes = 0;
++ unsigned int bytes = 0, done = 0;
+ u32 cpu, dma;
+ static int condition;
+ int i;
+@@ -941,64 +940,82 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
+ continue;
+ netdev_completed_queue(eth->netdev[i], done, bytes);
+ }
++
+ /* read hw index again make sure no new tx packet */
+- if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
+- *tx_again = true;
+- else
++ if (cpu == dma && cpu == mtk_r32(eth, MTK_QTX_DRX_PTR))
+ mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
+
+- if (!total)
+- return 0;
+-
+ if (atomic_read(&ring->free_count) > ring->thresh)
+ mtk_wake_queue(eth);
+
+- return total;
++ return done;
+ }
+
+-static int mtk_poll(struct napi_struct *napi, int budget)
++static void mtk_handle_status_irq(struct mtk_eth *eth)
+ {
+- struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
+- u32 status, status2, mask, tx_intr, rx_intr, status_intr;
+- int tx_done, rx_done;
+- bool tx_again = false;
+-
+- status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+- status2 = mtk_r32(eth, MTK_INT_STATUS2);
+- tx_intr = MTK_TX_DONE_INT;
+- rx_intr = MTK_RX_DONE_INT;
+- status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
+- tx_done = 0;
+- rx_done = 0;
+- tx_again = 0;
+-
+- if (status & tx_intr)
+- tx_done = mtk_poll_tx(eth, budget, &tx_again);
+-
+- if (status & rx_intr)
+- rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
++ u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
++ u32 status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
+
+ if (unlikely(status2 & status_intr)) {
+ mtk_stats_update(eth);
+ mtk_w32(eth, status_intr, MTK_INT_STATUS2);
+ }
++}
++
++static int mtk_napi_tx(struct napi_struct *napi, int budget)
++{
++ struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
++ u32 status, mask;
++ int tx_done = 0;
++
++ mtk_handle_status_irq(eth);
++
++ status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
++ tx_done = mtk_poll_tx(eth, budget);
++ if (unlikely(netif_msg_intr(eth))) {
++ mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
++ dev_info(eth->dev,
++ "done tx %d, intr 0x%08x/0x%x\n",
++ tx_done, status, mask);
++ }
++
++ if (tx_done == budget)
++ return budget;
++
++ status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
++ if (status & MTK_TX_DONE_INT)
++ return budget;
++
++ napi_complete(napi);
++ mtk_irq_enable(eth, MTK_TX_DONE_INT);
++
++ return tx_done;
++}
++
++static int mtk_napi_rx(struct napi_struct *napi, int budget)
++{
++ struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
++ u32 status, mask;
++ int rx_done = 0;
+
++ status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
++ rx_done = mtk_poll_rx(napi, budget, eth);
+ if (unlikely(netif_msg_intr(eth))) {
+ mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
+- netdev_info(eth->netdev[0],
+- "done tx %d, rx %d, intr 0x%08x/0x%x\n",
+- tx_done, rx_done, status, mask);
++ dev_info(eth->dev,
++ "done rx %d, intr 0x%08x/0x%x\n",
++ rx_done, status, mask);
+ }
+
+- if (tx_again || rx_done == budget)
++ if (rx_done == budget)
+ return budget;
+
+ status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+- if (status & (tx_intr | rx_intr))
++ if (status & MTK_RX_DONE_INT)
+ return budget;
+
+ napi_complete(napi);
+- mtk_irq_enable(eth, tx_intr | rx_intr);
++ mtk_irq_enable(eth, MTK_RX_DONE_INT);
+
+ return rx_done;
+ }
+@@ -1235,22 +1252,44 @@ static void mtk_tx_timeout(struct net_device *dev)
+ schedule_work(&eth->pending_work);
+ }
+
+-static irqreturn_t mtk_handle_irq(int irq, void *_eth)
++static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
+ {
+ struct mtk_eth *eth = _eth;
+ u32 status;
+
+ status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
++ status &= ~MTK_TX_DONE_INT;
++
+ if (unlikely(!status))
+ return IRQ_NONE;
+
+- if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
++ if (status & MTK_RX_DONE_INT) {
+ if (likely(napi_schedule_prep(&eth->rx_napi)))
+ __napi_schedule(&eth->rx_napi);
+- } else {
+- mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
++ mtk_irq_disable(eth, MTK_RX_DONE_INT);
+ }
+- mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));
++ mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
++{
++ struct mtk_eth *eth = _eth;
++ u32 status;
++
++ status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
++ status &= ~MTK_RX_DONE_INT;
++
++ if (unlikely(!status))
++ return IRQ_NONE;
++
++ if (status & MTK_TX_DONE_INT) {
++ if (likely(napi_schedule_prep(&eth->tx_napi)))
++ __napi_schedule(&eth->tx_napi);
++ mtk_irq_disable(eth, MTK_TX_DONE_INT);
++ }
++ mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
+
+ return IRQ_HANDLED;
+ }
+@@ -1263,7 +1302,7 @@ static void mtk_poll_controller(struct net_device *dev)
+ u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
+
+ mtk_irq_disable(eth, int_mask);
+- mtk_handle_irq(dev->irq, dev);
++ mtk_handle_irq(dev->irq[0], dev);
+ mtk_irq_enable(eth, int_mask);
+ }
+ #endif
+@@ -1299,6 +1338,7 @@ static int mtk_open(struct net_device *dev)
+ if (err)
+ return err;
+
++ napi_enable(&eth->tx_napi);
+ napi_enable(&eth->rx_napi);
+ mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+ }
+@@ -1347,6 +1387,7 @@ static int mtk_stop(struct net_device *dev)
+ return 0;
+
+ mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
++ napi_disable(&eth->tx_napi);
+ napi_disable(&eth->rx_napi);
+
+ mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
+@@ -1384,7 +1425,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
+ /* Enable RX VLan Offloading */
+ mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
+
+- err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
++ err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
++ dev_name(eth->dev), eth);
++ if (err)
++ return err;
++ err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
+ dev_name(eth->dev), eth);
+ if (err)
+ return err;
+@@ -1400,7 +1445,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
+ mtk_w32(eth, 0, MTK_RST_GL);
+
+ /* FE int grouping */
+- mtk_w32(eth, 0, MTK_FE_INT_GRP);
++ mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
++ mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
++ mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
++ mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
++ mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+
+ for (i = 0; i < 2; i++) {
+ u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
+@@ -1448,7 +1497,9 @@ static void mtk_uninit(struct net_device *dev)
+ phy_disconnect(mac->phy_dev);
+ mtk_mdio_cleanup(eth);
+ mtk_irq_disable(eth, ~0);
+- free_irq(dev->irq, dev);
++ free_irq(eth->irq[0], dev);
++ free_irq(eth->irq[1], dev);
++ free_irq(eth->irq[2], dev);
+ }
+
+ static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+@@ -1723,10 +1774,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
+ dev_err(eth->dev, "error bringing up device\n");
+ goto free_netdev;
+ }
+- eth->netdev[id]->irq = eth->irq;
++ eth->netdev[id]->irq = eth->irq[0];
+ netif_info(eth, probe, eth->netdev[id],
+ "mediatek frame engine at 0x%08lx, irq %d\n",
+- eth->netdev[id]->base_addr, eth->netdev[id]->irq);
++ eth->netdev[id]->base_addr, eth->irq[0]);
+
+ return 0;
+
+@@ -1743,6 +1794,7 @@ static int mtk_probe(struct platform_device *pdev)
+ struct mtk_soc_data *soc;
+ struct mtk_eth *eth;
+ int err;
++ int i;
+
+ match = of_match_device(of_mtk_match, &pdev->dev);
+ soc = (struct mtk_soc_data *)match->data;
+@@ -1778,10 +1830,12 @@ static int mtk_probe(struct platform_device *pdev)
+ return PTR_ERR(eth->rstc);
+ }
+
+- eth->irq = platform_get_irq(pdev, 0);
+- if (eth->irq < 0) {
+- dev_err(&pdev->dev, "no IRQ resource found\n");
+- return -ENXIO;
++ for (i = 0; i < 3; i++) {
++ eth->irq[i] = platform_get_irq(pdev, i);
++ if (eth->irq[i] < 0) {
++ dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
++ return -ENXIO;
++ }
+ }
+
+ eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
+@@ -1822,7 +1876,9 @@ static int mtk_probe(struct platform_device *pdev)
+ * for NAPI to work
+ */
+ init_dummy_netdev(&eth->dummy_dev);
+- netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
++ netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
++ MTK_NAPI_WEIGHT);
++ netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
+ MTK_NAPI_WEIGHT);
+
+ platform_set_drvdata(pdev, eth);
+@@ -1843,6 +1899,7 @@ static int mtk_remove(struct platform_device *pdev)
+ clk_disable_unprepare(eth->clk_gp1);
+ clk_disable_unprepare(eth->clk_gp2);
+
++ netif_napi_del(&eth->tx_napi);
+ netif_napi_del(&eth->rx_napi);
+ mtk_cleanup(eth);
+ platform_set_drvdata(pdev, NULL);
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index 8220275..bf158f8 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -68,6 +68,10 @@
+ /* Unicast Filter MAC Address Register - High */
+ #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
+
++/* PDMA Interrupt grouping registers */
++#define MTK_PDMA_INT_GRP1 0xa50
++#define MTK_PDMA_INT_GRP2 0xa54
++
+ /* QDMA TX Queue Configuration Registers */
+ #define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
+ #define QDMA_RES_THRES 4
+@@ -124,6 +128,11 @@
+ #define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
+ MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
+
++/* QDMA Interrupt grouping registers */
++#define MTK_QDMA_INT_GRP1 0x1a20
++#define MTK_QDMA_INT_GRP2 0x1a24
++#define MTK_RLS_DONE_INT BIT(0)
++
+ /* QDMA Interrupt Status Register */
+ #define MTK_QDMA_INT_MASK 0x1A1C
+
+@@ -355,7 +364,8 @@ struct mtk_rx_ring {
+ * @dma_refcnt: track how many netdevs are using the DMA engine
+ * @tx_ring: Pointer to the memore holding info about the TX ring
+ * @rx_ring: Pointer to the memore holding info about the RX ring
+- * @rx_napi: The NAPI struct
++ * @tx_napi: The TX NAPI struct
++ * @rx_napi: The RX NAPI struct
+ * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring: physical address of scratch_ring
+ * @scratch_head: The scratch memory that scratch_ring points to.
+@@ -376,7 +386,7 @@ struct mtk_eth {
+ struct net_device dummy_dev;
+ struct net_device *netdev[MTK_MAX_DEVS];
+ struct mtk_mac *mac[MTK_MAX_DEVS];
+- int irq;
++ int irq[3];
+ u32 msg_enable;
+ unsigned long sysclk;
+ struct regmap *ethsys;
+@@ -384,6 +394,7 @@ struct mtk_eth {
+ atomic_t dma_refcnt;
+ struct mtk_tx_ring tx_ring;
+ struct mtk_rx_ring rx_ring;
++ struct napi_struct tx_napi;
+ struct napi_struct rx_napi;
+ struct mtk_tx_dma *scratch_ring;
+ dma_addr_t phy_scratch_ring;
+@@ -394,6 +405,7 @@ struct mtk_eth {
+ struct clk *clk_gp2;
+ struct mii_bus *mii_bus;
+ struct work_struct pending_work;
++
+ };
+
+ /* struct mtk_mac - the structure that holds the info about the MACs of the
+--
+1.7.10.4
+
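
For reference, the sketch below is not part of the patch above; it only shows the general shape of the split-IRQ approach the patch implements: each traffic direction gets its own interrupt line whose handler merely masks its interrupt source and schedules a per-direction NAPI instance. All my_* names, MY_* constants and the stubbed register accessor are hypothetical stand-ins for the driver's mtk_* counterparts; only well-known kernel APIs (request_irq, netif_napi_add, napi_schedule_prep, __napi_schedule, napi_complete) are used.

/*
 * Illustrative sketch only -- not part of the patch above.  Hypothetical
 * my_* names stand in for the driver's mtk_* counterparts; the register
 * accessor is stubbed out.
 */
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

#define MY_RX_DONE_INT	BIT(0)
#define MY_TX_DONE_INT	BIT(1)
#define MY_NAPI_WEIGHT	64

struct my_eth {
	struct net_device dummy_dev;	/* NAPI needs a netdev to attach to */
	struct napi_struct tx_napi;	/* serviced from the TX IRQ */
	struct napi_struct rx_napi;	/* serviced from the RX IRQ */
};

/* Stand-in for the driver's interrupt-mask accessor (cf. mtk_irq_disable). */
static void my_irq_disable(struct my_eth *eth, u32 mask)
{
	/* clear @mask in the hardware interrupt-enable register */
}

static irqreturn_t my_handle_irq_tx(int irq, void *dev_id)
{
	struct my_eth *eth = dev_id;

	/* Mask TX-done and defer the descriptor housekeeping to TX NAPI. */
	if (likely(napi_schedule_prep(&eth->tx_napi)))
		__napi_schedule(&eth->tx_napi);
	my_irq_disable(eth, MY_TX_DONE_INT);

	return IRQ_HANDLED;
}

static irqreturn_t my_handle_irq_rx(int irq, void *dev_id)
{
	struct my_eth *eth = dev_id;

	/* Mask RX-done and defer packet reception to RX NAPI. */
	if (likely(napi_schedule_prep(&eth->rx_napi)))
		__napi_schedule(&eth->rx_napi);
	my_irq_disable(eth, MY_RX_DONE_INT);

	return IRQ_HANDLED;
}

static int my_napi_tx(struct napi_struct *napi, int budget)
{
	int done = 0;	/* reclaim up to @budget completed TX descriptors here */

	if (done < budget) {
		napi_complete(napi);
		/* re-enable MY_TX_DONE_INT here */
	}
	return done;
}

static int my_napi_rx(struct napi_struct *napi, int budget)
{
	int done = 0;	/* receive up to @budget frames here */

	if (done < budget) {
		napi_complete(napi);
		/* re-enable MY_RX_DONE_INT here */
	}
	return done;
}

static int my_irq_setup(struct my_eth *eth, int tx_irq, int rx_irq)
{
	int err;

	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, my_napi_tx, MY_NAPI_WEIGHT);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, my_napi_rx, MY_NAPI_WEIGHT);

	/* One interrupt line and handler per direction. */
	err = request_irq(tx_irq, my_handle_irq_tx, 0, "my-eth-tx", eth);
	if (err)
		return err;
	err = request_irq(rx_irq, my_handle_irq_rx, 0, "my-eth-rx", eth);
	if (err)
		free_irq(tx_irq, eth);

	return err;
}

In the patch itself this split only works because the TX-done and RX-done sources are steered onto different hardware interrupt lines via the PDMA/QDMA INT_GRP register writes in the mtk_hw_init hunk above.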