Diffstat (limited to 'target/linux/generic/backport-5.10/610-v5.13-45-net-ethernet-mtk_eth_soc-implement-dynamic-interrupt.patch')
-rw-r--r--  target/linux/generic/backport-5.10/610-v5.13-45-net-ethernet-mtk_eth_soc-implement-dynamic-interrupt.patch | 313
1 file changed, 313 insertions, 0 deletions
diff --git a/target/linux/generic/backport-5.10/610-v5.13-45-net-ethernet-mtk_eth_soc-implement-dynamic-interrupt.patch b/target/linux/generic/backport-5.10/610-v5.13-45-net-ethernet-mtk_eth_soc-implement-dynamic-interrupt.patch
new file mode 100644
index 0000000000..ef4d2e875b
--- /dev/null
+++ b/target/linux/generic/backport-5.10/610-v5.13-45-net-ethernet-mtk_eth_soc-implement-dynamic-interrupt.patch
@@ -0,0 +1,313 @@
+From e9229ffd550b2d8c4997c67a501dbc3919fd4e26 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 22 Apr 2021 22:21:02 -0700
+Subject: [PATCH] net: ethernet: mtk_eth_soc: implement dynamic interrupt
+ moderation
+
+Reduces the number of interrupts under load
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+[Ilya: add documentation for new struct fields]
+Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/mediatek/Kconfig | 1 +
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 96 +++++++++++++++++++--
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 41 +++++++--
+ 3 files changed, 124 insertions(+), 14 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/Kconfig
++++ b/drivers/net/ethernet/mediatek/Kconfig
+@@ -10,6 +10,7 @@ if NET_VENDOR_MEDIATEK
+ config NET_MEDIATEK_SOC
+ tristate "MediaTek SoC Gigabit Ethernet support"
+ select PHYLINK
++ select DIMLIB
+ help
+ This driver supports the gigabit ethernet MACs in the
+ MediaTek SoC family.
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1233,12 +1233,13 @@ static void mtk_update_rx_cpu_idx(struct
+ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ struct mtk_eth *eth)
+ {
++ struct dim_sample dim_sample = {};
+ struct mtk_rx_ring *ring;
+ int idx;
+ struct sk_buff *skb;
+ u8 *data, *new_data;
+ struct mtk_rx_dma *rxd, trxd;
+- int done = 0;
++ int done = 0, bytes = 0;
+
+ while (done < budget) {
+ struct net_device *netdev;
+@@ -1312,6 +1313,7 @@ static int mtk_poll_rx(struct napi_struc
+ else
+ skb_checksum_none_assert(skb);
+ skb->protocol = eth_type_trans(skb, netdev);
++ bytes += pktlen;
+
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+ (trxd.rxd2 & RX_DMA_VTAG))
+@@ -1344,6 +1346,12 @@ rx_done:
+ mtk_update_rx_cpu_idx(eth);
+ }
+
++ eth->rx_packets += done;
++ eth->rx_bytes += bytes;
++ dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
++ &dim_sample);
++ net_dim(&eth->rx_dim, dim_sample);
++
+ return done;
+ }
+
+@@ -1436,6 +1444,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
+ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
+ {
+ struct mtk_tx_ring *ring = &eth->tx_ring;
++ struct dim_sample dim_sample = {};
+ unsigned int done[MTK_MAX_DEVS];
+ unsigned int bytes[MTK_MAX_DEVS];
+ int total = 0, i;
+@@ -1453,8 +1462,14 @@ static int mtk_poll_tx(struct mtk_eth *e
+ continue;
+ netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
+ total += done[i];
++ eth->tx_packets += done[i];
++ eth->tx_bytes += bytes[i];
+ }
+
++ dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
++ &dim_sample);
++ net_dim(&eth->tx_dim, dim_sample);
++
+ if (mtk_queue_stopped(eth) &&
+ (atomic_read(&ring->free_count) > ring->thresh))
+ mtk_wake_queue(eth);
+@@ -2129,6 +2144,7 @@ static irqreturn_t mtk_handle_irq_rx(int
+ {
+ struct mtk_eth *eth = _eth;
+
++ eth->rx_events++;
+ if (likely(napi_schedule_prep(&eth->rx_napi))) {
+ __napi_schedule(&eth->rx_napi);
+ mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+@@ -2141,6 +2157,7 @@ static irqreturn_t mtk_handle_irq_tx(int
+ {
+ struct mtk_eth *eth = _eth;
+
++ eth->tx_events++;
+ if (likely(napi_schedule_prep(&eth->tx_napi))) {
+ __napi_schedule(&eth->tx_napi);
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+@@ -2325,6 +2342,9 @@ static int mtk_stop(struct net_device *d
+ napi_disable(&eth->tx_napi);
+ napi_disable(&eth->rx_napi);
+
++ cancel_work_sync(&eth->rx_dim.work);
++ cancel_work_sync(&eth->tx_dim.work);
++
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
+ mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
+@@ -2377,6 +2397,64 @@ err_disable_clks:
+ return ret;
+ }
+
++static void mtk_dim_rx(struct work_struct *work)
++{
++ struct dim *dim = container_of(work, struct dim, work);
++ struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
++ struct dim_cq_moder cur_profile;
++ u32 val, cur;
++
++ cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
++ dim->profile_ix);
++ spin_lock_bh(&eth->dim_lock);
++
++ val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
++ val &= MTK_PDMA_DELAY_TX_MASK;
++ val |= MTK_PDMA_DELAY_RX_EN;
++
++ cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
++ val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
++
++ cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
++ val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
++
++ mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
++ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
++
++ spin_unlock_bh(&eth->dim_lock);
++
++ dim->state = DIM_START_MEASURE;
++}
++
++static void mtk_dim_tx(struct work_struct *work)
++{
++ struct dim *dim = container_of(work, struct dim, work);
++ struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
++ struct dim_cq_moder cur_profile;
++ u32 val, cur;
++
++ cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
++ dim->profile_ix);
++ spin_lock_bh(&eth->dim_lock);
++
++ val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
++ val &= MTK_PDMA_DELAY_RX_MASK;
++ val |= MTK_PDMA_DELAY_TX_EN;
++
++ cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
++ val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
++
++ cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
++ val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
++
++ mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
++ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
++
++ spin_unlock_bh(&eth->dim_lock);
++
++ dim->state = DIM_START_MEASURE;
++}
++
+ static int mtk_hw_init(struct mtk_eth *eth)
+ {
+ int i, val, ret;
+@@ -2398,9 +2476,6 @@ static int mtk_hw_init(struct mtk_eth *e
+ goto err_disable_pm;
+ }
+
+- /* enable interrupt delay for RX */
+- mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
+-
+ /* disable delay and normal interrupt */
+ mtk_tx_irq_disable(eth, ~0);
+ mtk_rx_irq_disable(eth, ~0);
+@@ -2439,11 +2514,11 @@ static int mtk_hw_init(struct mtk_eth *e
+ /* Enable RX VLan Offloading */
+ mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
+
+- /* enable interrupt delay for RX */
+- mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
++ /* set interrupt delays based on current Net DIM sample */
++ mtk_dim_rx(&eth->rx_dim.work);
++ mtk_dim_tx(&eth->tx_dim.work);
+
+ /* disable delay and normal interrupt */
+- mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
+ mtk_tx_irq_disable(eth, ~0);
+ mtk_rx_irq_disable(eth, ~0);
+
+@@ -2948,6 +3023,13 @@ static int mtk_probe(struct platform_dev
+ spin_lock_init(&eth->page_lock);
+ spin_lock_init(&eth->tx_irq_lock);
+ spin_lock_init(&eth->rx_irq_lock);
++ spin_lock_init(&eth->dim_lock);
++
++ eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
++ INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
++
++ eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
++ INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -16,6 +16,7 @@
+ #include <linux/refcount.h>
+ #include <linux/phylink.h>
+ #include <linux/rhashtable.h>
++#include <linux/dim.h>
+ #include "mtk_ppe.h"
+
+ #define MTK_QDMA_PAGE_SIZE 2048
+@@ -136,13 +137,18 @@
+
+ /* PDMA Delay Interrupt Register */
+ #define MTK_PDMA_DELAY_INT 0xa0c
++#define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0)
+ #define MTK_PDMA_DELAY_RX_EN BIT(15)
+-#define MTK_PDMA_DELAY_RX_PINT 4
+ #define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
+-#define MTK_PDMA_DELAY_RX_PTIME 4
+-#define MTK_PDMA_DELAY_RX_DELAY \
+- (MTK_PDMA_DELAY_RX_EN | MTK_PDMA_DELAY_RX_PTIME | \
+- (MTK_PDMA_DELAY_RX_PINT << MTK_PDMA_DELAY_RX_PINT_SHIFT))
++#define MTK_PDMA_DELAY_RX_PTIME_SHIFT 0
++
++#define MTK_PDMA_DELAY_TX_MASK GENMASK(31, 16)
++#define MTK_PDMA_DELAY_TX_EN BIT(31)
++#define MTK_PDMA_DELAY_TX_PINT_SHIFT 24
++#define MTK_PDMA_DELAY_TX_PTIME_SHIFT 16
++
++#define MTK_PDMA_DELAY_PINT_MASK 0x7f
++#define MTK_PDMA_DELAY_PTIME_MASK 0xff
+
+ /* PDMA Interrupt Status Register */
+ #define MTK_PDMA_INT_STATUS 0xa20
+@@ -224,6 +230,7 @@
+ /* QDMA Interrupt Status Register */
+ #define MTK_QDMA_INT_STATUS 0x1A18
+ #define MTK_RX_DONE_DLY BIT(30)
++#define MTK_TX_DONE_DLY BIT(28)
+ #define MTK_RX_DONE_INT3 BIT(19)
+ #define MTK_RX_DONE_INT2 BIT(18)
+ #define MTK_RX_DONE_INT1 BIT(17)
+@@ -233,8 +240,7 @@
+ #define MTK_TX_DONE_INT1 BIT(1)
+ #define MTK_TX_DONE_INT0 BIT(0)
+ #define MTK_RX_DONE_INT MTK_RX_DONE_DLY
+-#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
+- MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
++#define MTK_TX_DONE_INT MTK_TX_DONE_DLY
+
+ /* QDMA Interrupt grouping registers */
+ #define MTK_QDMA_INT_GRP1 0x1a20
+@@ -843,6 +849,7 @@ struct mtk_sgmii {
+ * @page_lock: Make sure that register operations are atomic
+ * @tx_irq__lock: Make sure that IRQ register operations are atomic
+ * @rx_irq__lock: Make sure that IRQ register operations are atomic
++ * @dim_lock: Make sure that Net DIM operations are atomic
+ * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
+ * dummy for NAPI to work
+ * @netdev: The netdev instances
+@@ -861,6 +868,14 @@ struct mtk_sgmii {
+ * @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring
+ * @tx_napi: The TX NAPI struct
+ * @rx_napi: The RX NAPI struct
++ * @rx_events: Net DIM RX event counter
++ * @rx_packets: Net DIM RX packet counter
++ * @rx_bytes: Net DIM RX byte counter
++ * @rx_dim: Net DIM RX context
++ * @tx_events: Net DIM TX event counter
++ * @tx_packets: Net DIM TX packet counter
++ * @tx_bytes: Net DIM TX byte counter
++ * @tx_dim: Net DIM TX context
+ * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring: physical address of scratch_ring
+ * @scratch_head: The scratch memory that scratch_ring points to.
+@@ -905,6 +920,18 @@ struct mtk_eth {
+
+ const struct mtk_soc_data *soc;
+
++ spinlock_t dim_lock;
++
++ u32 rx_events;
++ u32 rx_packets;
++ u32 rx_bytes;
++ struct dim rx_dim;
++
++ u32 tx_events;
++ u32 tx_packets;
++ u32 tx_bytes;
++ struct dim tx_dim;
++
+ u32 tx_int_mask_reg;
+ u32 tx_int_status_reg;
+ u32 rx_dma_l4_valid;
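
For reference, below is a minimal standalone sketch of how mtk_dim_rx() in the patch above encodes a Net DIM moderation profile into the RX half of MTK_PDMA_DELAY_INT. It is not part of the patch: the main() wrapper and the 64 us / 8 packet profile are invented for illustration, and the "~20 us tick" reading is inferred from the DIV_ROUND_UP(usec, 20) conversion in the driver code.

/*
 * Standalone illustration (not from the kernel tree) of the register
 * encoding performed by mtk_dim_rx() in the patch above.  The macro
 * values mirror the definitions the patch adds to mtk_eth_soc.h.
 */
#include <stdio.h>
#include <inttypes.h>

#define MTK_PDMA_DELAY_RX_EN		(1u << 15)
#define MTK_PDMA_DELAY_RX_PINT_SHIFT	8
#define MTK_PDMA_DELAY_RX_PTIME_SHIFT	0
#define MTK_PDMA_DELAY_PINT_MASK	0x7f
#define MTK_PDMA_DELAY_PTIME_MASK	0xff

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MIN(a, b)		((a) < (b) ? (a) : (b))

int main(void)
{
	/* Example DIM profile: coalesce for up to 64 us or 8 packets.
	 * These numbers are illustrative, not taken from the kernel's
	 * DIM profile tables. */
	uint32_t usec = 64, pkts = 8;
	uint32_t val = MTK_PDMA_DELAY_RX_EN;

	/* The driver divides the microsecond budget by 20, which suggests
	 * the delay timer ticks in ~20 us units; both fields are clamped
	 * to the width of their register fields. */
	val |= MIN(DIV_ROUND_UP(usec, 20), MTK_PDMA_DELAY_PTIME_MASK)
	       << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
	val |= MIN(pkts, MTK_PDMA_DELAY_PINT_MASK)
	       << MTK_PDMA_DELAY_RX_PINT_SHIFT;

	/* Prints 0x00008804: RX delay enabled, 8-packet / 4-tick thresholds. */
	printf("MTK_PDMA_DELAY_INT (RX half) = 0x%08" PRIx32 "\n", val);
	return 0;
}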