author     Felix Fietkau <nbd@nbd.name>  2021-11-30 11:55:24 +0100
committer  Felix Fietkau <nbd@nbd.name>  2022-05-06 19:57:57 +0200
commit     77e123340f0b5490905e27ddc92f0dff8ed017a5 (patch)
tree       36398f65a267775cb961539a2a3ad479a6054648 /target/linux/generic/pending-5.10/701-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch
parent     5ff900e0ade775062bf888b447893aefa1a37146 (diff)
mediatek: add patches for MT7622 WED (wireless ethernet dispatch)
This series also contains other improvements for hardware flow offload support.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
(cherry-picked from commit 0f029b3d2b505b40aca9a24a002838ed1060f83d)
Diffstat (limited to 'target/linux/generic/pending-5.10/701-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch')
-rw-r--r--  target/linux/generic/pending-5.10/701-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch  327
1 file changed, 327 insertions(+), 0 deletions(-)
diff --git a/target/linux/generic/pending-5.10/701-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch b/target/linux/generic/pending-5.10/701-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch
new file mode 100644
index 0000000000..245a996c96
--- /dev/null
+++ b/target/linux/generic/pending-5.10/701-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch
@@ -0,0 +1,327 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sat, 5 Feb 2022 17:59:07 +0100
+Subject: [PATCH] net: ethernet: mtk_eth_soc: add support for coherent
+ DMA
+
+It improves performance by eliminating the need for a cache flush on rx and tx.
+In preparation for supporting WED (Wireless Ethernet Dispatch), also add a
+function for disabling coherent DMA at runtime.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -9,6 +9,7 @@
+ #include <linux/of_device.h>
+ #include <linux/of_mdio.h>
+ #include <linux/of_net.h>
++#include <linux/of_address.h>
+ #include <linux/mfd/syscon.h>
+ #include <linux/regmap.h>
+ #include <linux/clk.h>
+@@ -821,7 +822,7 @@ static int mtk_init_fq_dma(struct mtk_et
+ dma_addr_t dma_addr;
+ int i;
+
+- eth->scratch_ring = dma_alloc_coherent(eth->dev,
++ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
+ cnt * sizeof(struct mtk_tx_dma),
+ &eth->phy_scratch_ring,
+ GFP_ATOMIC);
+@@ -833,10 +834,10 @@ static int mtk_init_fq_dma(struct mtk_et
+ if (unlikely(!eth->scratch_head))
+ return -ENOMEM;
+
+- dma_addr = dma_map_single(eth->dev,
++ dma_addr = dma_map_single(eth->dma_dev,
+ eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
+ DMA_FROM_DEVICE);
+- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
++ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+ return -ENOMEM;
+
+ phy_ring_tail = eth->phy_scratch_ring +
+@@ -890,26 +891,26 @@ static void mtk_tx_unmap(struct mtk_eth
+ {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
+- dma_unmap_single(eth->dev,
++ dma_unmap_single(eth->dma_dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
+- dma_unmap_page(eth->dev,
++ dma_unmap_page(eth->dma_dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ }
+ } else {
+ if (dma_unmap_len(tx_buf, dma_len0)) {
+- dma_unmap_page(eth->dev,
++ dma_unmap_page(eth->dma_dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ }
+
+ if (dma_unmap_len(tx_buf, dma_len1)) {
+- dma_unmap_page(eth->dev,
++ dma_unmap_page(eth->dma_dev,
+ dma_unmap_addr(tx_buf, dma_addr1),
+ dma_unmap_len(tx_buf, dma_len1),
+ DMA_TO_DEVICE);
+@@ -987,9 +988,9 @@ static int mtk_tx_map(struct sk_buff *sk
+ if (skb_vlan_tag_present(skb))
+ txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
+
+- mapped_addr = dma_map_single(eth->dev, skb->data,
++ mapped_addr = dma_map_single(eth->dma_dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
++ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
+ return -ENOMEM;
+
+ WRITE_ONCE(itxd->txd1, mapped_addr);
+@@ -1028,10 +1029,10 @@ static int mtk_tx_map(struct sk_buff *sk
+
+
+ frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
+- mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
++ mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset,
+ frag_map_size,
+ DMA_TO_DEVICE);
+- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
++ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
+ goto err_dma;
+
+ if (i == nr_frags - 1 &&
+@@ -1309,18 +1310,18 @@ static int mtk_poll_rx(struct napi_struc
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+- dma_addr = dma_map_single(eth->dev,
++ dma_addr = dma_map_single(eth->dma_dev,
+ new_data + NET_SKB_PAD +
+ eth->ip_align,
+ ring->buf_size,
+ DMA_FROM_DEVICE);
+- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
++ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
+ skb_free_frag(new_data);
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+- dma_unmap_single(eth->dev, trxd.rxd1,
++ dma_unmap_single(eth->dma_dev, trxd.rxd1,
+ ring->buf_size, DMA_FROM_DEVICE);
+
+ /* receive data */
+@@ -1593,7 +1594,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+ if (!ring->buf)
+ goto no_tx_mem;
+
+- ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
++ ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
+ &ring->phys, GFP_ATOMIC);
+ if (!ring->dma)
+ goto no_tx_mem;
+@@ -1611,7 +1612,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+ * descriptors in ring->dma_pdma.
+ */
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+- ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
++ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
+ &ring->phys_pdma,
+ GFP_ATOMIC);
+ if (!ring->dma_pdma)
+@@ -1670,7 +1671,7 @@ static void mtk_tx_clean(struct mtk_eth
+ }
+
+ if (ring->dma) {
+- dma_free_coherent(eth->dev,
++ dma_free_coherent(eth->dma_dev,
+ MTK_DMA_SIZE * sizeof(*ring->dma),
+ ring->dma,
+ ring->phys);
+@@ -1678,7 +1679,7 @@ static void mtk_tx_clean(struct mtk_eth
+ }
+
+ if (ring->dma_pdma) {
+- dma_free_coherent(eth->dev,
++ dma_free_coherent(eth->dma_dev,
+ MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
+ ring->dma_pdma,
+ ring->phys_pdma);
+@@ -1723,18 +1724,18 @@ static int mtk_rx_alloc(struct mtk_eth *
+ return -ENOMEM;
+ }
+
+- ring->dma = dma_alloc_coherent(eth->dev,
++ ring->dma = dma_alloc_coherent(eth->dma_dev,
+ rx_dma_size * sizeof(*ring->dma),
+ &ring->phys, GFP_ATOMIC);
+ if (!ring->dma)
+ return -ENOMEM;
+
+ for (i = 0; i < rx_dma_size; i++) {
+- dma_addr_t dma_addr = dma_map_single(eth->dev,
++ dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
+ ring->data[i] + NET_SKB_PAD + eth->ip_align,
+ ring->buf_size,
+ DMA_FROM_DEVICE);
+- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
++ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+ return -ENOMEM;
+ ring->dma[i].rxd1 = (unsigned int)dma_addr;
+
+@@ -1770,7 +1771,7 @@ static void mtk_rx_clean(struct mtk_eth
+ continue;
+ if (!ring->dma[i].rxd1)
+ continue;
+- dma_unmap_single(eth->dev,
++ dma_unmap_single(eth->dma_dev,
+ ring->dma[i].rxd1,
+ ring->buf_size,
+ DMA_FROM_DEVICE);
+@@ -1781,7 +1782,7 @@ static void mtk_rx_clean(struct mtk_eth
+ }
+
+ if (ring->dma) {
+- dma_free_coherent(eth->dev,
++ dma_free_coherent(eth->dma_dev,
+ ring->dma_size * sizeof(*ring->dma),
+ ring->dma,
+ ring->phys);
+@@ -2134,7 +2135,7 @@ static void mtk_dma_free(struct mtk_eth
+ if (eth->netdev[i])
+ netdev_reset_queue(eth->netdev[i]);
+ if (eth->scratch_ring) {
+- dma_free_coherent(eth->dev,
++ dma_free_coherent(eth->dma_dev,
+ MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
+ eth->scratch_ring,
+ eth->phy_scratch_ring);
+@@ -2482,6 +2483,8 @@ static void mtk_dim_tx(struct work_struc
+
+ static int mtk_hw_init(struct mtk_eth *eth)
+ {
++ u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
++ ETHSYS_DMA_AG_MAP_PPE;
+ int i, val, ret;
+
+ if (test_and_set_bit(MTK_HW_INIT, &eth->state))
+@@ -2494,6 +2497,10 @@ static int mtk_hw_init(struct mtk_eth *e
+ if (ret)
+ goto err_disable_pm;
+
++ if (eth->ethsys)
++ regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
++ of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
++
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ ret = device_reset(eth->dev);
+ if (ret) {
+@@ -3043,6 +3050,35 @@ free_netdev:
+ return err;
+ }
+
++void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
++{
++ struct net_device *dev, *tmp;
++ LIST_HEAD(dev_list);
++ int i;
++
++ rtnl_lock();
++
++ for (i = 0; i < MTK_MAC_COUNT; i++) {
++ dev = eth->netdev[i];
++
++ if (!dev || !(dev->flags & IFF_UP))
++ continue;
++
++ list_add_tail(&dev->close_list, &dev_list);
++ }
++
++ dev_close_many(&dev_list, false);
++
++ eth->dma_dev = dma_dev;
++
++ list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
++ list_del_init(&dev->close_list);
++ dev_open(dev, NULL);
++ }
++
++ rtnl_unlock();
++}
++
+ static int mtk_probe(struct platform_device *pdev)
+ {
+ struct device_node *mac_np;
+@@ -3056,6 +3092,7 @@ static int mtk_probe(struct platform_dev
+ eth->soc = of_device_get_match_data(&pdev->dev);
+
+ eth->dev = &pdev->dev;
++ eth->dma_dev = &pdev->dev;
+ eth->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(eth->base))
+ return PTR_ERR(eth->base);
+@@ -3104,6 +3141,16 @@ static int mtk_probe(struct platform_dev
+ }
+ }
+
++ if (of_dma_is_coherent(pdev->dev.of_node)) {
++ struct regmap *cci;
++
++ cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
++ "mediatek,cci-control");
++ /* enable CPU/bus coherency */
++ if (!IS_ERR(cci))
++ regmap_write(cci, 0, 3);
++ }
++
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
+ eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
+ GFP_KERNEL);
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -456,6 +456,12 @@
+ #define RSTCTRL_FE BIT(6)
+ #define RSTCTRL_PPE BIT(31)
+
++/* ethernet dma channel agent map */
++#define ETHSYS_DMA_AG_MAP 0x408
++#define ETHSYS_DMA_AG_MAP_PDMA BIT(0)
++#define ETHSYS_DMA_AG_MAP_QDMA BIT(1)
++#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
++
+ /* SGMII subsystem config registers */
+ /* Register to auto-negotiation restart */
+ #define SGMSYS_PCS_CONTROL_1 0x0
+@@ -873,6 +879,7 @@ struct mtk_sgmii {
+ /* struct mtk_eth - This is the main datasructure for holding the state
+ * of the driver
+ * @dev: The device pointer
++ * @dma_dev: The device pointer used for dma mapping/alloc
+ * @base: The mapped register i/o base
+ * @page_lock: Make sure that register operations are atomic
+ * @tx_irq__lock: Make sure that IRQ register operations are atomic
+@@ -916,6 +923,7 @@ struct mtk_sgmii {
+
+ struct mtk_eth {
+ struct device *dev;
++ struct device *dma_dev;
+ void __iomem *base;
+ spinlock_t page_lock;
+ spinlock_t tx_irq_lock;
+@@ -1014,6 +1022,7 @@ int mtk_gmac_rgmii_path_setup(struct mtk
+ int mtk_eth_offload_init(struct mtk_eth *eth);
+ int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
++void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
+
+
+ #endif /* MTK_ETH_H */
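
The patch above exports mtk_eth_set_dma_device() so that a later driver in this series (the MT7622 WED code) can re-point the ethernet driver's DMA mappings at its own struct device. Below is a minimal, hypothetical sketch of how such a consumer might use it; the helper names and the wed_dev pointer are illustrative only and are not part of this patch.

    /*
     * Illustrative sketch only: a consumer such as the WED driver added later
     * in this series could redirect all DMA mapping/alloc calls made by
     * mtk_eth_soc to its own device, and restore the platform device when it
     * detaches. example_wed_attach()/example_wed_detach() are hypothetical.
     */
    #include <linux/device.h>
    #include "mtk_eth_soc.h"

    static void example_wed_attach(struct mtk_eth *eth, struct device *wed_dev)
    {
    	/* Closes the netdevs, swaps eth->dma_dev, then reopens them. */
    	mtk_eth_set_dma_device(eth, wed_dev);
    }

    static void example_wed_detach(struct mtk_eth *eth)
    {
    	/* Fall back to the platform device that was set at probe time. */
    	mtk_eth_set_dma_device(eth, eth->dev);
    }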