Diffstat (limited to 'target/linux/generic/pending-5.10/701-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch')
 target/linux/generic/pending-5.10/701-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch | 327 ----------
 1 file changed, 0 insertions(+), 327 deletions(-)
diff --git a/target/linux/generic/pending-5.10/701-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch b/target/linux/generic/pending-5.10/701-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch
deleted file mode 100644
index c7bb6c5e10..0000000000
--- a/target/linux/generic/pending-5.10/701-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch
+++ /dev/null
@@ -1,327 +0,0 @@
-From: Felix Fietkau <nbd@nbd.name>
-Date: Sat, 5 Feb 2022 17:59:07 +0100
-Subject: [PATCH] net: ethernet: mtk_eth_soc: add support for coherent
- DMA
-
-It improves performance by eliminating the need for a cache flush on rx and tx.
-In preparation for supporting WED (Wireless Ethernet Dispatch), also add a
-function for disabling coherent DMA at runtime.
-
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
----
-
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -9,6 +9,7 @@
- #include <linux/of_device.h>
- #include <linux/of_mdio.h>
- #include <linux/of_net.h>
-+#include <linux/of_address.h>
- #include <linux/mfd/syscon.h>
- #include <linux/regmap.h>
- #include <linux/clk.h>
-@@ -833,7 +834,7 @@ static int mtk_init_fq_dma(struct mtk_et
- dma_addr_t dma_addr;
- int i;
-
-- eth->scratch_ring = dma_alloc_coherent(eth->dev,
-+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
- cnt * sizeof(struct mtk_tx_dma),
- &eth->phy_scratch_ring,
- GFP_ATOMIC);
-@@ -845,10 +846,10 @@ static int mtk_init_fq_dma(struct mtk_et
- if (unlikely(!eth->scratch_head))
- return -ENOMEM;
-
-- dma_addr = dma_map_single(eth->dev,
-+ dma_addr = dma_map_single(eth->dma_dev,
- eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
- DMA_FROM_DEVICE);
-- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
-+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
- return -ENOMEM;
-
- phy_ring_tail = eth->phy_scratch_ring +
-@@ -902,26 +903,26 @@ static void mtk_tx_unmap(struct mtk_eth
- {
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-- dma_unmap_single(eth->dev,
-+ dma_unmap_single(eth->dma_dev,
- dma_unmap_addr(tx_buf, dma_addr0),
- dma_unmap_len(tx_buf, dma_len0),
- DMA_TO_DEVICE);
- } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-- dma_unmap_page(eth->dev,
-+ dma_unmap_page(eth->dma_dev,
- dma_unmap_addr(tx_buf, dma_addr0),
- dma_unmap_len(tx_buf, dma_len0),
- DMA_TO_DEVICE);
- }
- } else {
- if (dma_unmap_len(tx_buf, dma_len0)) {
-- dma_unmap_page(eth->dev,
-+ dma_unmap_page(eth->dma_dev,
- dma_unmap_addr(tx_buf, dma_addr0),
- dma_unmap_len(tx_buf, dma_len0),
- DMA_TO_DEVICE);
- }
-
- if (dma_unmap_len(tx_buf, dma_len1)) {
-- dma_unmap_page(eth->dev,
-+ dma_unmap_page(eth->dma_dev,
- dma_unmap_addr(tx_buf, dma_addr1),
- dma_unmap_len(tx_buf, dma_len1),
- DMA_TO_DEVICE);
-@@ -999,9 +1000,9 @@ static int mtk_tx_map(struct sk_buff *sk
- if (skb_vlan_tag_present(skb))
- txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
-
-- mapped_addr = dma_map_single(eth->dev, skb->data,
-+ mapped_addr = dma_map_single(eth->dma_dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
-- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
-+ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
- return -ENOMEM;
-
- WRITE_ONCE(itxd->txd1, mapped_addr);
-@@ -1040,10 +1041,10 @@ static int mtk_tx_map(struct sk_buff *sk
-
-
- frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
-- mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
-+ mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset,
- frag_map_size,
- DMA_TO_DEVICE);
-- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
-+ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
- goto err_dma;
-
- if (i == nr_frags - 1 &&
-@@ -1324,18 +1325,18 @@ static int mtk_poll_rx(struct napi_struc
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
-- dma_addr = dma_map_single(eth->dev,
-+ dma_addr = dma_map_single(eth->dma_dev,
- new_data + NET_SKB_PAD +
- eth->ip_align,
- ring->buf_size,
- DMA_FROM_DEVICE);
-- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
-+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
- skb_free_frag(new_data);
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
-
-- dma_unmap_single(eth->dev, trxd.rxd1,
-+ dma_unmap_single(eth->dma_dev, trxd.rxd1,
- ring->buf_size, DMA_FROM_DEVICE);
-
- /* receive data */
-@@ -1608,7 +1609,7 @@ static int mtk_tx_alloc(struct mtk_eth *
- if (!ring->buf)
- goto no_tx_mem;
-
-- ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
-+ ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
- &ring->phys, GFP_ATOMIC);
- if (!ring->dma)
- goto no_tx_mem;
-@@ -1626,7 +1627,7 @@ static int mtk_tx_alloc(struct mtk_eth *
- * descriptors in ring->dma_pdma.
- */
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
-- ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
-+ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
- &ring->phys_pdma,
- GFP_ATOMIC);
- if (!ring->dma_pdma)
-@@ -1685,7 +1686,7 @@ static void mtk_tx_clean(struct mtk_eth
- }
-
- if (ring->dma) {
-- dma_free_coherent(eth->dev,
-+ dma_free_coherent(eth->dma_dev,
- MTK_DMA_SIZE * sizeof(*ring->dma),
- ring->dma,
- ring->phys);
-@@ -1693,7 +1694,7 @@ static void mtk_tx_clean(struct mtk_eth
- }
-
- if (ring->dma_pdma) {
-- dma_free_coherent(eth->dev,
-+ dma_free_coherent(eth->dma_dev,
- MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
- ring->dma_pdma,
- ring->phys_pdma);
-@@ -1741,18 +1742,18 @@ static int mtk_rx_alloc(struct mtk_eth *
- return -ENOMEM;
- }
-
-- ring->dma = dma_alloc_coherent(eth->dev,
-+ ring->dma = dma_alloc_coherent(eth->dma_dev,
- rx_dma_size * sizeof(*ring->dma),
- &ring->phys, GFP_ATOMIC);
- if (!ring->dma)
- return -ENOMEM;
-
- for (i = 0; i < rx_dma_size; i++) {
-- dma_addr_t dma_addr = dma_map_single(eth->dev,
-+ dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
- ring->data[i] + NET_SKB_PAD + eth->ip_align,
- ring->buf_size,
- DMA_FROM_DEVICE);
-- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
-+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
- return -ENOMEM;
- ring->dma[i].rxd1 = (unsigned int)dma_addr;
-
-@@ -1788,7 +1789,7 @@ static void mtk_rx_clean(struct mtk_eth
- continue;
- if (!ring->dma[i].rxd1)
- continue;
-- dma_unmap_single(eth->dev,
-+ dma_unmap_single(eth->dma_dev,
- ring->dma[i].rxd1,
- ring->buf_size,
- DMA_FROM_DEVICE);
-@@ -1799,7 +1800,7 @@ static void mtk_rx_clean(struct mtk_eth
- }
-
- if (ring->dma) {
-- dma_free_coherent(eth->dev,
-+ dma_free_coherent(eth->dma_dev,
- ring->dma_size * sizeof(*ring->dma),
- ring->dma,
- ring->phys);
-@@ -2155,7 +2156,7 @@ static void mtk_dma_free(struct mtk_eth
- if (eth->netdev[i])
- netdev_reset_queue(eth->netdev[i]);
- if (eth->scratch_ring) {
-- dma_free_coherent(eth->dev,
-+ dma_free_coherent(eth->dma_dev,
- MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
- eth->scratch_ring,
- eth->phy_scratch_ring);
-@@ -2507,6 +2508,8 @@ static void mtk_dim_tx(struct work_struc
-
- static int mtk_hw_init(struct mtk_eth *eth)
- {
-+ u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
-+ ETHSYS_DMA_AG_MAP_PPE;
- int i, val, ret;
-
- if (test_and_set_bit(MTK_HW_INIT, &eth->state))
-@@ -2519,6 +2522,10 @@ static int mtk_hw_init(struct mtk_eth *e
- if (ret)
- goto err_disable_pm;
-
-+ if (eth->ethsys)
-+ regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
-+ of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
-+
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
- ret = device_reset(eth->dev);
- if (ret) {
-@@ -3068,6 +3075,35 @@ free_netdev:
- return err;
- }
-
-+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
-+{
-+ struct net_device *dev, *tmp;
-+ LIST_HEAD(dev_list);
-+ int i;
-+
-+ rtnl_lock();
-+
-+ for (i = 0; i < MTK_MAC_COUNT; i++) {
-+ dev = eth->netdev[i];
-+
-+ if (!dev || !(dev->flags & IFF_UP))
-+ continue;
-+
-+ list_add_tail(&dev->close_list, &dev_list);
-+ }
-+
-+ dev_close_many(&dev_list, false);
-+
-+ eth->dma_dev = dma_dev;
-+
-+ list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
-+ list_del_init(&dev->close_list);
-+ dev_open(dev, NULL);
-+ }
-+
-+ rtnl_unlock();
-+}
-+
- static int mtk_probe(struct platform_device *pdev)
- {
- struct device_node *mac_np;
-@@ -3081,6 +3117,7 @@ static int mtk_probe(struct platform_dev
- eth->soc = of_device_get_match_data(&pdev->dev);
-
- eth->dev = &pdev->dev;
-+ eth->dma_dev = &pdev->dev;
- eth->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(eth->base))
- return PTR_ERR(eth->base);
-@@ -3129,6 +3166,16 @@ static int mtk_probe(struct platform_dev
- }
- }
-
-+ if (of_dma_is_coherent(pdev->dev.of_node)) {
-+ struct regmap *cci;
-+
-+ cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
-+ "mediatek,cci-control");
-+ /* enable CPU/bus coherency */
-+ if (!IS_ERR(cci))
-+ regmap_write(cci, 0, 3);
-+ }
-+
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
- eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
- GFP_KERNEL);
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -457,6 +457,12 @@
- #define RSTCTRL_FE BIT(6)
- #define RSTCTRL_PPE BIT(31)
-
-+/* ethernet dma channel agent map */
-+#define ETHSYS_DMA_AG_MAP 0x408
-+#define ETHSYS_DMA_AG_MAP_PDMA BIT(0)
-+#define ETHSYS_DMA_AG_MAP_QDMA BIT(1)
-+#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
-+
- /* SGMII subsystem config registers */
- /* Register to auto-negotiation restart */
- #define SGMSYS_PCS_CONTROL_1 0x0
-@@ -874,6 +880,7 @@ struct mtk_sgmii {
- /* struct mtk_eth - This is the main datasructure for holding the state
- * of the driver
- * @dev: The device pointer
-+ * @dma_dev: The device pointer used for dma mapping/alloc
- * @base: The mapped register i/o base
- * @page_lock: Make sure that register operations are atomic
- * @tx_irq__lock: Make sure that IRQ register operations are atomic
-@@ -917,6 +924,7 @@ struct mtk_sgmii {
-
- struct mtk_eth {
- struct device *dev;
-+ struct device *dma_dev;
- void __iomem *base;
- spinlock_t page_lock;
- spinlock_t tx_irq_lock;
-@@ -1015,6 +1023,7 @@ int mtk_gmac_rgmii_path_setup(struct mtk
- int mtk_eth_offload_init(struct mtk_eth *eth);
- int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
- void *type_data);
-+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
-
-
- #endif /* MTK_ETH_H */
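
The deleted patch above moves every dma_map_*/dma_alloc_* call in mtk_eth_soc.c from eth->dev to the new eth->dma_dev pointer and exports mtk_eth_set_dma_device() so another driver can swap that pointer at runtime. Below is a minimal, hypothetical sketch of how a WED-style offload driver might use the helper; only mtk_eth_set_dma_device(), struct mtk_eth and the eth->dev/eth->dma_dev fields come from the patch, while example_wed_attach(), example_wed_detach() and the wed_dev argument are assumed names for illustration.

/* Hypothetical usage sketch (not part of the patch): re-point DMA
 * mapping/allocation at the offload engine's device while it owns the
 * rings, and fall back to the ethernet platform device on detach.
 */
#include <linux/device.h>
#include "mtk_eth_soc.h"

static void example_wed_attach(struct mtk_eth *eth, struct device *wed_dev)
{
	/* The helper closes all open netdevs, swaps eth->dma_dev and
	 * reopens them, so the DMA rings are re-allocated against wed_dev.
	 */
	mtk_eth_set_dma_device(eth, wed_dev);
}

static void example_wed_detach(struct mtk_eth *eth)
{
	/* Restore the ethernet platform device for DMA. */
	mtk_eth_set_dma_device(eth, eth->dev);
}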