author    Daniel Golle <daniel@makrotopia.org>  2022-08-29 08:54:41 +0200
committer Daniel Golle <daniel@makrotopia.org>  2022-09-22 23:55:02 +0100
commit    c93c5365c0eb78ba8b479a9fe0cc5ec96f773978
tree      b250435d0f0e8e66c56d0486d1151d4eb1fc827e /target/linux/generic/backport-5.15/706-03-v6.0-net-ethernet-mtk_eth_soc-add-xmit-XDP-support.patch
parent    524f52a471495d2c4e764539e000cb699610de1f
kernel: pick patches for MediaTek Ethernet from linux-next
Pick patches with several fixes and improvements, preparation for upcoming WED (TX) [1], as well as basic XDP support [2] with MediaTek's Filogic SoCs to the mtk_eth_soc driver. Also pick a follow-up patch fixing Ethernet on MT7621 [3].

Tested on Bananapi BPi-R3 (MT7986), Bananapi BPi-R64 (MT7622), Bananapi BPi-R2 (MT7623), MikroTik RouterBoard M11G (MT7621).

[1]: https://patchwork.kernel.org/project/netdevbpf/list/?series=662108&state=*
[2]: https://patchwork.kernel.org/project/netdevbpf/list/?series=675368&state=*
     (the first part of the series adding wed nodes to mt7986a.dtsi was applied to the copy of mt7986a.dtsi in our tree)
[3]: https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git/commit/?id=5e69163d3b9931098922b3fc2f8e786af8c1f37e

Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Diffstat (limited to 'target/linux/generic/backport-5.15/706-03-v6.0-net-ethernet-mtk_eth_soc-add-xmit-XDP-support.patch')
-rw-r--r--  target/linux/generic/backport-5.15/706-03-v6.0-net-ethernet-mtk_eth_soc-add-xmit-XDP-support.patch  340
1 file changed, 340 insertions, 0 deletions
diff --git a/target/linux/generic/backport-5.15/706-03-v6.0-net-ethernet-mtk_eth_soc-add-xmit-XDP-support.patch b/target/linux/generic/backport-5.15/706-03-v6.0-net-ethernet-mtk_eth_soc-add-xmit-XDP-support.patch
new file mode 100644
index 0000000000..42b3ea215d
--- /dev/null
+++ b/target/linux/generic/backport-5.15/706-03-v6.0-net-ethernet-mtk_eth_soc-add-xmit-XDP-support.patch
@@ -0,0 +1,340 @@
+From 5886d26fd25bbe26130e3e5f7474b9b3e98a3469 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Fri, 22 Jul 2022 09:19:39 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: add xmit XDP support
+
+Introduce XDP support for XDP_TX verdict and ndo_xdp_xmit function
+pointer.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 192 +++++++++++++++++---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 10 +-
+ 2 files changed, 180 insertions(+), 22 deletions(-)
+
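For illustration (not part of the upstream patch): the XDP_TX path added below is exercised by any BPF program returning that verdict on a mtk_eth_soc port. A minimal sketch, assuming the usual libbpf build flow (clang -target bpf); the section layout is standard, the program name is illustrative only:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* Bounce every received frame back out the ingress port; the driver
	 * then runs the new mtk_xdp_submit_frame() with dma_map == false,
	 * transmitting straight from the page_pool-backed RX buffer.
	 */
	SEC("xdp")
	int xdp_tx_all(struct xdp_md *ctx)
	{
		return XDP_TX;
	}

	char _license[] SEC("license") = "GPL";
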
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -987,15 +987,26 @@ static void mtk_tx_unmap(struct mtk_eth
+ }
+ }
+
+- tx_buf->flags = 0;
+- if (tx_buf->skb &&
+- (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
+- if (napi)
+- napi_consume_skb(tx_buf->skb, napi);
++ if (tx_buf->type == MTK_TYPE_SKB) {
++ if (tx_buf->data &&
++ tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
++ struct sk_buff *skb = tx_buf->data;
++
++ if (napi)
++ napi_consume_skb(skb, napi);
++ else
++ dev_kfree_skb_any(skb);
++ }
++ } else if (tx_buf->data) {
++ struct xdp_frame *xdpf = tx_buf->data;
++
++ if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
++ xdp_return_frame_rx_napi(xdpf);
+ else
+- dev_kfree_skb_any(tx_buf->skb);
++ xdp_return_frame(xdpf);
+ }
+- tx_buf->skb = NULL;
++ tx_buf->flags = 0;
++ tx_buf->data = NULL;
+ }
+
+ static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
+@@ -1012,7 +1023,7 @@ static void setup_tx_buf(struct mtk_eth
+ dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len1, size);
+ } else {
+- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
++ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+ txd->txd1 = mapped_addr;
+ txd->txd2 = TX_DMA_PLEN0(size);
+ dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+@@ -1188,7 +1199,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ soc->txrx.txd_size);
+ if (new_desc)
+ memset(tx_buf, 0, sizeof(*tx_buf));
+- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
++ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+ tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
+ tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+ MTK_TX_FLAGS_FPORT1;
+@@ -1202,7 +1213,8 @@ static int mtk_tx_map(struct sk_buff *sk
+ }
+
+ /* store skb to cleanup */
+- itx_buf->skb = skb;
++ itx_buf->type = MTK_TYPE_SKB;
++ itx_buf->data = skb;
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ if (k & 0x1)
+@@ -1414,13 +1426,14 @@ static struct page_pool *mtk_create_page
+ .pool_size = size,
+ .nid = NUMA_NO_NODE,
+ .dev = eth->dma_dev,
+- .dma_dir = DMA_FROM_DEVICE,
+ .offset = MTK_PP_HEADROOM,
+ .max_len = MTK_PP_MAX_BUF_SIZE,
+ };
+ struct page_pool *pp;
+ int err;
+
++ pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
++ : DMA_FROM_DEVICE;
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ return pp;
+@@ -1466,6 +1479,122 @@ static void mtk_rx_put_buff(struct mtk_r
+ skb_free_frag(data);
+ }
+
++static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
++ struct net_device *dev, bool dma_map)
++{
++ const struct mtk_soc_data *soc = eth->soc;
++ struct mtk_tx_ring *ring = &eth->tx_ring;
++ struct mtk_tx_dma_desc_info txd_info = {
++ .size = xdpf->len,
++ .first = true,
++ .last = true,
++ };
++ struct mtk_mac *mac = netdev_priv(dev);
++ struct mtk_tx_dma *txd, *txd_pdma;
++ int err = 0, index = 0, n_desc = 1;
++ struct mtk_tx_buf *tx_buf;
++
++ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
++ return -EBUSY;
++
++ if (unlikely(atomic_read(&ring->free_count) <= 1))
++ return -EBUSY;
++
++ spin_lock(&eth->page_lock);
++
++ txd = ring->next_free;
++ if (txd == ring->last_free) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
++ memset(tx_buf, 0, sizeof(*tx_buf));
++
++ if (dma_map) { /* ndo_xdp_xmit */
++ txd_info.addr = dma_map_single(eth->dma_dev, xdpf->data,
++ txd_info.size, DMA_TO_DEVICE);
++ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) {
++ err = -ENOMEM;
++ goto out;
++ }
++ tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
++ } else {
++ struct page *page = virt_to_head_page(xdpf->data);
++
++ txd_info.addr = page_pool_get_dma_addr(page) +
++ sizeof(*xdpf) + xdpf->headroom;
++ dma_sync_single_for_device(eth->dma_dev, txd_info.addr,
++ txd_info.size,
++ DMA_BIDIRECTIONAL);
++ }
++ mtk_tx_set_dma_desc(dev, txd, &txd_info);
++
++ tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
++
++ txd_pdma = qdma_to_pdma(ring, txd);
++ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, txd_info.size,
++ index++);
++
++ /* store xdpf for cleanup */
++ tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
++ tx_buf->data = xdpf;
++
++ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
++ if (index & 1)
++ txd_pdma->txd2 |= TX_DMA_LS0;
++ else
++ txd_pdma->txd2 |= TX_DMA_LS1;
++ }
++
++ ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
++ atomic_sub(n_desc, &ring->free_count);
++
++ /* make sure that all changes to the dma ring are flushed before we
++ * continue
++ */
++ wmb();
++
++ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
++ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
++ } else {
++ int idx;
++
++ idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
++ mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
++ MT7628_TX_CTX_IDX0);
++ }
++out:
++ spin_unlock(&eth->page_lock);
++
++ return err;
++}
++
++static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
++ struct xdp_frame **frames, u32 flags)
++{
++ struct mtk_mac *mac = netdev_priv(dev);
++ struct mtk_hw_stats *hw_stats = mac->hw_stats;
++ struct mtk_eth *eth = mac->hw;
++ int i, nxmit = 0;
++
++ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
++ return -EINVAL;
++
++ for (i = 0; i < num_frame; i++) {
++ if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
++ break;
++ nxmit++;
++ }
++
++ u64_stats_update_begin(&hw_stats->syncp);
++ hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
++ hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
++ u64_stats_update_end(&hw_stats->syncp);
++
++ return nxmit;
++}
++
+ static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
+ struct xdp_buff *xdp, struct net_device *dev)
+ {
+@@ -1494,6 +1623,18 @@ static u32 mtk_xdp_run(struct mtk_eth *e
+
+ count = &hw_stats->xdp_stats.rx_xdp_redirect;
+ goto update_stats;
++ case XDP_TX: {
++ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
++
++ if (mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
++ count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
++ act = XDP_DROP;
++ break;
++ }
++
++ count = &hw_stats->xdp_stats.rx_xdp_tx;
++ goto update_stats;
++ }
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+@@ -1727,9 +1868,8 @@ static int mtk_poll_tx_qdma(struct mtk_e
+ {
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+- struct mtk_tx_dma *desc;
+- struct sk_buff *skb;
+ struct mtk_tx_buf *tx_buf;
++ struct mtk_tx_dma *desc;
+ u32 cpu, dma;
+
+ cpu = ring->last_free_ptr;
+@@ -1750,15 +1890,21 @@ static int mtk_poll_tx_qdma(struct mtk_e
+ if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
+ mac = 1;
+
+- skb = tx_buf->skb;
+- if (!skb)
++ if (!tx_buf->data)
+ break;
+
+- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
++ if (tx_buf->type == MTK_TYPE_SKB &&
++ tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
++ struct sk_buff *skb = tx_buf->data;
++
+ bytes[mac] += skb->len;
+ done[mac]++;
+ budget--;
++ } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
++ tx_buf->type == MTK_TYPE_XDP_NDO) {
++ budget--;
+ }
++
+ mtk_tx_unmap(eth, tx_buf, true);
+
+ ring->last_free = desc;
+@@ -1777,9 +1923,8 @@ static int mtk_poll_tx_pdma(struct mtk_e
+ unsigned int *done, unsigned int *bytes)
+ {
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+- struct mtk_tx_dma *desc;
+- struct sk_buff *skb;
+ struct mtk_tx_buf *tx_buf;
++ struct mtk_tx_dma *desc;
+ u32 cpu, dma;
+
+ cpu = ring->cpu_idx;
+@@ -1787,14 +1932,18 @@ static int mtk_poll_tx_pdma(struct mtk_e
+
+ while ((cpu != dma) && budget) {
+ tx_buf = &ring->buf[cpu];
+- skb = tx_buf->skb;
+- if (!skb)
++ if (!tx_buf->data)
+ break;
+
+- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
++ if (tx_buf->type == MTK_TYPE_SKB &&
++ tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
++ struct sk_buff *skb = tx_buf->data;
+ bytes[0] += skb->len;
+ done[0]++;
+ budget--;
++ } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
++ tx_buf->type == MTK_TYPE_XDP_NDO) {
++ budget--;
+ }
+
+ mtk_tx_unmap(eth, tx_buf, true);
+@@ -3475,6 +3624,7 @@ static const struct net_device_ops mtk_n
+ #endif
+ .ndo_setup_tc = mtk_eth_setup_tc,
+ .ndo_bpf = mtk_xdp,
++ .ndo_xdp_xmit = mtk_xdp_xmit,
+ };
+
+ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -693,6 +693,12 @@ enum mtk_dev_state {
+ MTK_RESETTING
+ };
+
++enum mtk_tx_buf_type {
++ MTK_TYPE_SKB,
++ MTK_TYPE_XDP_TX,
++ MTK_TYPE_XDP_NDO,
++};
++
+ /* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
+ * by the TX descriptor s
+ * @skb: The SKB pointer of the packet being sent
+@@ -702,7 +708,9 @@ enum mtk_dev_state {
+ * @dma_len1: The length of the second segment
+ */
+ struct mtk_tx_buf {
+- struct sk_buff *skb;
++ enum mtk_tx_buf_type type;
++ void *data;
++
+ u32 flags;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr0);
+ DEFINE_DMA_UNMAP_LEN(dma_len0);
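
For illustration (not part of the upstream patch): the .ndo_xdp_xmit hook wired up above is not called by BPF programs directly; the XDP core invokes it when frames are redirected to the device and flushed, which is the dma_map == true case in mtk_xdp_submit_frame(). A minimal sketch of a companion redirect program, assuming the standard devmap helper; the map name tx_port and program name are illustrative only:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* tx_port[0] is filled by userspace with the ifindex of the egress
	 * mtk_eth_soc port before the program is attached.
	 */
	struct {
		__uint(type, BPF_MAP_TYPE_DEVMAP);
		__uint(max_entries, 1);
		__type(key, __u32);
		__type(value, __u32);
	} tx_port SEC(".maps");

	SEC("xdp")
	int xdp_redirect_port(struct xdp_md *ctx)
	{
		/* On success the frame is queued and later pushed out through
		 * the target device's ndo_xdp_xmit (mtk_xdp_xmit here).
		 */
		return bpf_redirect_map(&tx_port, 0, 0);
	}

	char _license[] SEC("license") = "GPL";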