Diffstat (limited to 'target/linux/generic/backport-5.15/706-06-v6.0-net-ethernet-mtk_eth_soc-introduce-xdp-multi-frag-su.patch')
-rw-r--r--  target/linux/generic/backport-5.15/706-06-v6.0-net-ethernet-mtk_eth_soc-introduce-xdp-multi-frag-su.patch  218
1 file changed, 218 insertions, 0 deletions
diff --git a/target/linux/generic/backport-5.15/706-06-v6.0-net-ethernet-mtk_eth_soc-introduce-xdp-multi-frag-su.patch b/target/linux/generic/backport-5.15/706-06-v6.0-net-ethernet-mtk_eth_soc-introduce-xdp-multi-frag-su.patch
new file mode 100644
index 0000000000..e75861bc82
--- /dev/null
+++ b/target/linux/generic/backport-5.15/706-06-v6.0-net-ethernet-mtk_eth_soc-introduce-xdp-multi-frag-su.patch
@@ -0,0 +1,218 @@
+From 155738a4f319538a09f734ce1f5a2eac3ada1de2 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Wed, 27 Jul 2022 23:20:51 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce xdp multi-frag support
+
+Add the capability to map non-linear xdp frames in the XDP_TX path and
+the ndo_xdp_xmit callback.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 125 +++++++++++++-------
+ 1 file changed, 82 insertions(+), 43 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -987,23 +987,22 @@ static void mtk_tx_unmap(struct mtk_eth
+ }
+ }
+
+- if (tx_buf->type == MTK_TYPE_SKB) {
+- if (tx_buf->data &&
+- tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
++ if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
++ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ if (napi)
+ napi_consume_skb(skb, napi);
+ else
+ dev_kfree_skb_any(skb);
+- }
+- } else if (tx_buf->data) {
+- struct xdp_frame *xdpf = tx_buf->data;
++ } else {
++ struct xdp_frame *xdpf = tx_buf->data;
+
+- if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
+- xdp_return_frame_rx_napi(xdpf);
+- else
+- xdp_return_frame(xdpf);
++ if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
++ xdp_return_frame_rx_napi(xdpf);
++ else
++ xdp_return_frame(xdpf);
++ }
+ }
+ tx_buf->flags = 0;
+ tx_buf->data = NULL;
+@@ -1506,6 +1505,8 @@ static int mtk_xdp_frame_map(struct mtk_
+ mtk_tx_set_dma_desc(dev, txd, txd_info);
+
+ tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
++ tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
++ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+
+ txd_pdma = qdma_to_pdma(ring, txd);
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
+@@ -1517,43 +1518,69 @@ static int mtk_xdp_frame_map(struct mtk_
+ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
+ struct net_device *dev, bool dma_map)
+ {
++ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_tx_dma_desc_info txd_info = {
+ .size = xdpf->len,
+ .first = true,
+- .last = true,
++ .last = !xdp_frame_has_frags(xdpf),
+ };
+- int err = 0, index = 0, n_desc = 1;
+- struct mtk_tx_dma *txd, *txd_pdma;
+- struct mtk_tx_buf *tx_buf;
++ int err, index = 0, n_desc = 1, nr_frags;
++ struct mtk_tx_dma *htxd, *txd, *txd_pdma;
++ struct mtk_tx_buf *htx_buf, *tx_buf;
++ void *data = xdpf->data;
+
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ return -EBUSY;
+
+- if (unlikely(atomic_read(&ring->free_count) <= 1))
++ nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
++ if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
+ return -EBUSY;
+
+ spin_lock(&eth->page_lock);
+
+ txd = ring->next_free;
+ if (txd == ring->last_free) {
+- err = -ENOMEM;
+- goto out;
++ spin_unlock(&eth->page_lock);
++ return -ENOMEM;
+ }
++ htxd = txd;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
++ htx_buf = tx_buf;
+
+- err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
+- xdpf->data, xdpf->headroom, index,
+- dma_map);
+- if (err < 0)
+- goto out;
++ for (;;) {
++ err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
++ data, xdpf->headroom, index, dma_map);
++ if (err < 0)
++ goto unmap;
++
++ if (txd_info.last)
++ break;
+
++ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
++ txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
++ txd_pdma = qdma_to_pdma(ring, txd);
++ if (txd == ring->last_free)
++ goto unmap;
++
++ tx_buf = mtk_desc_to_tx_buf(ring, txd,
++ soc->txrx.txd_size);
++ memset(tx_buf, 0, sizeof(*tx_buf));
++ n_desc++;
++ }
++
++ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
++ txd_info.size = skb_frag_size(&sinfo->frags[index]);
++ txd_info.last = index + 1 == nr_frags;
++ data = skb_frag_address(&sinfo->frags[index]);
++
++ index++;
++ }
+ /* store xdpf for cleanup */
+- tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
+- tx_buf->data = xdpf;
++ htx_buf->data = xdpf;
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ txd_pdma = qdma_to_pdma(ring, txd);
+@@ -1580,7 +1607,24 @@ static int mtk_xdp_submit_frame(struct m
+ mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+ MT7628_TX_CTX_IDX0);
+ }
+-out:
++
++ spin_unlock(&eth->page_lock);
++
++ return 0;
++
++unmap:
++ while (htxd != txd) {
++ txd_pdma = qdma_to_pdma(ring, htxd);
++ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
++ mtk_tx_unmap(eth, tx_buf, false);
++
++ htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
++ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
++ txd_pdma->txd2 = TX_DMA_DESP2_DEF;
++
++ htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
++ }
++
+ spin_unlock(&eth->page_lock);
+
+ return err;
+@@ -1909,18 +1953,15 @@ static int mtk_poll_tx_qdma(struct mtk_e
+ if (!tx_buf->data)
+ break;
+
+- if (tx_buf->type == MTK_TYPE_SKB &&
+- tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+- struct sk_buff *skb = tx_buf->data;
++ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
++ if (tx_buf->type == MTK_TYPE_SKB) {
++ struct sk_buff *skb = tx_buf->data;
+
+- bytes[mac] += skb->len;
+- done[mac]++;
+- budget--;
+- } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
+- tx_buf->type == MTK_TYPE_XDP_NDO) {
++ bytes[mac] += skb->len;
++ done[mac]++;
++ }
+ budget--;
+ }
+-
+ mtk_tx_unmap(eth, tx_buf, true);
+
+ ring->last_free = desc;
+@@ -1951,17 +1992,15 @@ static int mtk_poll_tx_pdma(struct mtk_e
+ if (!tx_buf->data)
+ break;
+
+- if (tx_buf->type == MTK_TYPE_SKB &&
+- tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+- struct sk_buff *skb = tx_buf->data;
+- bytes[0] += skb->len;
+- done[0]++;
+- budget--;
+- } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
+- tx_buf->type == MTK_TYPE_XDP_NDO) {
++ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
++ if (tx_buf->type == MTK_TYPE_SKB) {
++ struct sk_buff *skb = tx_buf->data;
++
++ bytes[0] += skb->len;
++ done[0]++;
++ }
+ budget--;
+ }
+-
+ mtk_tx_unmap(eth, tx_buf, true);
+
+ desc = ring->dma + cpu * eth->soc->txrx.txd_size;
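
For reference, the core of this patch is the frag-walk in mtk_xdp_submit_frame():
the linear part of the xdp_frame is mapped first, then each fragment from the
frame's skb_shared_info, with the final buffer closing the descriptor chain.
Below is a minimal sketch of that pattern, not part of the patch itself: the
map_one() callback is a hypothetical stand-in for the driver's per-buffer DMA
mapping (mtk_xdp_frame_map() in this patch), and descriptor-ring bookkeeping
and error unwinding are elided.

/*
 * Illustrative sketch only. Walk a (possibly multi-frag) xdp_frame and
 * hand each buffer to a per-buffer mapping callback, flagging the last
 * buffer so the caller can terminate the descriptor chain.
 */
#include <net/xdp.h>

static int xdp_frame_map_all(struct xdp_frame *xdpf,
			     int (*map_one)(void *data, u32 len, bool last))
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	u32 nr_frags = xdp_frame_has_frags(xdpf) ? sinfo->nr_frags : 0;
	u32 i;
	int err;

	/* head (linear) buffer; also the last one if the frame has no frags */
	err = map_one(xdpf->data, xdpf->len, !nr_frags);
	if (err)
		return err;

	/* one buffer per fragment; the final fragment closes the chain */
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &sinfo->frags[i];

		err = map_one(skb_frag_address(frag), skb_frag_size(frag),
			      i + 1 == nr_frags);
		if (err)
			return err;
	}

	return 0;
}

This mirrors why the patch checks ring->free_count against 1 + nr_frags up
front: the frame now consumes one descriptor for the head plus one per
fragment, and reserving them all before mapping avoids a partial submit.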