path: root/target/linux/lantiq/patches-5.10/0705-v5.17-net-lantiq_xrx200-add-ingress-SG-DMA-support.patch
author    Thomas Nixon <tom@tomn.co.uk>    2021-07-09 22:58:18 +0000
committer Hauke Mehrtens <hauke@hauke-m.de>    2022-01-16 20:51:14 +0100
commit    255268ce1a218a670e653dc5c83067f704164b7c (patch)
tree      c460ac7e5735dc241e508ad3abd214e6156540f7 /target/linux/lantiq/patches-5.10/0705-v5.17-net-lantiq_xrx200-add-ingress-SG-DMA-support.patch
parent    607f06f81cc630448484800e47830fbf0cbc1e24 (diff)
lantiq: xrx200: enable use of baby jumbo frames
The xrx200 max MTU is reduced so that it works correctly when set to the maximum, and the max MTU of the switch is increased to match.

In 5.10, the switch driver now enables non-standard MTUs on a per-port basis, with the overall frame size set based on the CPU port. When a non-standard MTU is not in use, this should have no effect. The maximum packet size is limited, as large packets cause the switch to lock up.

0702-net-lantiq-add-support-for-jumbo-frames.patch comes from net-next commit 998ac358019e491217e752bc6dcbb3afb2a6fa3e.

In 5.4, all switch ports are configured to accept the max MTU, as 5.4 does not have the port_max_mtu/port_change_mtu callbacks.

Signed-off-by: Thomas Nixon <tom@tomn.co.uk>
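For context, here is a minimal sketch of the per-port MTU callbacks this message refers to, as a 5.10-era DSA driver would wire them up. This is illustration only, not the gswip driver's actual code: the my_switch_* names, the MY_SWITCH_MAX_MTU limit, and the register helper are hypothetical placeholders.

#include <linux/etherdevice.h>
#include <net/dsa.h>

#define MY_SWITCH_MAX_MTU 2400	/* assumed hardware frame-size limit */

/* Hypothetical register write; a real driver would program the
 * switch's maximum frame length here. */
static int my_switch_set_max_frame_len(struct dsa_switch *ds, int len)
{
	return 0;
}

static int my_switch_port_max_mtu(struct dsa_switch *ds, int port)
{
	/* Largest payload the hardware passes without locking up. */
	return MY_SWITCH_MAX_MTU;
}

static int my_switch_port_change_mtu(struct dsa_switch *ds, int port,
				     int new_mtu)
{
	/* Only the CPU port sets the shared frame size; the DSA core
	 * raises the CPU port's MTU to the largest user-port MTU, so
	 * user-port changes need no hardware update here. */
	if (!dsa_is_cpu_port(ds, port))
		return 0;

	/* Frame = payload + Ethernet header + FCS. */
	return my_switch_set_max_frame_len(ds, new_mtu + ETH_HLEN +
					   ETH_FCS_LEN);
}

/* Hooked up alongside the driver's other dsa_switch_ops members:
 *	.port_change_mtu = my_switch_port_change_mtu,
 *	.port_max_mtu    = my_switch_port_max_mtu,
 */

The DSA core keeping the CPU port's MTU at the maximum of the user ports' MTUs is what the message means by the overall frame size being set based on the CPU port.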
Diffstat (limited to 'target/linux/lantiq/patches-5.10/0705-v5.17-net-lantiq_xrx200-add-ingress-SG-DMA-support.patch')
-rw-r--r-- target/linux/lantiq/patches-5.10/0705-v5.17-net-lantiq_xrx200-add-ingress-SG-DMA-support.patch | 104
1 file changed, 104 insertions, 0 deletions
diff --git a/target/linux/lantiq/patches-5.10/0705-v5.17-net-lantiq_xrx200-add-ingress-SG-DMA-support.patch b/target/linux/lantiq/patches-5.10/0705-v5.17-net-lantiq_xrx200-add-ingress-SG-DMA-support.patch
new file mode 100644
index 0000000000..f2c36952fc
--- /dev/null
+++ b/target/linux/lantiq/patches-5.10/0705-v5.17-net-lantiq_xrx200-add-ingress-SG-DMA-support.patch
@@ -0,0 +1,104 @@
+From c3e6b2c35b34214c58c1e90d65dab5f5393608e7 Mon Sep 17 00:00:00 2001
+From: Aleksander Jan Bajkowski <olek2@wp.pl>
+Date: Mon, 3 Jan 2022 20:43:16 +0100
+Subject: [PATCH] net: lantiq_xrx200: add ingress SG DMA support
+
+This patch adds support for scatter-gather DMA. The DMA in the PMAC
+splits the packet into several buffers when the MTU on the CPU port is
+less than the MTU of the switch. The first buffer starts at an offset
+of NET_IP_ALIGN; in subsequent buffers, the DMA ignores the offset.
+Thanks to this patch, the user can still connect to the device in such
+a situation. For normal configurations, the patch has no effect on
+performance.
+
+Signed-off-by: Aleksander Jan Bajkowski <olek2@wp.pl>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/lantiq_xrx200.c | 47 +++++++++++++++++++++++-----
+ 1 file changed, 40 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/lantiq_xrx200.c
++++ b/drivers/net/ethernet/lantiq_xrx200.c
+@@ -26,6 +26,9 @@
+ #define XRX200_DMA_RX 0
+ #define XRX200_DMA_TX 1
+
++#define XRX200_DMA_PACKET_COMPLETE 0
++#define XRX200_DMA_PACKET_IN_PROGRESS 1
++
+ /* cpu port mac */
+ #define PMAC_RX_IPG 0x0024
+ #define PMAC_RX_IPG_MASK 0xf
+@@ -61,6 +64,9 @@ struct xrx200_chan {
+ struct ltq_dma_channel dma;
+ struct sk_buff *skb[LTQ_DESC_NUM];
+
++ struct sk_buff *skb_head;
++ struct sk_buff *skb_tail;
++
+ struct xrx200_priv *priv;
+ };
+
+@@ -204,7 +210,8 @@ static int xrx200_hw_receive(struct xrx2
+ struct xrx200_priv *priv = ch->priv;
+ struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+ struct sk_buff *skb = ch->skb[ch->dma.desc];
+- int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
++ u32 ctl = desc->ctl;
++ int len = (ctl & LTQ_DMA_SIZE_MASK);
+ struct net_device *net_dev = priv->net_dev;
+ int ret;
+
+@@ -220,12 +227,36 @@ static int xrx200_hw_receive(struct xrx2
+ }
+
+ skb_put(skb, len);
+- skb->protocol = eth_type_trans(skb, net_dev);
+- netif_receive_skb(skb);
+- net_dev->stats.rx_packets++;
+- net_dev->stats.rx_bytes += len;
+
+- return 0;
++ /* add buffers to skb via skb->frag_list */
++ if (ctl & LTQ_DMA_SOP) {
++ ch->skb_head = skb;
++ ch->skb_tail = skb;
++ } else if (ch->skb_head) {
++ if (ch->skb_head == ch->skb_tail)
++ skb_shinfo(ch->skb_tail)->frag_list = skb;
++ else
++ ch->skb_tail->next = skb;
++ ch->skb_tail = skb;
++ skb_reserve(ch->skb_tail, -NET_IP_ALIGN);
++ ch->skb_head->len += skb->len;
++ ch->skb_head->data_len += skb->len;
++ ch->skb_head->truesize += skb->truesize;
++ }
++
++ if (ctl & LTQ_DMA_EOP) {
++ ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
++ netif_receive_skb(ch->skb_head);
++ net_dev->stats.rx_packets++;
++ net_dev->stats.rx_bytes += ch->skb_head->len;
++ ch->skb_head = NULL;
++ ch->skb_tail = NULL;
++ ret = XRX200_DMA_PACKET_COMPLETE;
++ } else {
++ ret = XRX200_DMA_PACKET_IN_PROGRESS;
++ }
++
++ return ret;
+ }
+
+ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
+@@ -240,7 +271,9 @@ static int xrx200_poll_rx(struct napi_st
+
+ if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
+ ret = xrx200_hw_receive(ch);
+- if (ret)
++ if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
++ continue;
++ if (ret != XRX200_DMA_PACKET_COMPLETE)
+ return ret;
+ rx++;
+ } else {
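The heart of the change is the buffer chaining in xrx200_hw_receive() above: the buffer flagged LTQ_DMA_SOP becomes the head skb, continuation buffers are linked via skb_shinfo(head)->frag_list, and the head's length fields are grown so the stack sees one packet. A standalone sketch of that pattern follows, for illustration only; chain_rx_buffer() and the static tail pointer are hypothetical stand-ins for the driver's per-channel skb_head/skb_tail state.

#include <linux/skbuff.h>

/* Stand-in for the driver's per-channel skb_tail pointer. */
static struct sk_buff *rx_tail;

static void chain_rx_buffer(struct sk_buff *head, struct sk_buff *skb)
{
	/* The first continuation hangs off the head's frag_list;
	 * later ones extend the list through skb->next. */
	if (!skb_shinfo(head)->frag_list)
		skb_shinfo(head)->frag_list = skb;
	else
		rx_tail->next = skb;
	rx_tail = skb;

	/* Continuation buffers carry no header, so undo the
	 * NET_IP_ALIGN reserve: their payload starts at offset 0. */
	skb_reserve(skb, -NET_IP_ALIGN);

	/* Grow the head's totals so the stack sees one logical
	 * packet covering every chained buffer. */
	head->len += skb->len;
	head->data_len += skb->len;
	head->truesize += skb->truesize;
}

Chaining through frag_list lets the stack consume the split packet as a single skb without copying the buffers back into one contiguous area.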