Diffstat (limited to 'target/linux/lantiq/patches-5.10')
6 files changed, 484 insertions, 12 deletions
diff --git a/target/linux/lantiq/patches-5.10/0702-v5.16-net-lantiq-add-support-for-jumbo-frames.patch b/target/linux/lantiq/patches-5.10/0702-v5.16-net-lantiq-add-support-for-jumbo-frames.patch
new file mode 100644
index 0000000000..5bbf752dba
--- /dev/null
+++ b/target/linux/lantiq/patches-5.10/0702-v5.16-net-lantiq-add-support-for-jumbo-frames.patch
@@ -0,0 +1,145 @@
+From 998ac358019e491217e752bc6dcbb3afb2a6fa3e Mon Sep 17 00:00:00 2001
+From: Aleksander Jan Bajkowski <olek2@wp.pl>
+Date: Sun, 19 Sep 2021 20:24:28 +0200
+Subject: [PATCH] net: lantiq: add support for jumbo frames
+
+Add support for jumbo frames. Full support for jumbo frames requires
+changes in the DSA switch driver (lantiq_gswip.c).
+
+Tested on BT Home Hub 5A.
+
+Signed-off-by: Aleksander Jan Bajkowski <olek2@wp.pl>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/lantiq_xrx200.c | 64 +++++++++++++++++++++++++---
+ 1 file changed, 57 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/lantiq_xrx200.c
++++ b/drivers/net/ethernet/lantiq_xrx200.c
+@@ -14,13 +14,15 @@
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ 
++#include <linux/if_vlan.h>
++
+ #include <linux/of_net.h>
+ #include <linux/of_platform.h>
+ 
+ #include <xway_dma.h>
+ 
+ /* DMA */
+-#define XRX200_DMA_DATA_LEN	0x600
++#define XRX200_DMA_DATA_LEN	(SZ_64K - 1)
+ #define XRX200_DMA_RX		0
+ #define XRX200_DMA_TX		1
+ 
+@@ -106,7 +108,8 @@ static void xrx200_flush_dma(struct xrx2
+ 			break;
+ 
+ 		desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
+-			    XRX200_DMA_DATA_LEN;
++			    (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
++			     ETH_FCS_LEN);
+ 		ch->dma.desc++;
+ 		ch->dma.desc %= LTQ_DESC_NUM;
+ 	}
+@@ -154,19 +157,20 @@ static int xrx200_close(struct net_devic
+ 
+ static int xrx200_alloc_skb(struct xrx200_chan *ch)
+ {
++	int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+ 	struct sk_buff *skb = ch->skb[ch->dma.desc];
+ 	dma_addr_t mapping;
+ 	int ret = 0;
+ 
+ 	ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
+-							  XRX200_DMA_DATA_LEN);
++							  len);
+ 	if (!ch->skb[ch->dma.desc]) {
+ 		ret = -ENOMEM;
+ 		goto skip;
+ 	}
+ 
+ 	mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
+-				 XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
++				 len, DMA_FROM_DEVICE);
+ 	if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
+ 		dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+ 		ch->skb[ch->dma.desc] = skb;
+@@ -179,8 +183,7 @@ static int xrx200_alloc_skb(struct xrx20
+ 	wmb();
+ skip:
+ 	ch->dma.desc_base[ch->dma.desc].ctl =
+-		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
+-		XRX200_DMA_DATA_LEN;
++		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;
+ 
+ 	return ret;
+ }
+@@ -340,10 +343,57 @@ err_drop:
+ 	return NETDEV_TX_OK;
+ }
+ 
++static int
++xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
++{
++	struct xrx200_priv *priv = netdev_priv(net_dev);
++	struct xrx200_chan *ch_rx = &priv->chan_rx;
++	int old_mtu = net_dev->mtu;
++	bool running = false;
++	struct sk_buff *skb;
++	int curr_desc;
++	int ret = 0;
++
++	net_dev->mtu = new_mtu;
++
++	if (new_mtu <= old_mtu)
++		return ret;
++
++	running = netif_running(net_dev);
++	if (running) {
++		napi_disable(&ch_rx->napi);
++		ltq_dma_close(&ch_rx->dma);
++	}
++
++	xrx200_poll_rx(&ch_rx->napi, LTQ_DESC_NUM);
++	curr_desc = ch_rx->dma.desc;
++
++	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
++	     ch_rx->dma.desc++) {
++		skb = ch_rx->skb[ch_rx->dma.desc];
++		ret = xrx200_alloc_skb(ch_rx);
++		if (ret) {
++			net_dev->mtu = old_mtu;
++			break;
++		}
++		dev_kfree_skb_any(skb);
++	}
++
++	ch_rx->dma.desc = curr_desc;
++	if (running) {
++		napi_enable(&ch_rx->napi);
++		ltq_dma_open(&ch_rx->dma);
++		ltq_dma_enable_irq(&ch_rx->dma);
++	}
++
++	return ret;
++}
++
+ static const struct net_device_ops xrx200_netdev_ops = {
+ 	.ndo_open = xrx200_open,
+ 	.ndo_stop = xrx200_close,
+ 	.ndo_start_xmit = xrx200_start_xmit,
++	.ndo_change_mtu = xrx200_change_mtu,
+ 	.ndo_set_mac_address = eth_mac_addr,
+ 	.ndo_validate_addr = eth_validate_addr,
+ };
+@@ -454,7 +504,7 @@ static int xrx200_probe(struct platform_
+ 	net_dev->netdev_ops = &xrx200_netdev_ops;
+ 	SET_NETDEV_DEV(net_dev, dev);
+ 	net_dev->min_mtu = ETH_ZLEN;
+-	net_dev->max_mtu = XRX200_DMA_DATA_LEN;
++	net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ 
+ 	/* load the memory ranges */
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/target/linux/lantiq/patches-5.10/0703-net-lantiq-enable-jumbo-frames-on-GSWIP.patch b/target/linux/lantiq/patches-5.10/0703-net-lantiq-enable-jumbo-frames-on-GSWIP.patch
new file mode 100644
index 0000000000..9ff0baed43
--- /dev/null
+++ b/target/linux/lantiq/patches-5.10/0703-net-lantiq-enable-jumbo-frames-on-GSWIP.patch
@@ -0,0 +1,101 @@
+From 24a43ae2ac0ea06c474b1c80dc75651294d49321 Mon Sep 17 00:00:00 2001
+From: Thomas Nixon <tom@tomn.co.uk>
+Date: Sat, 2 Oct 2021 00:48:05 +0100
+Subject: [PATCH 2/2] net: lantiq: enable jumbo frames on GSWIP
+
+This enables non-standard MTUs on a per-port basis, with the overall
+frame size set based on the CPU port.
+
+When the MTU is not changed, this should have no effect.
+
+Long packets crash the switch with MTUs of greater than 2526, so the
+maximum is limited for now.
+
+Signed-off-by: Thomas Nixon <tom@tomn.co.uk>
+---
+ drivers/net/dsa/lantiq_gswip.c | 46 +++++++++++++++++++++++++++++++---
+ 1 file changed, 42 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/dsa/lantiq_gswip.c
++++ b/drivers/net/dsa/lantiq_gswip.c
+@@ -238,6 +238,11 @@
+ 
+ #define XRX200_GPHY_FW_ALIGN	(16 * 1024)
+ 
++/* maximum packet size supported by the switch; in theory this should be 9600,
++ * but long packets currently cause lock-ups with an MTU of over 2526
++ */
++#define GSWIP_MAX_PACKET_LENGTH	2556
++
+ struct gswip_hw_info {
+ 	int max_ports;
+ 	int cpu_port;
+@@ -851,10 +856,6 @@ static int gswip_setup(struct dsa_switch
+ 	gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
+ 			  GSWIP_PCE_PCTRL_0p(cpu_port));
+ 
+-	gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
+-			  GSWIP_MAC_CTRL_2p(cpu_port));
+-	gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN,
+-		       GSWIP_MAC_FLEN);
+ 	gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
+ 			  GSWIP_BM_QUEUE_GCTRL);
+ 
+@@ -871,6 +872,8 @@ static int gswip_setup(struct dsa_switch
+ 		return err;
+ 	}
+ 
++	ds->mtu_enforcement_ingress = true;
++
+ 	gswip_port_enable(ds, cpu_port, NULL);
+ 	return 0;
+ }
+@@ -1433,6 +1436,39 @@ static int gswip_port_fdb_dump(struct ds
+ 	return 0;
+ }
+ 
++static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
++{
++	/* includes 8 bytes for special header */
++	return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
++}
++
++static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
++{
++	struct gswip_priv *priv = ds->priv;
++	int cpu_port = priv->hw_info->cpu_port;
++
++	/* cpu port always has maximum mtu of user ports, so use it to set
++	 * switch frame size, including 8 byte special header
++	 */
++	if (port == cpu_port) {
++		new_mtu += 8;
++		gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
++			       GSWIP_MAC_FLEN);
++	}
++
++	/* enable MLEN for ports with non-standard MTUs, including the special
++	 * header on the CPU port added above
++	 */
++	if (new_mtu != ETH_DATA_LEN)
++		gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
++				  GSWIP_MAC_CTRL_2p(port));
++	else
++		gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0,
++				  GSWIP_MAC_CTRL_2p(port));
++
++	return 0;
++}
++
+ static void gswip_phylink_validate(struct dsa_switch *ds, int port,
+ 				   unsigned long *supported,
+ 				   struct phylink_link_state *state)
+@@ -1776,6 +1812,8 @@ static const struct dsa_switch_ops gswip
+ 	.port_fdb_add			= gswip_port_fdb_add,
+ 	.port_fdb_del			= gswip_port_fdb_del,
+ 	.port_fdb_dump			= gswip_port_fdb_dump,
++	.port_change_mtu		= gswip_port_change_mtu,
++	.port_max_mtu			= gswip_port_max_mtu,
+ 	.phylink_validate		= gswip_phylink_validate,
+ 	.phylink_mac_config		= gswip_phylink_mac_config,
+ 	.phylink_mac_link_down		= gswip_phylink_mac_link_down,
diff --git a/target/linux/lantiq/patches-5.10/0704-v5.16-net-lantiq_xrx200-increase-buffer-reservation.patch b/target/linux/lantiq/patches-5.10/0704-v5.16-net-lantiq_xrx200-increase-buffer-reservation.patch
new file mode 100644
index 0000000000..77c0eb6354
--- /dev/null
+++ b/target/linux/lantiq/patches-5.10/0704-v5.16-net-lantiq_xrx200-increase-buffer-reservation.patch
@@ -0,0 +1,122 @@
+From 1488fc204568f707fe2a42a913788c00a95af30e Mon Sep 17 00:00:00 2001
+From: Aleksander Jan Bajkowski <olek2@wp.pl>
+Date: Fri, 17 Dec 2021 01:07:40 +0100
+Subject: [PATCH] net: lantiq_xrx200: increase buffer reservation
+
+If the user sets a lower mtu on the CPU port than on the switch,
+then DMA inserts a few more bytes into the buffer than expected.
+In the worst case, it may exceed the size of the buffer. The
+experiments showed that the buffer should be a multiple of the
+burst length value. This patch rounds the length of the rx buffer
+upwards and fixes this bug. The reservation of FCS space in the
+buffer has been removed as PMAC strips the FCS.
+
+Fixes: 998ac358019e ("net: lantiq: add support for jumbo frames")
+Reported-by: Thomas Nixon <tom@tomn.co.uk>
+Signed-off-by: Aleksander Jan Bajkowski <olek2@wp.pl>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+ drivers/net/ethernet/lantiq_xrx200.c | 34 ++++++++++++++++++++--------
+ 1 file changed, 24 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/lantiq_xrx200.c
++++ b/drivers/net/ethernet/lantiq_xrx200.c
+@@ -70,6 +70,8 @@ struct xrx200_priv {
+ 	struct xrx200_chan chan_tx;
+ 	struct xrx200_chan chan_rx;
+ 
++	u16 rx_buf_size;
++
+ 	struct net_device *net_dev;
+ 	struct device *dev;
+ 
+@@ -96,6 +98,16 @@ static void xrx200_pmac_mask(struct xrx2
+ 	xrx200_pmac_w32(priv, val, offset);
+ }
+ 
++static int xrx200_max_frame_len(int mtu)
++{
++	return VLAN_ETH_HLEN + mtu;
++}
++
++static int xrx200_buffer_size(int mtu)
++{
++	return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
++}
++
+ /* drop all the packets from the DMA ring */
+ static void xrx200_flush_dma(struct xrx200_chan *ch)
+ {
+@@ -108,8 +120,7 @@ static void xrx200_flush_dma(struct xrx2
+ 			break;
+ 
+ 		desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
+-			    (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
+-			     ETH_FCS_LEN);
++			    ch->priv->rx_buf_size;
+ 		ch->dma.desc++;
+ 		ch->dma.desc %= LTQ_DESC_NUM;
+ 	}
+@@ -157,21 +168,21 @@ static int xrx200_close(struct net_devic
+ 
+ static int xrx200_alloc_skb(struct xrx200_chan *ch)
+ {
+-	int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+ 	struct sk_buff *skb = ch->skb[ch->dma.desc];
++	struct xrx200_priv *priv = ch->priv;
+ 	dma_addr_t mapping;
+ 	int ret = 0;
+ 
+-	ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
+-							  len);
++	ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(priv->net_dev,
++							  priv->rx_buf_size);
+ 	if (!ch->skb[ch->dma.desc]) {
+ 		ret = -ENOMEM;
+ 		goto skip;
+ 	}
+ 
+-	mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
+-				 len, DMA_FROM_DEVICE);
+-	if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
++	mapping = dma_map_single(priv->dev, ch->skb[ch->dma.desc]->data,
++				 priv->rx_buf_size, DMA_FROM_DEVICE);
++	if (unlikely(dma_mapping_error(priv->dev, mapping))) {
+ 		dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+ 		ch->skb[ch->dma.desc] = skb;
+ 		ret = -ENOMEM;
+@@ -183,7 +194,7 @@ static int xrx200_alloc_skb(struct xrx20
+ 	wmb();
+ skip:
+ 	ch->dma.desc_base[ch->dma.desc].ctl =
+-		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;
++		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;
+ 
+ 	return ret;
+ }
+@@ -355,6 +366,7 @@ xrx200_change_mtu(struct net_device *net
+ 	int ret = 0;
+ 
+ 	net_dev->mtu = new_mtu;
++	priv->rx_buf_size = xrx200_buffer_size(new_mtu);
+ 
+ 	if (new_mtu <= old_mtu)
+ 		return ret;
+@@ -374,6 +386,7 @@ xrx200_change_mtu(struct net_device *net
+ 		ret = xrx200_alloc_skb(ch_rx);
+ 		if (ret) {
+ 			net_dev->mtu = old_mtu;
++			priv->rx_buf_size = xrx200_buffer_size(old_mtu);
+ 			break;
+ 		}
+ 		dev_kfree_skb_any(skb);
+@@ -504,7 +517,8 @@ static int xrx200_probe(struct platform_
+ 	net_dev->netdev_ops = &xrx200_netdev_ops;
+ 	SET_NETDEV_DEV(net_dev, dev);
+ 	net_dev->min_mtu = ETH_ZLEN;
+-	net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
++	net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
++	priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
+ 
+ 	/* load the memory ranges */
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/target/linux/lantiq/patches-5.10/0705-v5.17-net-lantiq_xrx200-add-ingress-SG-DMA-support.patch b/target/linux/lantiq/patches-5.10/0705-v5.17-net-lantiq_xrx200-add-ingress-SG-DMA-support.patch
new file mode 100644
index 0000000000..f2c36952fc
--- /dev/null
+++ b/target/linux/lantiq/patches-5.10/0705-v5.17-net-lantiq_xrx200-add-ingress-SG-DMA-support.patch
@@ -0,0 +1,104 @@
+From c3e6b2c35b34214c58c1e90d65dab5f5393608e7 Mon Sep 17 00:00:00 2001
+From: Aleksander Jan Bajkowski <olek2@wp.pl>
+Date: Mon, 3 Jan 2022 20:43:16 +0100
+Subject: [PATCH] net: lantiq_xrx200: add ingress SG DMA support
+
+This patch adds support for scatter gather DMA. DMA in PMAC splits
+the packet into several buffers when the MTU on the CPU port is
+less than the MTU of the switch. The first buffer starts at an
+offset of NET_IP_ALIGN. In subsequent buffers, dma ignores the
+offset. Thanks to this patch, the user can still connect to the
+device in such a situation. For normal configurations, the patch
+has no effect on performance.
+
+Signed-off-by: Aleksander Jan Bajkowski <olek2@wp.pl>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/lantiq_xrx200.c | 47 +++++++++++++++++++++++-----
+ 1 file changed, 40 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/lantiq_xrx200.c
++++ b/drivers/net/ethernet/lantiq_xrx200.c
+@@ -26,6 +26,9 @@
+ #define XRX200_DMA_RX		0
+ #define XRX200_DMA_TX		1
+ 
++#define XRX200_DMA_PACKET_COMPLETE	0
++#define XRX200_DMA_PACKET_IN_PROGRESS	1
++
+ /* cpu port mac */
+ #define PMAC_RX_IPG		0x0024
+ #define PMAC_RX_IPG_MASK	0xf
+@@ -61,6 +64,9 @@ struct xrx200_chan {
+ 	struct ltq_dma_channel dma;
+ 	struct sk_buff *skb[LTQ_DESC_NUM];
+ 
++	struct sk_buff *skb_head;
++	struct sk_buff *skb_tail;
++
+ 	struct xrx200_priv *priv;
+ };
+ 
+@@ -204,7 +210,8 @@ static int xrx200_hw_receive(struct xrx2
+ 	struct xrx200_priv *priv = ch->priv;
+ 	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+ 	struct sk_buff *skb = ch->skb[ch->dma.desc];
+-	int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
++	u32 ctl = desc->ctl;
++	int len = (ctl & LTQ_DMA_SIZE_MASK);
+ 	struct net_device *net_dev = priv->net_dev;
+ 	int ret;
+ 
+@@ -220,12 +227,36 @@ static int xrx200_hw_receive(struct xrx2
+ 	}
+ 
+ 	skb_put(skb, len);
+-	skb->protocol = eth_type_trans(skb, net_dev);
+-	netif_receive_skb(skb);
+-	net_dev->stats.rx_packets++;
+-	net_dev->stats.rx_bytes += len;
+ 
+-	return 0;
++	/* add buffers to skb via skb->frag_list */
++	if (ctl & LTQ_DMA_SOP) {
++		ch->skb_head = skb;
++		ch->skb_tail = skb;
++	} else if (ch->skb_head) {
++		if (ch->skb_head == ch->skb_tail)
++			skb_shinfo(ch->skb_tail)->frag_list = skb;
++		else
++			ch->skb_tail->next = skb;
++		ch->skb_tail = skb;
++		skb_reserve(ch->skb_tail, -NET_IP_ALIGN);
++		ch->skb_head->len += skb->len;
++		ch->skb_head->data_len += skb->len;
++		ch->skb_head->truesize += skb->truesize;
++	}
++
++	if (ctl & LTQ_DMA_EOP) {
++		ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
++		netif_receive_skb(ch->skb_head);
++		net_dev->stats.rx_packets++;
++		net_dev->stats.rx_bytes += ch->skb_head->len;
++		ch->skb_head = NULL;
++		ch->skb_tail = NULL;
++		ret = XRX200_DMA_PACKET_COMPLETE;
++	} else {
++		ret = XRX200_DMA_PACKET_IN_PROGRESS;
++	}
++
++	return ret;
+ }
+ 
+ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
+@@ -240,7 +271,9 @@ static int xrx200_poll_rx(struct napi_st
+ 
+ 		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
+ 			ret = xrx200_hw_receive(ch);
+-			if (ret)
++			if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
++				continue;
++			if (ret != XRX200_DMA_PACKET_COMPLETE)
+ 				return ret;
+ 			rx++;
+ 		} else {
diff --git a/target/linux/lantiq/patches-5.10/0710-v5.16-net-lantiq-configure-the-burst-length-in-ethernet-dr.patch b/target/linux/lantiq/patches-5.10/0710-v5.16-net-lantiq-configure-the-burst-length-in-ethernet-dr.patch
index bf3b903616..4fb7d0767b 100644
--- a/target/linux/lantiq/patches-5.10/0710-v5.16-net-lantiq-configure-the-burst-length-in-ethernet-dr.patch
+++ b/target/linux/lantiq/patches-5.10/0710-v5.16-net-lantiq-configure-the-burst-length-in-ethernet-dr.patch
@@ -75,7 +75,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
 	priv->txch.netdev = dev;
 --- a/drivers/net/ethernet/lantiq_xrx200.c
 +++ b/drivers/net/ethernet/lantiq_xrx200.c
-@@ -71,6 +71,9 @@ struct xrx200_priv {
+@@ -81,6 +81,9 @@ struct xrx200_priv {
 	struct net_device *net_dev;
 	struct device *dev;
 
@@ -85,7 +85,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
 	__iomem void *pmac_reg;
 };
 
-@@ -316,8 +319,8 @@ static netdev_tx_t xrx200_start_xmit(str
+@@ -363,8 +366,8 @@ static netdev_tx_t xrx200_start_xmit(str
 	if (unlikely(dma_mapping_error(priv->dev, mapping)))
 		goto err_drop;
 
@@ -96,7 +96,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
 	desc->addr = mapping - byte_offset;
 
 	/* Make sure the address is written before we give it to HW */
-@@ -369,7 +372,7 @@ static int xrx200_dma_init(struct xrx200
+@@ -465,7 +468,7 @@ static int xrx200_dma_init(struct xrx200
 	int ret = 0;
 	int i;
 
-@@ -487,6 +490,18 @@ static int xrx200_probe(struct platform_
+@@ -584,6 +587,18 @@ static int xrx200_probe(struct platform_
 	if (err)
 		eth_hw_addr_random(net_dev);
diff --git a/target/linux/lantiq/patches-5.10/0711-v5.16-net-lantiq_xrx200-Hardcode-the-burst-length-value.patch b/target/linux/lantiq/patches-5.10/0711-v5.16-net-lantiq_xrx200-Hardcode-the-burst-length-value.patch
index 0b68179a48..7448af8c26 100644
--- a/target/linux/lantiq/patches-5.10/0711-v5.16-net-lantiq_xrx200-Hardcode-the-burst-length-value.patch
+++ b/target/linux/lantiq/patches-5.10/0711-v5.16-net-lantiq_xrx200-Hardcode-the-burst-length-value.patch
@@ -15,15 +15,15 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
 --- a/drivers/net/ethernet/lantiq_xrx200.c
 +++ b/drivers/net/ethernet/lantiq_xrx200.c
-@@ -23,6 +23,7 @@
- #define XRX200_DMA_DATA_LEN	0x600
+@@ -25,6 +25,7 @@
+ #define XRX200_DMA_DATA_LEN	(SZ_64K - 1)
  #define XRX200_DMA_RX		0
  #define XRX200_DMA_TX		1
 +#define XRX200_DMA_BURST_LEN	8
 
- /* cpu port mac */
- #define PMAC_RX_IPG		0x0024
+ #define XRX200_DMA_PACKET_COMPLETE	0
+ #define XRX200_DMA_PACKET_IN_PROGRESS	1
+@@ -81,9 +82,6 @@ struct xrx200_priv {
 	struct net_device *net_dev;
 	struct device *dev;
 
@@ -33,7 +33,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
 	__iomem void *pmac_reg;
 };
 
-@@ -320,7 +318,7 @@ static netdev_tx_t xrx200_start_xmit(str
+@@ -367,7 +365,7 @@ static netdev_tx_t xrx200_start_xmit(str
 	goto err_drop;
 
 	/* dma needs to start on a burst length value aligned address */
-@@ -372,7 +370,8 @@ static int xrx200_dma_init(struct xrx200
+@@ -468,7 +466,8 @@ static int xrx200_dma_init(struct xrx200
 	int ret = 0;
 	int i;
 
@@ -52,7 +52,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
 	ch_rx->dma.nr = XRX200_DMA_RX;
 	ch_rx->dma.dev = priv->dev;
 
-@@ -490,18 +489,6 @@ static int xrx200_probe(struct platform_
+@@ -587,18 +586,6 @@ static int xrx200_probe(struct platform_
 	if (err)
 		eth_hw_addr_random(net_dev);
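Editor's note: the arithmetic that runs through patches 0702 and 0704 above is easy to check in isolation. The RX buffer must hold the MTU plus the VLAN Ethernet header, rounded up to a multiple of the DMA burst length (0704 drops the FCS reservation because the PMAC strips the FCS). Below is a minimal userspace sketch of that sizing logic; the constants mirror the driver, and the program itself is illustrative, not part of any patch.

```c
#include <stdio.h>

/* Values mirrored from the patched driver (plain macros, not kernel headers). */
#define VLAN_ETH_HLEN        22 /* Ethernet header plus one VLAN tag */
#define XRX200_DMA_BURST_LEN  8 /* burst length hardcoded by patch 0711 */

/* round_up() in the kernel does the same for power-of-two steps. */
static int round_up_pow2(int x, int step)
{
	return (x + step - 1) & ~(step - 1);
}

/* Frame length for a given MTU; no FCS term, since the PMAC strips it. */
static int xrx200_max_frame_len(int mtu)
{
	return VLAN_ETH_HLEN + mtu;
}

/* Buffer size per patch 0704: frame length rounded up to a multiple of
 * 4 * XRX200_DMA_BURST_LEN bytes, so DMA can never overrun the allocation. */
static int xrx200_buffer_size(int mtu)
{
	return round_up_pow2(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
}

int main(void)
{
	printf("buf(1500) = %d\n", xrx200_buffer_size(1500)); /* prints 1536 */
	printf("buf(2530) = %d\n", xrx200_buffer_size(2530)); /* prints 2560 */
	return 0;
}
```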
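The same kind of bookkeeping appears in patch 0703: GSWIP_MAC_FLEN follows the CPU port MTU plus the 8-byte special header, the VLAN Ethernet header, and the FCS, while per-port MLEN is enabled only for non-standard MTUs. A hedged sketch with the register writes replaced by plain computations; the names mirror the patch, and the helper gswip_mlen_enabled() is invented here for illustration.

```c
#include <stdio.h>

#define VLAN_ETH_HLEN           22
#define ETH_FCS_LEN              4
#define ETH_DATA_LEN          1500
#define GSWIP_MAX_PACKET_LENGTH 2556 /* longer packets lock the switch up */

/* Largest MTU offered to DSA, as in gswip_port_max_mtu(); per the patch
 * comment, this budget also covers the 8-byte special header. */
static int gswip_port_max_mtu(void)
{
	return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
}

/* Frame length written to GSWIP_MAC_FLEN when the CPU port MTU changes:
 * the 8-byte special header rides on top of the CPU port MTU. */
static int gswip_mac_flen(int cpu_port_mtu)
{
	return VLAN_ETH_HLEN + (cpu_port_mtu + 8) + ETH_FCS_LEN;
}

/* Per-port MLEN mirrors the patch: only non-default MTUs need it. */
static int gswip_mlen_enabled(int new_mtu)
{
	return new_mtu != ETH_DATA_LEN;
}

int main(void)
{
	printf("max user MTU:          %d\n", gswip_port_max_mtu());     /* 2530 */
	printf("MAC_FLEN for MTU 1500: %d\n", gswip_mac_flen(1500));     /* 1534 */
	printf("MLEN for MTU 1500:     %d\n", gswip_mlen_enabled(1500)); /* 0 */
	return 0;
}
```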
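Finally, the ingress SG logic from patch 0705 reduces to a small state machine over the SOP/EOP descriptor flags: the SOP buffer becomes the head, the first continuation hangs off frag_list, later continuations chain via next, and EOP hands the assembled packet to the stack. A toy model with a plain struct standing in for sk_buff follows; all names here are stand-ins rather than kernel API, and the skb_reserve() offset fixup from the real driver is omitted.

```c
#include <stdio.h>
#include <stddef.h>

enum { SOP = 1, EOP = 2 }; /* stand-ins for LTQ_DMA_SOP / LTQ_DMA_EOP */

struct buf {
	int len;
	struct buf *frag_list; /* first continuation buffer */
	struct buf *next;      /* later continuations */
};

static struct buf *head, *tail;

/* Feed one DMA buffer; returns the full packet on EOP, NULL otherwise. */
static struct buf *rx_segment(struct buf *b, int flags)
{
	if (flags & SOP) {
		head = tail = b;
	} else if (head) {
		if (head == tail)
			head->frag_list = b; /* first continuation */
		else
			tail->next = b;      /* subsequent ones */
		tail = b;
		head->len += b->len;         /* head tracks the total length */
	}

	if (flags & EOP) {
		struct buf *pkt = head;
		head = tail = NULL;
		return pkt;
	}
	return NULL;
}

int main(void)
{
	struct buf a = { .len = 1504 }, b = { .len = 700 }, c = { .len = 322 };

	rx_segment(&a, SOP);
	rx_segment(&b, 0);
	struct buf *pkt = rx_segment(&c, EOP);
	printf("assembled length: %d\n", pkt ? pkt->len : -1); /* prints 2526 */
	return 0;
}
```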