author    Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>  2018-04-24 12:19:43 +0000
committer Mathias Kresin <dev@kresin.me>  2018-04-26 08:53:54 +0200
commit    9aa196e0f260986991dc8ea65a219f81aed0197e (patch)
tree      99abd0b8596eb91fb0837c0e49772fc202429cad /target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch
parent    bdb0de1bbce235244bcd0503c71886409379f4fc (diff)
kernel: bump 4.9 to 4.9.96
Refresh patches, following required reworking:

 ar71xx/patches-4.9/930-chipidea-pullup.patch
 layerscape/patches-4.9/302-dts-support-layercape.patch
 sunxi/patches-4.9/0052-stmmac-form-4-12.patch

Fixes for CVEs:

 CVE-2018-1108
 CVE-2018-1092

Tested on: ar71xx Archer C7 v2

Signed-off-by: Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>
Tested-by: Koen Vandeputte <koen.vandeputte@ncentric.com>
Tested-by: Arjen de Korte <build+openwrt@de-korte.org>
Diffstat (limited to 'target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch')
-rw-r--r-- target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch | 242
1 file changed, 97 insertions, 145 deletions
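
The "Refresh patches, following required reworking" step in the commit message is normally done with the OpenWrt kernel-patch workflow rather than by editing the .patch files by hand. The outline below is a rough sketch of that workflow, assuming the usual quilt-based procedure from the OpenWrt developer documentation; the exact make targets and paths are an assumption and are not taken from this commit.

  # after bumping the 4.9 kernel version and checksum in the build system,
  # let the build system re-apply and refresh every patch that still applies cleanly
  make target/linux/refresh V=s

  # patches that no longer apply (here: 930-chipidea-pullup.patch,
  # 302-dts-support-layercape.patch and 0052-stmmac-form-4-12.patch)
  # are reworked by hand via quilt
  make target/linux/{clean,prepare} V=s QUILT=1
  cd build_dir/target-*/linux-*/linux-4.9.*
  quilt push -a                  # stops at the first patch that fails to apply
  # fix the rejects in the source tree, then regenerate the failing patch
  quilt refresh
  cd -
  make target/linux/update V=s   # copy the refreshed patches back under target/linux/

The diff below is the result of that refresh for the sunxi stmmac backport: hunk offsets shift to match 4.9.96, and the PTP SNAPTYPSEL hunks drop out of the patch because the equivalent change is already present in the upstream stable kernel.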
diff --git a/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch b/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch
index 1356a4b9e9..aa6b813daf 100644
--- a/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch
+++ b/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch
@@ -2782,43 +2782,7 @@
}
}
-@@ -477,7 +552,10 @@ static int stmmac_hwtstamp_ioctl(struct
- /* PTP v1, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- /* take time stamp for all event messages */
-- snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
-+ if (priv->plat->has_gmac4)
-+ snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
-+ else
-+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
-
- ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
- ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
-@@ -509,7 +587,10 @@ static int stmmac_hwtstamp_ioctl(struct
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
- ptp_v2 = PTP_TCR_TSVER2ENA;
- /* take time stamp for all event messages */
-- snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
-+ if (priv->plat->has_gmac4)
-+ snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
-+ else
-+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
-
- ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
- ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
-@@ -543,7 +624,10 @@ static int stmmac_hwtstamp_ioctl(struct
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- ptp_v2 = PTP_TCR_TSVER2ENA;
- /* take time stamp for all event messages */
-- snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
-+ if (priv->plat->has_gmac4)
-+ snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
-+ else
-+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
-
- ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
- ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
-@@ -679,6 +763,19 @@ static void stmmac_release_ptp(struct st
+@@ -688,6 +763,19 @@ static void stmmac_release_ptp(struct st
}
/**
@@ -2838,7 +2802,7 @@
* stmmac_adjust_link - adjusts the link parameters
* @dev: net device structure
* Description: this is the helper called by the physical abstraction layer
-@@ -693,7 +790,6 @@ static void stmmac_adjust_link(struct ne
+@@ -702,7 +790,6 @@ static void stmmac_adjust_link(struct ne
struct phy_device *phydev = dev->phydev;
unsigned long flags;
int new_state = 0;
@@ -2846,7 +2810,7 @@
if (!phydev)
return;
-@@ -715,8 +811,7 @@ static void stmmac_adjust_link(struct ne
+@@ -724,8 +811,7 @@ static void stmmac_adjust_link(struct ne
}
/* Flow Control operation */
if (phydev->pause)
@@ -2856,7 +2820,7 @@
if (phydev->speed != priv->speed) {
new_state = 1;
-@@ -884,22 +979,56 @@ static int stmmac_init_phy(struct net_de
+@@ -893,22 +979,56 @@ static int stmmac_init_phy(struct net_de
return 0;
}
@@ -2887,7 +2851,7 @@
+
+ /* Display RX ring */
+ priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
- }
++ }
+}
+
+static void stmmac_display_tx_rings(struct stmmac_priv *priv)
@@ -2906,26 +2870,26 @@
+ head_tx = (void *)tx_q->dma_etx;
+ else
+ head_tx = (void *)tx_q->dma_tx;
-
-- /* Display Rx ring */
-- priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
-- /* Display Tx ring */
-- priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
++
+ priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
-+ }
+ }
+}
+
+static void stmmac_display_rings(struct stmmac_priv *priv)
+{
+ /* Display RX ring */
+ stmmac_display_rx_rings(priv);
-+
+
+- /* Display Rx ring */
+- priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
+- /* Display Tx ring */
+- priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
+ /* Display TX ring */
+ stmmac_display_tx_rings(priv);
}
static int stmmac_set_bfsize(int mtu, int bufsize)
-@@ -919,48 +1048,88 @@ static int stmmac_set_bfsize(int mtu, in
+@@ -928,48 +1048,88 @@ static int stmmac_set_bfsize(int mtu, in
}
/**
@@ -3024,7 +2988,7 @@
struct sk_buff *skb;
skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
-@@ -969,20 +1138,20 @@ static int stmmac_init_rx_buffers(struct
+@@ -978,20 +1138,20 @@ static int stmmac_init_rx_buffers(struct
"%s: Rx init fails; skb is NULL\n", __func__);
return -ENOMEM;
}
@@ -3050,7 +3014,7 @@
if ((priv->hw->mode->init_desc3) &&
(priv->dma_buf_sz == BUF_SIZE_16KiB))
-@@ -991,30 +1160,71 @@ static int stmmac_init_rx_buffers(struct
+@@ -1000,30 +1160,71 @@ static int stmmac_init_rx_buffers(struct
return 0;
}
@@ -3131,7 +3095,7 @@
if (priv->hw->mode->set_16kib_bfsize)
bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
-@@ -1024,235 +1234,409 @@ static int init_dma_desc_rings(struct ne
+@@ -1033,235 +1234,409 @@ static int init_dma_desc_rings(struct ne
priv->dma_buf_sz = bfsize;
@@ -3151,11 +3115,17 @@
- p = priv->dma_rx + i;
+ for (queue = 0; queue < rx_count; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-+
+
+- ret = stmmac_init_rx_buffers(priv, p, i, flags);
+- if (ret)
+- goto err_init_rx_buffers;
+ netif_dbg(priv, probe, priv->dev,
+ "(%s) dma_rx_phy=0x%08x\n", __func__,
+ (u32)rx_q->dma_rx_phy);
-+
+
+- netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
+- priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
+- (unsigned int)priv->rx_skbuff_dma[i]);
+ for (i = 0; i < DMA_RX_SIZE; i++) {
+ struct dma_desc *p;
+
@@ -3173,16 +3143,10 @@
+ rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
+ (unsigned int)rx_q->rx_skbuff_dma[i]);
+ }
-
-- ret = stmmac_init_rx_buffers(priv, p, i, flags);
-- if (ret)
-- goto err_init_rx_buffers;
++
+ rx_q->cur_rx = 0;
+ rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
-
-- netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
-- priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
-- (unsigned int)priv->rx_skbuff_dma[i]);
++
+ stmmac_clear_rx_descriptors(priv, queue);
+
+ /* Setup the chained descriptor addresses */
@@ -3644,9 +3608,9 @@
+err_dma_buffers:
+ free_dma_tx_desc_resources(priv);
+
- return ret;
- }
-
++ return ret;
++}
++
+/**
+ * alloc_dma_desc_resources - alloc TX/RX resources.
+ * @priv: private structure
@@ -3665,9 +3629,9 @@
+
+ ret = alloc_dma_tx_desc_resources(priv);
+
-+ return ret;
-+}
-+
+ return ret;
+ }
+
+/**
+ * free_dma_desc_resources - free dma desc resources
+ * @priv: private structure
@@ -3706,7 +3670,7 @@
}
/**
-@@ -1262,19 +1646,104 @@ static void free_dma_desc_resources(stru
+@@ -1271,19 +1646,104 @@ static void free_dma_desc_resources(stru
*/
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
@@ -3822,7 +3786,7 @@
}
/**
-@@ -1285,11 +1754,20 @@ static void stmmac_mac_enable_rx_queues(
+@@ -1294,11 +1754,20 @@ static void stmmac_mac_enable_rx_queues(
*/
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
@@ -3847,7 +3811,7 @@
/*
* In case of GMAC, SF mode can be enabled
* to perform the TX COE in HW. This depends on:
-@@ -1297,37 +1775,53 @@ static void stmmac_dma_operation_mode(st
+@@ -1306,37 +1775,53 @@ static void stmmac_dma_operation_mode(st
* 2) There is no bugged Jumbo frame support
* that needs to not insert csum in the TDES.
*/
@@ -3911,7 +3875,7 @@
status = priv->hw->desc->tx_status(&priv->dev->stats,
&priv->xstats, p,
-@@ -1348,48 +1842,51 @@ static void stmmac_tx_clean(struct stmma
+@@ -1357,48 +1842,51 @@ static void stmmac_tx_clean(struct stmma
stmmac_get_tx_hwtstamp(priv, p, skb);
}
@@ -3981,7 +3945,7 @@
}
if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
-@@ -1399,45 +1896,76 @@ static void stmmac_tx_clean(struct stmma
+@@ -1408,45 +1896,76 @@ static void stmmac_tx_clean(struct stmma
netif_tx_unlock(priv->dev);
}
@@ -4073,7 +4037,7 @@
}
/**
-@@ -1449,31 +1977,43 @@ static void stmmac_tx_err(struct stmmac_
+@@ -1458,31 +1977,43 @@ static void stmmac_tx_err(struct stmmac_
*/
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
@@ -4138,7 +4102,7 @@
}
/**
-@@ -1580,6 +2120,13 @@ static void stmmac_check_ether_addr(stru
+@@ -1589,6 +2120,13 @@ static void stmmac_check_ether_addr(stru
*/
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
@@ -4152,7 +4116,7 @@
int atds = 0;
int ret = 0;
-@@ -1597,19 +2144,49 @@ static int stmmac_init_dma_engine(struct
+@@ -1606,19 +2144,49 @@ static int stmmac_init_dma_engine(struct
return ret;
}
@@ -4214,7 +4178,7 @@
}
if (priv->plat->axi && priv->hw->dma->axi)
-@@ -1627,8 +2204,12 @@ static int stmmac_init_dma_engine(struct
+@@ -1636,8 +2204,12 @@ static int stmmac_init_dma_engine(struct
static void stmmac_tx_timer(unsigned long data)
{
struct stmmac_priv *priv = (struct stmmac_priv *)data;
@@ -4228,7 +4192,7 @@
}
/**
-@@ -1650,6 +2231,196 @@ static void stmmac_init_tx_coalesce(stru
+@@ -1659,6 +2231,196 @@ static void stmmac_init_tx_coalesce(stru
add_timer(&priv->txtimer);
}
@@ -4425,7 +4389,7 @@
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
-@@ -1665,6 +2436,9 @@ static void stmmac_init_tx_coalesce(stru
+@@ -1674,6 +2436,9 @@ static void stmmac_init_tx_coalesce(stru
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -4435,7 +4399,7 @@
int ret;
/* DMA initialization and SW reset */
-@@ -1694,9 +2468,9 @@ static int stmmac_hw_setup(struct net_de
+@@ -1703,9 +2468,9 @@ static int stmmac_hw_setup(struct net_de
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->hw, dev->mtu);
@@ -4448,7 +4412,7 @@
ret = priv->hw->mac->rx_ipc(priv->hw);
if (!ret) {
-@@ -1706,10 +2480,7 @@ static int stmmac_hw_setup(struct net_de
+@@ -1715,10 +2480,7 @@ static int stmmac_hw_setup(struct net_de
}
/* Enable the MAC Rx/Tx */
@@ -4460,7 +4424,7 @@
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
-@@ -1717,6 +2488,10 @@ static int stmmac_hw_setup(struct net_de
+@@ -1726,6 +2488,10 @@ static int stmmac_hw_setup(struct net_de
stmmac_mmc_setup(priv);
if (init_ptp) {
@@ -4471,7 +4435,7 @@
ret = stmmac_init_ptp(priv);
if (ret == -EOPNOTSUPP)
netdev_warn(priv->dev, "PTP not supported by HW\n");
-@@ -1731,35 +2506,37 @@ static int stmmac_hw_setup(struct net_de
+@@ -1740,35 +2506,37 @@ static int stmmac_hw_setup(struct net_de
__func__);
#endif
/* Start the ball rolling... */
@@ -4523,7 +4487,7 @@
/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
-@@ -1828,7 +2605,7 @@ static int stmmac_open(struct net_device
+@@ -1837,7 +2605,7 @@ static int stmmac_open(struct net_device
netdev_err(priv->dev,
"%s: ERROR: allocating the IRQ %d (error: %d)\n",
__func__, dev->irq, ret);
@@ -4532,7 +4496,7 @@
}
/* Request the Wake IRQ in case of another line is used for WoL */
-@@ -1855,8 +2632,8 @@ static int stmmac_open(struct net_device
+@@ -1864,8 +2632,8 @@ static int stmmac_open(struct net_device
}
}
@@ -4543,7 +4507,7 @@
return 0;
-@@ -1865,7 +2642,12 @@ lpiirq_error:
+@@ -1874,7 +2642,12 @@ lpiirq_error:
free_irq(priv->wol_irq, dev);
wolirq_error:
free_irq(dev->irq, dev);
@@ -4556,7 +4520,7 @@
init_error:
free_dma_desc_resources(priv);
dma_desc_error:
-@@ -1894,9 +2676,9 @@ static int stmmac_release(struct net_dev
+@@ -1903,9 +2676,9 @@ static int stmmac_release(struct net_dev
phy_disconnect(dev->phydev);
}
@@ -4568,7 +4532,7 @@
del_timer_sync(&priv->txtimer);
-@@ -1908,14 +2690,13 @@ static int stmmac_release(struct net_dev
+@@ -1917,14 +2690,13 @@ static int stmmac_release(struct net_dev
free_irq(priv->lpi_irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
@@ -4585,7 +4549,7 @@
netif_carrier_off(dev);
-@@ -1934,22 +2715,24 @@ static int stmmac_release(struct net_dev
+@@ -1943,22 +2715,24 @@ static int stmmac_release(struct net_dev
* @des: buffer start address
* @total_len: total length to fill in descriptors
* @last_segmant: condition for the last descriptor
@@ -4614,7 +4578,7 @@
desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
-@@ -1957,7 +2740,7 @@ static void stmmac_tso_allocator(struct
+@@ -1966,7 +2740,7 @@ static void stmmac_tso_allocator(struct
priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
0, 1,
@@ -4623,7 +4587,7 @@
0, 0);
tmp_len -= TSO_MAX_BUFF_SIZE;
-@@ -1993,23 +2776,28 @@ static void stmmac_tso_allocator(struct
+@@ -2002,23 +2776,28 @@ static void stmmac_tso_allocator(struct
*/
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -4658,7 +4622,7 @@
/* This is a hard error, log it. */
netdev_err(priv->dev,
"%s: Tx Ring full when queue awake\n",
-@@ -2024,10 +2812,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2033,10 +2812,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
/* set new MSS value if needed */
if (mss != priv->mss) {
@@ -4671,7 +4635,7 @@
}
if (netif_msg_tx_queued(priv)) {
-@@ -2037,9 +2825,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2046,9 +2825,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
skb->data_len);
}
@@ -4683,7 +4647,7 @@
first = desc;
/* first descriptor: fill Headers on Buf1 */
-@@ -2048,9 +2836,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2057,9 +2836,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
@@ -4695,7 +4659,7 @@
first->des0 = cpu_to_le32(des);
-@@ -2061,7 +2848,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2070,7 +2848,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
/* If needed take extra descriptors to fill the remaining payload */
tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
@@ -4704,7 +4668,7 @@
/* Prepare fragments */
for (i = 0; i < nfrags; i++) {
-@@ -2070,24 +2857,34 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2079,24 +2857,34 @@ static netdev_tx_t stmmac_tso_xmit(struc
des = skb_frag_dma_map(priv->device, frag, 0,
skb_frag_size(frag),
DMA_TO_DEVICE);
@@ -4748,7 +4712,7 @@
}
dev->stats.tx_bytes += skb->len;
-@@ -2119,7 +2916,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2128,7 +2916,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
priv->hw->desc->prepare_tso_tx_desc(first, 1,
proto_hdr_len,
pay_len,
@@ -4757,7 +4721,7 @@
tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
/* If context desc is used to change MSS */
-@@ -2134,20 +2931,20 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2143,20 +2931,20 @@ static netdev_tx_t stmmac_tso_xmit(struc
if (netif_msg_pktdata(priv)) {
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
@@ -4784,7 +4748,7 @@
return NETDEV_TX_OK;
-@@ -2171,21 +2968,27 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2180,21 +2968,27 @@ static netdev_tx_t stmmac_xmit(struct sk
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int nopaged_len = skb_headlen(skb);
int i, csum_insertion = 0, is_jumbo = 0;
@@ -4816,7 +4780,7 @@
/* This is a hard error, log it. */
netdev_err(priv->dev,
"%s: Tx Ring full when queue awake\n",
-@@ -2197,20 +3000,18 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2206,20 +3000,18 @@ static netdev_tx_t stmmac_xmit(struct sk
if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);
@@ -4840,7 +4804,7 @@
enh_desc = priv->plat->enh_desc;
/* To program the descriptors according to the size of the frame */
if (enh_desc)
-@@ -2218,7 +3019,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2227,7 +3019,7 @@ static netdev_tx_t stmmac_xmit(struct sk
if (unlikely(is_jumbo) && likely(priv->synopsys_id <
DWMAC_CORE_4_00)) {
@@ -4849,7 +4813,7 @@
if (unlikely(entry < 0))
goto dma_map_err;
}
-@@ -2231,48 +3032,56 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2240,48 +3032,56 @@ static netdev_tx_t stmmac_xmit(struct sk
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
if (likely(priv->extend_desc))
@@ -4919,7 +4883,7 @@
priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
-@@ -2280,10 +3089,10 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2289,10 +3089,10 @@ static netdev_tx_t stmmac_xmit(struct sk
print_pkt(skb->data, skb->len);
}
@@ -4932,7 +4896,7 @@
}
dev->stats.tx_bytes += skb->len;
-@@ -2318,14 +3127,14 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2327,14 +3127,14 @@ static netdev_tx_t stmmac_xmit(struct sk
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
@@ -4950,7 +4914,7 @@
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
-@@ -2337,7 +3146,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2346,7 +3146,7 @@ static netdev_tx_t stmmac_xmit(struct sk
/* Prepare the first descriptor setting the OWN bit too */
priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
csum_insertion, priv->mode, 1,
@@ -4959,7 +4923,7 @@
/* The own bit must be the latest setting done when prepare the
* descriptor and then barrier is needed to make sure that
-@@ -2346,13 +3155,13 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2355,13 +3155,13 @@ static netdev_tx_t stmmac_xmit(struct sk
dma_wmb();
}
@@ -4976,7 +4940,7 @@
return NETDEV_TX_OK;
-@@ -2380,9 +3189,9 @@ static void stmmac_rx_vlan(struct net_de
+@@ -2389,9 +3189,9 @@ static void stmmac_rx_vlan(struct net_de
}
@@ -4988,7 +4952,7 @@
return 0;
return 1;
-@@ -2391,30 +3200,33 @@ static inline int stmmac_rx_threshold_co
+@@ -2400,30 +3200,33 @@ static inline int stmmac_rx_threshold_co
/**
* stmmac_rx_refill - refill used skb preallocated buffers
* @priv: driver private structure
@@ -5029,7 +4993,7 @@
if (unlikely(net_ratelimit()))
dev_err(priv->device,
"fail to alloc skb entry %d\n",
-@@ -2422,28 +3234,28 @@ static inline void stmmac_rx_refill(stru
+@@ -2431,28 +3234,28 @@ static inline void stmmac_rx_refill(stru
break;
}
@@ -5066,7 +5030,7 @@
netif_dbg(priv, rx_status, priv->dev,
"refill entry #%d\n", entry);
-@@ -2459,31 +3271,33 @@ static inline void stmmac_rx_refill(stru
+@@ -2468,31 +3271,33 @@ static inline void stmmac_rx_refill(stru
entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
}
@@ -5107,7 +5071,7 @@
priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
}
-@@ -2493,9 +3307,9 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2502,9 +3307,9 @@ static int stmmac_rx(struct stmmac_priv
struct dma_desc *np;
if (priv->extend_desc)
@@ -5119,7 +5083,7 @@
/* read the status of the incoming frame */
status = priv->hw->desc->rx_status(&priv->dev->stats,
-@@ -2506,20 +3320,20 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2515,20 +3320,20 @@ static int stmmac_rx(struct stmmac_priv
count++;
@@ -5145,7 +5109,7 @@
entry);
if (unlikely(status == discard_frame)) {
priv->dev->stats.rx_errors++;
-@@ -2529,9 +3343,9 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2538,9 +3343,9 @@ static int stmmac_rx(struct stmmac_priv
* them in stmmac_rx_refill() function so that
* device can reuse it.
*/
@@ -5157,7 +5121,7 @@
priv->dma_buf_sz,
DMA_FROM_DEVICE);
}
-@@ -2579,7 +3393,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2588,7 +3393,7 @@ static int stmmac_rx(struct stmmac_priv
*/
if (unlikely(!priv->plat->has_gmac4 &&
((frame_len < priv->rx_copybreak) ||
@@ -5166,7 +5130,7 @@
skb = netdev_alloc_skb_ip_align(priv->dev,
frame_len);
if (unlikely(!skb)) {
-@@ -2591,21 +3405,21 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2600,21 +3405,21 @@ static int stmmac_rx(struct stmmac_priv
}
dma_sync_single_for_cpu(priv->device,
@@ -5192,7 +5156,7 @@
if (unlikely(!skb)) {
netdev_err(priv->dev,
"%s: Inconsistent Rx chain\n",
-@@ -2614,12 +3428,12 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2623,12 +3428,12 @@ static int stmmac_rx(struct stmmac_priv
break;
}
prefetch(skb->data - NET_IP_ALIGN);
@@ -5208,7 +5172,7 @@
priv->dma_buf_sz,
DMA_FROM_DEVICE);
}
-@@ -2641,7 +3455,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2650,7 +3455,7 @@ static int stmmac_rx(struct stmmac_priv
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -5217,7 +5181,7 @@
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
-@@ -2649,7 +3463,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2658,7 +3463,7 @@ static int stmmac_rx(struct stmmac_priv
entry = next_entry;
}
@@ -5226,7 +5190,7 @@
priv->xstats.rx_pkt_n += count;
-@@ -2666,16 +3480,24 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2675,16 +3480,24 @@ static int stmmac_rx(struct stmmac_priv
*/
static int stmmac_poll(struct napi_struct *napi, int budget)
{
@@ -5255,7 +5219,7 @@
}
return work_done;
}
-@@ -2691,9 +3513,12 @@ static int stmmac_poll(struct napi_struc
+@@ -2700,9 +3513,12 @@ static int stmmac_poll(struct napi_struc
static void stmmac_tx_timeout(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -5269,7 +5233,7 @@
}
/**
-@@ -2816,6 +3641,12 @@ static irqreturn_t stmmac_interrupt(int
+@@ -2825,6 +3641,12 @@ static irqreturn_t stmmac_interrupt(int
{
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
@@ -5282,7 +5246,7 @@
if (priv->irq_wake)
pm_wakeup_event(priv->device, 0);
-@@ -2829,16 +3660,30 @@ static irqreturn_t stmmac_interrupt(int
+@@ -2838,16 +3660,30 @@ static irqreturn_t stmmac_interrupt(int
if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
int status = priv->hw->mac->host_irq_status(priv->hw,
&priv->xstats);
@@ -5317,7 +5281,7 @@
}
/* PCS link status */
-@@ -2923,7 +3768,7 @@ static void sysfs_display_ring(void *hea
+@@ -2932,7 +3768,7 @@ static void sysfs_display_ring(void *hea
ep++;
} else {
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
@@ -5326,7 +5290,7 @@
le32_to_cpu(p->des0), le32_to_cpu(p->des1),
le32_to_cpu(p->des2), le32_to_cpu(p->des3));
p++;
-@@ -2936,17 +3781,40 @@ static int stmmac_sysfs_ring_read(struct
+@@ -2945,17 +3781,40 @@ static int stmmac_sysfs_ring_read(struct
{
struct net_device *dev = seq->private;
struct stmmac_priv *priv = netdev_priv(dev);
@@ -5377,7 +5341,7 @@
}
return 0;
-@@ -3229,11 +4097,14 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3238,11 +4097,14 @@ int stmmac_dvr_probe(struct device *devi
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res)
{
@@ -5394,7 +5358,7 @@
if (!ndev)
return -ENOMEM;
-@@ -3275,6 +4146,10 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3284,6 +4146,10 @@ int stmmac_dvr_probe(struct device *devi
if (ret)
goto error_hw_init;
@@ -5405,7 +5369,7 @@
ndev->netdev_ops = &stmmac_netdev_ops;
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-@@ -3307,7 +4182,12 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3316,7 +4182,12 @@ int stmmac_dvr_probe(struct device *devi
"Enable RX Mitigation via HW Watchdog Timer\n");
}
@@ -5419,7 +5383,7 @@
spin_lock_init(&priv->lock);
-@@ -3352,7 +4232,11 @@ error_netdev_register:
+@@ -3361,7 +4232,11 @@ error_netdev_register:
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
error_mdio_register:
@@ -5432,7 +5396,7 @@
error_hw_init:
free_netdev(ndev);
-@@ -3373,10 +4257,9 @@ int stmmac_dvr_remove(struct device *dev
+@@ -3382,10 +4257,9 @@ int stmmac_dvr_remove(struct device *dev
netdev_info(priv->dev, "%s: removing driver", __func__);
@@ -5445,7 +5409,7 @@
netif_carrier_off(ndev);
unregister_netdev(ndev);
if (priv->plat->stmmac_rst)
-@@ -3415,20 +4298,19 @@ int stmmac_suspend(struct device *dev)
+@@ -3424,20 +4298,19 @@ int stmmac_suspend(struct device *dev)
spin_lock_irqsave(&priv->lock, flags);
netif_device_detach(ndev);
@@ -5470,7 +5434,7 @@
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
clk_disable(priv->plat->pclk);
-@@ -3444,6 +4326,31 @@ int stmmac_suspend(struct device *dev)
+@@ -3453,6 +4326,31 @@ int stmmac_suspend(struct device *dev)
EXPORT_SYMBOL_GPL(stmmac_suspend);
/**
@@ -5502,7 +5466,7 @@
* stmmac_resume - resume callback
* @dev: device pointer
* Description: when resume this function is invoked to setup the DMA and CORE
-@@ -3483,10 +4390,8 @@ int stmmac_resume(struct device *dev)
+@@ -3492,10 +4390,8 @@ int stmmac_resume(struct device *dev)
spin_lock_irqsave(&priv->lock, flags);
@@ -5515,7 +5479,7 @@
/* reset private mss value to force mss context settings at
* next tso xmit (only used for gmac4).
*/
-@@ -3498,9 +4403,9 @@ int stmmac_resume(struct device *dev)
+@@ -3507,9 +4403,9 @@ int stmmac_resume(struct device *dev)
stmmac_init_tx_coalesce(priv);
stmmac_set_rx_mode(ndev);
@@ -5867,18 +5831,6 @@
plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
}
---- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
-+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
-@@ -59,7 +59,8 @@
- /* Enable Snapshot for Messages Relevant to Master */
- #define PTP_TCR_TSMSTRENA BIT(15)
- /* Select PTP packets for Taking Snapshots */
--#define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16)
-+#define PTP_TCR_SNAPTYPSEL_1 BIT(16)
-+#define PTP_GMAC4_TCR_SNAPTYPSEL_1 GENMASK(17, 16)
- /* Enable MAC address for PTP Frame Filtering */
- #define PTP_TCR_TSENMACADDR BIT(18)
-
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -28,6 +28,9 @@