author     Hauke Mehrtens <hauke@hauke-m.de>    2017-12-16 21:09:41 +0100
committer  Hauke Mehrtens <hauke@hauke-m.de>    2017-12-19 22:45:27 +0100
commit     f704b643b9cab8fa6d3eb7c33a7fbd117ecd821a (patch)
tree       4ba50f0c0cea60c564d9b76b49ac2985386c4d56 /target/linux/sunxi/patches-4.9
parent     e80ab48777d83d8c24fcd397a85649f08b6b5f87 (diff)
kernel: Update kernel 4.9 to 4.9.70
Runtime tested on lantiq.

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Diffstat (limited to 'target/linux/sunxi/patches-4.9')
-rw-r--r--  target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch  | 100
-rw-r--r--  target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch  |  22
-rw-r--r--  target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch  | 100
-rw-r--r--  target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch  |  16
4 files changed, 119 insertions, 119 deletions
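
The diff below consists almost entirely of hunk-header refreshes (e.g. "@@ -1798,33 +1793,36 @@" becoming "@@ -1799,33 +1794,36 @@") so the stmmac patches keep applying cleanly on the 4.9.70 source. As an illustration only of what such an offset refresh amounts to, here is a minimal Python sketch; the script name and helper are hypothetical and are not part of this commit or of the OpenWrt build system, where patches are normally regenerated with quilt or the kernel-patch refresh target rather than edited by hand.

    # shift_hunks.py -- illustrative sketch only (hypothetical helper, not OpenWrt tooling)
    import re
    import sys

    # Matches a unified-diff hunk header, e.g. "@@ -1798,33 +1793,36 @@ static int stmmac_open("
    HUNK_RE = re.compile(r'^@@ -(\d+)(,\d+)? \+(\d+)(,\d+)? @@(.*)$')

    def shift_hunks(patch_text, old_delta, new_delta):
        """Shift the old-/new-file start lines of every hunk header by the given deltas.

        Only the "@@ ... @@" headers are rewritten; context and +/- body lines
        pass through unchanged.
        """
        out = []
        for line in patch_text.splitlines(keepends=True):
            m = HUNK_RE.match(line)
            if m:
                old_start = int(m.group(1)) + old_delta
                new_start = int(m.group(3)) + new_delta
                line = "@@ -{}{} +{}{} @@{}\n".format(
                    old_start, m.group(2) or "",
                    new_start, m.group(4) or "",
                    m.group(5))
            out.append(line)
        return "".join(out)

    if __name__ == "__main__":
        # Example: shift both sides by +1, the offset seen in most hunks of this commit.
        sys.stdout.write(shift_hunks(sys.stdin.read(), 1, 1))
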
diff --git a/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch b/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch
index b88c19e256..ca2eb3ac8f 100644
--- a/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch
+++ b/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch
@@ -2464,7 +2464,7 @@
return ret;
}
}
-@@ -1798,33 +1793,36 @@ static int stmmac_open(struct net_device
+@@ -1799,33 +1794,36 @@ static int stmmac_open(struct net_device
ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
@@ -2508,7 +2508,7 @@
goto init_error;
}
-@@ -1833,8 +1831,9 @@ static int stmmac_open(struct net_device
+@@ -1834,8 +1832,9 @@ static int stmmac_open(struct net_device
ret = request_irq(priv->wol_irq, stmmac_interrupt,
IRQF_SHARED, dev->name, dev);
if (unlikely(ret < 0)) {
@@ -2520,7 +2520,7 @@
goto wolirq_error;
}
}
-@@ -1844,8 +1843,9 @@ static int stmmac_open(struct net_device
+@@ -1845,8 +1844,9 @@ static int stmmac_open(struct net_device
ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
dev->name, dev);
if (unlikely(ret < 0)) {
@@ -2532,7 +2532,7 @@
goto lpiirq_error;
}
}
-@@ -1864,8 +1864,8 @@ wolirq_error:
+@@ -1865,8 +1865,8 @@ wolirq_error:
init_error:
free_dma_desc_resources(priv);
dma_desc_error:
@@ -2543,7 +2543,7 @@
return ret;
}
-@@ -1884,10 +1884,9 @@ static int stmmac_release(struct net_dev
+@@ -1885,10 +1885,9 @@ static int stmmac_release(struct net_dev
del_timer_sync(&priv->eee_ctrl_timer);
/* Stop and disconnect the PHY */
@@ -2557,7 +2557,7 @@
}
netif_stop_queue(dev);
-@@ -1947,13 +1946,13 @@ static void stmmac_tso_allocator(struct
+@@ -1948,13 +1947,13 @@ static void stmmac_tso_allocator(struct
priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
desc = priv->dma_tx + priv->cur_tx;
@@ -2573,7 +2573,7 @@
0, 0);
tmp_len -= TSO_MAX_BUFF_SIZE;
-@@ -1998,8 +1997,6 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -1999,8 +1998,6 @@ static netdev_tx_t stmmac_tso_xmit(struc
u8 proto_hdr_len;
int i;
@@ -2582,7 +2582,7 @@
/* Compute header lengths */
proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-@@ -2009,9 +2006,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2010,9 +2007,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
/* This is a hard error, log it. */
@@ -2595,7 +2595,7 @@
return NETDEV_TX_BUSY;
}
-@@ -2049,11 +2047,11 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2050,11 +2048,11 @@ static netdev_tx_t stmmac_tso_xmit(struc
priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
priv->tx_skbuff[first_entry] = skb;
@@ -2609,7 +2609,7 @@
/* If needed take extra descriptors to fill the remaining payload */
tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
-@@ -2082,8 +2080,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2083,8 +2081,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
@@ -2620,7 +2620,7 @@
netif_stop_queue(dev);
}
-@@ -2127,7 +2125,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2128,7 +2126,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
* descriptor and then barrier is needed to make sure that
* all is coherent before granting the DMA engine.
*/
@@ -2629,7 +2629,7 @@
if (netif_msg_pktdata(priv)) {
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
-@@ -2146,11 +2144,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2147,11 +2145,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
STMMAC_CHAN0);
@@ -2641,7 +2641,7 @@
dev_err(priv->device, "Tx dma map failed\n");
dev_kfree_skb(skb);
priv->dev->stats.tx_dropped++;
-@@ -2182,14 +2178,13 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2183,14 +2179,13 @@ static netdev_tx_t stmmac_xmit(struct sk
return stmmac_tso_xmit(skb, dev);
}
@@ -2659,7 +2659,7 @@
}
return NETDEV_TX_BUSY;
}
-@@ -2242,13 +2237,11 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2243,13 +2238,11 @@ static netdev_tx_t stmmac_xmit(struct sk
priv->tx_skbuff[entry] = NULL;
@@ -2678,7 +2678,7 @@
priv->tx_skbuff_dma[entry].map_as_page = true;
priv->tx_skbuff_dma[entry].len = len;
-@@ -2266,9 +2259,10 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2267,9 +2260,10 @@ static netdev_tx_t stmmac_xmit(struct sk
if (netif_msg_pktdata(priv)) {
void *tx_head;
@@ -2692,7 +2692,7 @@
if (priv->extend_desc)
tx_head = (void *)priv->dma_etx;
-@@ -2277,13 +2271,13 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2278,13 +2272,13 @@ static netdev_tx_t stmmac_xmit(struct sk
priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
@@ -2709,7 +2709,7 @@
netif_stop_queue(dev);
}
-@@ -2319,13 +2313,11 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2320,13 +2314,11 @@ static netdev_tx_t stmmac_xmit(struct sk
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
@@ -2728,7 +2728,7 @@
priv->tx_skbuff_dma[first_entry].len = nopaged_len;
priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
-@@ -2346,7 +2338,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2347,7 +2339,7 @@ static netdev_tx_t stmmac_xmit(struct sk
* descriptor and then barrier is needed to make sure that
* all is coherent before granting the DMA engine.
*/
@@ -2737,7 +2737,7 @@
}
netdev_sent_queue(dev, skb->len);
-@@ -2357,12 +2349,10 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2358,12 +2350,10 @@ static netdev_tx_t stmmac_xmit(struct sk
priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
STMMAC_CHAN0);
@@ -2751,7 +2751,7 @@
dev_kfree_skb(skb);
priv->dev->stats.tx_dropped++;
return NETDEV_TX_OK;
-@@ -2433,16 +2423,16 @@ static inline void stmmac_rx_refill(stru
+@@ -2434,16 +2424,16 @@ static inline void stmmac_rx_refill(stru
DMA_FROM_DEVICE);
if (dma_mapping_error(priv->device,
priv->rx_skbuff_dma[entry])) {
@@ -2771,7 +2771,7 @@
}
if (priv->hw->mode->refill_desc3)
priv->hw->mode->refill_desc3(priv, p);
-@@ -2450,17 +2440,17 @@ static inline void stmmac_rx_refill(stru
+@@ -2451,17 +2441,17 @@ static inline void stmmac_rx_refill(stru
if (priv->rx_zeroc_thresh > 0)
priv->rx_zeroc_thresh--;
@@ -2793,7 +2793,7 @@
entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
}
-@@ -2484,7 +2474,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2485,7 +2475,7 @@ static int stmmac_rx(struct stmmac_priv
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -2802,7 +2802,7 @@
if (priv->extend_desc)
rx_head = (void *)priv->dma_erx;
else
-@@ -2546,9 +2536,9 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2547,9 +2537,9 @@ static int stmmac_rx(struct stmmac_priv
unsigned int des;
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
@@ -2814,7 +2814,7 @@
frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
-@@ -2557,9 +2547,9 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2558,9 +2548,9 @@ static int stmmac_rx(struct stmmac_priv
* ignored
*/
if (frame_len > priv->dma_buf_sz) {
@@ -2827,7 +2827,7 @@
priv->dev->stats.rx_length_errors++;
break;
}
-@@ -2571,11 +2561,11 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2572,11 +2562,11 @@ static int stmmac_rx(struct stmmac_priv
frame_len -= ETH_FCS_LEN;
if (netif_msg_rx_status(priv)) {
@@ -2843,7 +2843,7 @@
}
/* The zero-copy is always used for all the sizes
-@@ -2612,8 +2602,9 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2613,8 +2603,9 @@ static int stmmac_rx(struct stmmac_priv
} else {
skb = priv->rx_skbuff[entry];
if (unlikely(!skb)) {
@@ -2855,7 +2855,7 @@
priv->dev->stats.rx_dropped++;
break;
}
-@@ -2629,7 +2620,8 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2630,7 +2621,8 @@ static int stmmac_rx(struct stmmac_priv
}
if (netif_msg_pktdata(priv)) {
@@ -2865,7 +2865,7 @@
print_pkt(skb->data, frame_len);
}
-@@ -2732,7 +2724,7 @@ static int stmmac_change_mtu(struct net_
+@@ -2733,7 +2725,7 @@ static int stmmac_change_mtu(struct net_
int max_mtu;
if (netif_running(dev)) {
@@ -2874,7 +2874,7 @@
return -EBUSY;
}
-@@ -2824,7 +2816,7 @@ static irqreturn_t stmmac_interrupt(int
+@@ -2825,7 +2817,7 @@ static irqreturn_t stmmac_interrupt(int
pm_wakeup_event(priv->device, 0);
if (unlikely(!dev)) {
@@ -2883,7 +2883,7 @@
return IRQ_NONE;
}
-@@ -2882,7 +2874,6 @@ static void stmmac_poll_controller(struc
+@@ -2883,7 +2875,6 @@ static void stmmac_poll_controller(struc
*/
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
@@ -2891,7 +2891,7 @@
int ret = -EOPNOTSUPP;
if (!netif_running(dev))
-@@ -2892,9 +2883,9 @@ static int stmmac_ioctl(struct net_devic
+@@ -2893,9 +2884,9 @@ static int stmmac_ioctl(struct net_devic
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
@@ -2903,7 +2903,7 @@
break;
case SIOCSHWTSTAMP:
ret = stmmac_hwtstamp_ioctl(dev, rq);
-@@ -2922,14 +2913,17 @@ static void sysfs_display_ring(void *hea
+@@ -2923,14 +2914,17 @@ static void sysfs_display_ring(void *hea
x = *(u64 *) ep;
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
@@ -2924,7 +2924,7 @@
p++;
}
seq_printf(seq, "\n");
-@@ -2961,6 +2955,8 @@ static int stmmac_sysfs_ring_open(struct
+@@ -2962,6 +2956,8 @@ static int stmmac_sysfs_ring_open(struct
return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
}
@@ -2933,7 +2933,7 @@
static const struct file_operations stmmac_rings_status_fops = {
.owner = THIS_MODULE,
.open = stmmac_sysfs_ring_open,
-@@ -2983,11 +2979,11 @@ static int stmmac_sysfs_dma_cap_read(str
+@@ -2984,11 +2980,11 @@ static int stmmac_sysfs_dma_cap_read(str
seq_printf(seq, "\tDMA HW features\n");
seq_printf(seq, "==============================\n");
@@ -2948,7 +2948,7 @@
(priv->dma_cap.half_duplex) ? "Y" : "N");
seq_printf(seq, "\tHash Filter: %s\n",
(priv->dma_cap.hash_filter) ? "Y" : "N");
-@@ -3005,9 +3001,9 @@ static int stmmac_sysfs_dma_cap_read(str
+@@ -3006,9 +3002,9 @@ static int stmmac_sysfs_dma_cap_read(str
(priv->dma_cap.rmon) ? "Y" : "N");
seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
(priv->dma_cap.time_stamp) ? "Y" : "N");
@@ -2960,7 +2960,7 @@
(priv->dma_cap.eee) ? "Y" : "N");
seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
seq_printf(seq, "\tChecksum Offload in TX: %s\n",
-@@ -3054,8 +3050,7 @@ static int stmmac_init_fs(struct net_dev
+@@ -3055,8 +3051,7 @@ static int stmmac_init_fs(struct net_dev
priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
@@ -2970,7 +2970,7 @@
return -ENOMEM;
}
-@@ -3067,7 +3062,7 @@ static int stmmac_init_fs(struct net_dev
+@@ -3068,7 +3063,7 @@ static int stmmac_init_fs(struct net_dev
&stmmac_rings_status_fops);
if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
@@ -2979,7 +2979,7 @@
debugfs_remove_recursive(priv->dbgfs_dir);
return -ENOMEM;
-@@ -3079,7 +3074,7 @@ static int stmmac_init_fs(struct net_dev
+@@ -3080,7 +3075,7 @@ static int stmmac_init_fs(struct net_dev
dev, &stmmac_dma_cap_fops);
if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
@@ -2988,7 +2988,7 @@
debugfs_remove_recursive(priv->dbgfs_dir);
return -ENOMEM;
-@@ -3151,11 +3146,11 @@ static int stmmac_hw_init(struct stmmac_
+@@ -3152,11 +3147,11 @@ static int stmmac_hw_init(struct stmmac_
} else {
if (chain_mode) {
priv->hw->mode = &chain_mode_ops;
@@ -3002,7 +3002,7 @@
priv->mode = STMMAC_RING_MODE;
}
}
-@@ -3163,7 +3158,7 @@ static int stmmac_hw_init(struct stmmac_
+@@ -3164,7 +3159,7 @@ static int stmmac_hw_init(struct stmmac_
/* Get the HW capability (new GMAC newer than 3.50a) */
priv->hw_cap_support = stmmac_get_hw_features(priv);
if (priv->hw_cap_support) {
@@ -3011,7 +3011,7 @@
/* We can override some gmac/dma configuration fields: e.g.
* enh_desc, tx_coe (e.g. that are passed through the
-@@ -3188,8 +3183,9 @@ static int stmmac_hw_init(struct stmmac_
+@@ -3189,8 +3184,9 @@ static int stmmac_hw_init(struct stmmac_
else if (priv->dma_cap.rx_coe_type1)
priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
@@ -3023,7 +3023,7 @@
/* To use alternate (extended), normal or GMAC4 descriptor structures */
if (priv->synopsys_id >= DWMAC_CORE_4_00)
-@@ -3199,20 +3195,20 @@ static int stmmac_hw_init(struct stmmac_
+@@ -3200,20 +3196,20 @@ static int stmmac_hw_init(struct stmmac_
if (priv->plat->rx_coe) {
priv->hw->rx_csum = priv->plat->rx_coe;
@@ -3049,7 +3049,7 @@
return 0;
}
-@@ -3271,8 +3267,8 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3272,8 +3268,8 @@ int stmmac_dvr_probe(struct device *devi
priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
if (IS_ERR(priv->stmmac_clk)) {
@@ -3060,7 +3060,7 @@
/* If failed to obtain stmmac_clk and specific clk_csr value
* is NOT passed from the platform, probe fail.
*/
-@@ -3321,7 +3317,7 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3322,7 +3318,7 @@ int stmmac_dvr_probe(struct device *devi
if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
ndev->hw_features |= NETIF_F_TSO;
priv->tso = true;
@@ -3069,7 +3069,7 @@
}
ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
-@@ -3341,13 +3337,13 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3342,13 +3338,13 @@ int stmmac_dvr_probe(struct device *devi
*/
if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
priv->use_riwt = 1;
@@ -3085,7 +3085,7 @@
/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
-@@ -3368,15 +3364,17 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3369,15 +3365,17 @@ int stmmac_dvr_probe(struct device *devi
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
if (ret < 0) {
@@ -3107,7 +3107,7 @@
goto error_netdev_register;
}
-@@ -3387,7 +3385,7 @@ error_netdev_register:
+@@ -3388,7 +3386,7 @@ error_netdev_register:
priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
@@ -3116,7 +3116,7 @@
netif_napi_del(&priv->napi);
error_hw_init:
clk_disable_unprepare(priv->pclk);
-@@ -3411,7 +3409,7 @@ int stmmac_dvr_remove(struct device *dev
+@@ -3412,7 +3410,7 @@ int stmmac_dvr_remove(struct device *dev
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -3125,7 +3125,7 @@
priv->hw->dma->stop_rx(priv->ioaddr);
priv->hw->dma->stop_tx(priv->ioaddr);
-@@ -3449,8 +3447,8 @@ int stmmac_suspend(struct device *dev)
+@@ -3450,8 +3448,8 @@ int stmmac_suspend(struct device *dev)
if (!ndev || !netif_running(ndev))
return 0;
@@ -3136,7 +3136,7 @@
spin_lock_irqsave(&priv->lock, flags);
-@@ -3544,8 +3542,8 @@ int stmmac_resume(struct device *dev)
+@@ -3545,8 +3543,8 @@ int stmmac_resume(struct device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch b/target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch
index abd1304485..fc626cbb9b 100644
--- a/target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch
+++ b/target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch
@@ -1718,7 +1718,7 @@
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
-@@ -2519,7 +2517,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2520,7 +2518,7 @@ static int stmmac_rx(struct stmmac_priv
if (unlikely(status == discard_frame)) {
priv->dev->stats.rx_errors++;
if (priv->hwts_rx_en && !priv->extend_desc) {
@@ -1727,7 +1727,7 @@
* with timestamp value, hence reinitialize
* them in stmmac_rx_refill() function so that
* device can reuse it.
-@@ -2542,7 +2540,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2543,7 +2541,7 @@ static int stmmac_rx(struct stmmac_priv
frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
@@ -1736,7 +1736,7 @@
* (preallocated during init) then the packet is
* ignored
*/
-@@ -2762,7 +2760,7 @@ static netdev_features_t stmmac_fix_feat
+@@ -2763,7 +2761,7 @@ static netdev_features_t stmmac_fix_feat
/* Some GMAC devices have a bugged Jumbo frame support that
* needs to have the Tx COE disabled for oversized frames
* (due to limited buffer sizes). In this case we disable
@@ -1745,7 +1745,7 @@
*/
if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
features &= ~NETIF_F_CSUM_MASK;
-@@ -2908,9 +2906,7 @@ static void sysfs_display_ring(void *hea
+@@ -2909,9 +2907,7 @@ static void sysfs_display_ring(void *hea
struct dma_desc *p = (struct dma_desc *)head;
for (i = 0; i < size; i++) {
@@ -1755,7 +1755,7 @@
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
le32_to_cpu(ep->basic.des0),
-@@ -2919,7 +2915,6 @@ static void sysfs_display_ring(void *hea
+@@ -2920,7 +2916,6 @@ static void sysfs_display_ring(void *hea
le32_to_cpu(ep->basic.des3));
ep++;
} else {
@@ -1763,7 +1763,7 @@
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
le32_to_cpu(p->des0), le32_to_cpu(p->des1),
-@@ -2989,7 +2984,7 @@ static int stmmac_sysfs_dma_cap_read(str
+@@ -2990,7 +2985,7 @@ static int stmmac_sysfs_dma_cap_read(str
(priv->dma_cap.hash_filter) ? "Y" : "N");
seq_printf(seq, "\tMultiple MAC address registers: %s\n",
(priv->dma_cap.multi_addr) ? "Y" : "N");
@@ -1772,7 +1772,7 @@
(priv->dma_cap.pcs) ? "Y" : "N");
seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
(priv->dma_cap.sma_mdio) ? "Y" : "N");
-@@ -3265,44 +3260,8 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3266,44 +3261,8 @@ int stmmac_dvr_probe(struct device *devi
if ((phyaddr >= 0) && (phyaddr <= 31))
priv->plat->phy_addr = phyaddr;
@@ -1819,7 +1819,7 @@
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
-@@ -3388,10 +3347,6 @@ error_netdev_register:
+@@ -3389,10 +3348,6 @@ error_netdev_register:
error_mdio_register:
netif_napi_del(&priv->napi);
error_hw_init:
@@ -1830,7 +1830,7 @@
free_netdev(ndev);
return ret;
-@@ -3417,10 +3372,10 @@ int stmmac_dvr_remove(struct device *dev
+@@ -3418,10 +3373,10 @@ int stmmac_dvr_remove(struct device *dev
stmmac_set_mac(priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
@@ -1845,7 +1845,7 @@
if (priv->hw->pcs != STMMAC_PCS_RGMII &&
priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
-@@ -3469,14 +3424,14 @@ int stmmac_suspend(struct device *dev)
+@@ -3470,14 +3425,14 @@ int stmmac_suspend(struct device *dev)
stmmac_set_mac(priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
@@ -1864,7 +1864,7 @@
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
-@@ -3509,9 +3464,9 @@ int stmmac_resume(struct device *dev)
+@@ -3510,9 +3465,9 @@ int stmmac_resume(struct device *dev)
priv->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(priv->device);
diff --git a/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch b/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch
index 285e4d2762..18f58e947c 100644
--- a/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch
+++ b/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch
@@ -4523,7 +4523,7 @@
/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
-@@ -1821,7 +2598,7 @@ static int stmmac_open(struct net_device
+@@ -1822,7 +2599,7 @@ static int stmmac_open(struct net_device
netdev_err(priv->dev,
"%s: ERROR: allocating the IRQ %d (error: %d)\n",
__func__, dev->irq, ret);
@@ -4532,7 +4532,7 @@
}
/* Request the Wake IRQ in case of another line is used for WoL */
-@@ -1848,8 +2625,8 @@ static int stmmac_open(struct net_device
+@@ -1849,8 +2626,8 @@ static int stmmac_open(struct net_device
}
}
@@ -4543,7 +4543,7 @@
return 0;
-@@ -1858,7 +2635,12 @@ lpiirq_error:
+@@ -1859,7 +2636,12 @@ lpiirq_error:
free_irq(priv->wol_irq, dev);
wolirq_error:
free_irq(dev->irq, dev);
@@ -4556,7 +4556,7 @@
init_error:
free_dma_desc_resources(priv);
dma_desc_error:
-@@ -1887,9 +2669,9 @@ static int stmmac_release(struct net_dev
+@@ -1888,9 +2670,9 @@ static int stmmac_release(struct net_dev
phy_disconnect(dev->phydev);
}
@@ -4568,7 +4568,7 @@
del_timer_sync(&priv->txtimer);
-@@ -1901,14 +2683,13 @@ static int stmmac_release(struct net_dev
+@@ -1902,14 +2684,13 @@ static int stmmac_release(struct net_dev
free_irq(priv->lpi_irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
@@ -4585,7 +4585,7 @@
netif_carrier_off(dev);
-@@ -1927,22 +2708,24 @@ static int stmmac_release(struct net_dev
+@@ -1928,22 +2709,24 @@ static int stmmac_release(struct net_dev
* @des: buffer start address
* @total_len: total length to fill in descriptors
* @last_segmant: condition for the last descriptor
@@ -4614,7 +4614,7 @@
desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
-@@ -1950,7 +2733,7 @@ static void stmmac_tso_allocator(struct
+@@ -1951,7 +2734,7 @@ static void stmmac_tso_allocator(struct
priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
0, 1,
@@ -4623,7 +4623,7 @@
0, 0);
tmp_len -= TSO_MAX_BUFF_SIZE;
-@@ -1986,23 +2769,28 @@ static void stmmac_tso_allocator(struct
+@@ -1987,23 +2770,28 @@ static void stmmac_tso_allocator(struct
*/
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -4658,7 +4658,7 @@
/* This is a hard error, log it. */
netdev_err(priv->dev,
"%s: Tx Ring full when queue awake\n",
-@@ -2017,10 +2805,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2018,10 +2806,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
/* set new MSS value if needed */
if (mss != priv->mss) {
@@ -4671,7 +4671,7 @@
}
if (netif_msg_tx_queued(priv)) {
-@@ -2030,9 +2818,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2031,9 +2819,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
skb->data_len);
}
@@ -4683,7 +4683,7 @@
first = desc;
/* first descriptor: fill Headers on Buf1 */
-@@ -2041,9 +2829,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2042,9 +2830,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
@@ -4695,7 +4695,7 @@
first->des0 = cpu_to_le32(des);
-@@ -2054,7 +2841,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2055,7 +2842,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
/* If needed take extra descriptors to fill the remaining payload */
tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
@@ -4704,7 +4704,7 @@
/* Prepare fragments */
for (i = 0; i < nfrags; i++) {
-@@ -2063,24 +2850,34 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2064,24 +2851,34 @@ static netdev_tx_t stmmac_tso_xmit(struc
des = skb_frag_dma_map(priv->device, frag, 0,
skb_frag_size(frag),
DMA_TO_DEVICE);
@@ -4748,7 +4748,7 @@
}
dev->stats.tx_bytes += skb->len;
-@@ -2112,7 +2909,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2113,7 +2910,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
priv->hw->desc->prepare_tso_tx_desc(first, 1,
proto_hdr_len,
pay_len,
@@ -4757,7 +4757,7 @@
tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
/* If context desc is used to change MSS */
-@@ -2127,20 +2924,20 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2128,20 +2925,20 @@ static netdev_tx_t stmmac_tso_xmit(struc
if (netif_msg_pktdata(priv)) {
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
@@ -4784,7 +4784,7 @@
return NETDEV_TX_OK;
-@@ -2164,21 +2961,27 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2165,21 +2962,27 @@ static netdev_tx_t stmmac_xmit(struct sk
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int nopaged_len = skb_headlen(skb);
int i, csum_insertion = 0, is_jumbo = 0;
@@ -4816,7 +4816,7 @@
/* This is a hard error, log it. */
netdev_err(priv->dev,
"%s: Tx Ring full when queue awake\n",
-@@ -2190,20 +2993,18 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2191,20 +2994,18 @@ static netdev_tx_t stmmac_xmit(struct sk
if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);
@@ -4840,7 +4840,7 @@
enh_desc = priv->plat->enh_desc;
/* To program the descriptors according to the size of the frame */
if (enh_desc)
-@@ -2211,7 +3012,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2212,7 +3013,7 @@ static netdev_tx_t stmmac_xmit(struct sk
if (unlikely(is_jumbo) && likely(priv->synopsys_id <
DWMAC_CORE_4_00)) {
@@ -4849,7 +4849,7 @@
if (unlikely(entry < 0))
goto dma_map_err;
}
-@@ -2224,48 +3025,56 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2225,48 +3026,56 @@ static netdev_tx_t stmmac_xmit(struct sk
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
if (likely(priv->extend_desc))
@@ -4919,7 +4919,7 @@
priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
-@@ -2273,10 +3082,10 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2274,10 +3083,10 @@ static netdev_tx_t stmmac_xmit(struct sk
print_pkt(skb->data, skb->len);
}
@@ -4932,7 +4932,7 @@
}
dev->stats.tx_bytes += skb->len;
-@@ -2311,14 +3120,14 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2312,14 +3121,14 @@ static netdev_tx_t stmmac_xmit(struct sk
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
@@ -4950,7 +4950,7 @@
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
-@@ -2330,7 +3139,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2331,7 +3140,7 @@ static netdev_tx_t stmmac_xmit(struct sk
/* Prepare the first descriptor setting the OWN bit too */
priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
csum_insertion, priv->mode, 1,
@@ -4959,7 +4959,7 @@
/* The own bit must be the latest setting done when prepare the
* descriptor and then barrier is needed to make sure that
-@@ -2339,13 +3148,13 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2340,13 +3149,13 @@ static netdev_tx_t stmmac_xmit(struct sk
dma_wmb();
}
@@ -4976,7 +4976,7 @@
return NETDEV_TX_OK;
-@@ -2373,9 +3182,9 @@ static void stmmac_rx_vlan(struct net_de
+@@ -2374,9 +3183,9 @@ static void stmmac_rx_vlan(struct net_de
}
@@ -4988,7 +4988,7 @@
return 0;
return 1;
-@@ -2384,30 +3193,33 @@ static inline int stmmac_rx_threshold_co
+@@ -2385,30 +3194,33 @@ static inline int stmmac_rx_threshold_co
/**
* stmmac_rx_refill - refill used skb preallocated buffers
* @priv: driver private structure
@@ -5029,7 +5029,7 @@
if (unlikely(net_ratelimit()))
dev_err(priv->device,
"fail to alloc skb entry %d\n",
-@@ -2415,28 +3227,28 @@ static inline void stmmac_rx_refill(stru
+@@ -2416,28 +3228,28 @@ static inline void stmmac_rx_refill(stru
break;
}
@@ -5066,7 +5066,7 @@
netif_dbg(priv, rx_status, priv->dev,
"refill entry #%d\n", entry);
-@@ -2452,31 +3264,33 @@ static inline void stmmac_rx_refill(stru
+@@ -2453,31 +3265,33 @@ static inline void stmmac_rx_refill(stru
entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
}
@@ -5107,7 +5107,7 @@
priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
}
-@@ -2486,9 +3300,9 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2487,9 +3301,9 @@ static int stmmac_rx(struct stmmac_priv
struct dma_desc *np;
if (priv->extend_desc)
@@ -5119,7 +5119,7 @@
/* read the status of the incoming frame */
status = priv->hw->desc->rx_status(&priv->dev->stats,
-@@ -2499,20 +3313,20 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2500,20 +3314,20 @@ static int stmmac_rx(struct stmmac_priv
count++;
@@ -5145,7 +5145,7 @@
entry);
if (unlikely(status == discard_frame)) {
priv->dev->stats.rx_errors++;
-@@ -2522,9 +3336,9 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2523,9 +3337,9 @@ static int stmmac_rx(struct stmmac_priv
* them in stmmac_rx_refill() function so that
* device can reuse it.
*/
@@ -5157,7 +5157,7 @@
priv->dma_buf_sz,
DMA_FROM_DEVICE);
}
-@@ -2572,7 +3386,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2573,7 +3387,7 @@ static int stmmac_rx(struct stmmac_priv
*/
if (unlikely(!priv->plat->has_gmac4 &&
((frame_len < priv->rx_copybreak) ||
@@ -5166,7 +5166,7 @@
skb = netdev_alloc_skb_ip_align(priv->dev,
frame_len);
if (unlikely(!skb)) {
-@@ -2584,21 +3398,21 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2585,21 +3399,21 @@ static int stmmac_rx(struct stmmac_priv
}
dma_sync_single_for_cpu(priv->device,
@@ -5192,7 +5192,7 @@
if (unlikely(!skb)) {
netdev_err(priv->dev,
"%s: Inconsistent Rx chain\n",
-@@ -2607,12 +3421,12 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2608,12 +3422,12 @@ static int stmmac_rx(struct stmmac_priv
break;
}
prefetch(skb->data - NET_IP_ALIGN);
@@ -5208,7 +5208,7 @@
priv->dma_buf_sz,
DMA_FROM_DEVICE);
}
-@@ -2634,7 +3448,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2635,7 +3449,7 @@ static int stmmac_rx(struct stmmac_priv
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -5217,7 +5217,7 @@
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
-@@ -2642,7 +3456,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2643,7 +3457,7 @@ static int stmmac_rx(struct stmmac_priv
entry = next_entry;
}
@@ -5226,7 +5226,7 @@
priv->xstats.rx_pkt_n += count;
-@@ -2659,16 +3473,24 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2660,16 +3474,24 @@ static int stmmac_rx(struct stmmac_priv
*/
static int stmmac_poll(struct napi_struct *napi, int budget)
{
@@ -5255,7 +5255,7 @@
}
return work_done;
}
-@@ -2684,9 +3506,12 @@ static int stmmac_poll(struct napi_struc
+@@ -2685,9 +3507,12 @@ static int stmmac_poll(struct napi_struc
static void stmmac_tx_timeout(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -5269,7 +5269,7 @@
}
/**
-@@ -2809,6 +3634,12 @@ static irqreturn_t stmmac_interrupt(int
+@@ -2810,6 +3635,12 @@ static irqreturn_t stmmac_interrupt(int
{
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
@@ -5282,7 +5282,7 @@
if (priv->irq_wake)
pm_wakeup_event(priv->device, 0);
-@@ -2822,16 +3653,30 @@ static irqreturn_t stmmac_interrupt(int
+@@ -2823,16 +3654,30 @@ static irqreturn_t stmmac_interrupt(int
if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
int status = priv->hw->mac->host_irq_status(priv->hw,
&priv->xstats);
@@ -5317,7 +5317,7 @@
}
/* PCS link status */
-@@ -2916,7 +3761,7 @@ static void sysfs_display_ring(void *hea
+@@ -2917,7 +3762,7 @@ static void sysfs_display_ring(void *hea
ep++;
} else {
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
@@ -5326,7 +5326,7 @@
le32_to_cpu(p->des0), le32_to_cpu(p->des1),
le32_to_cpu(p->des2), le32_to_cpu(p->des3));
p++;
-@@ -2929,17 +3774,40 @@ static int stmmac_sysfs_ring_read(struct
+@@ -2930,17 +3775,40 @@ static int stmmac_sysfs_ring_read(struct
{
struct net_device *dev = seq->private;
struct stmmac_priv *priv = netdev_priv(dev);
@@ -5377,7 +5377,7 @@
}
return 0;
-@@ -3222,11 +4090,14 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3223,11 +4091,14 @@ int stmmac_dvr_probe(struct device *devi
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res)
{
@@ -5394,7 +5394,7 @@
if (!ndev)
return -ENOMEM;
-@@ -3268,6 +4139,10 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3269,6 +4140,10 @@ int stmmac_dvr_probe(struct device *devi
if (ret)
goto error_hw_init;
@@ -5405,7 +5405,7 @@
ndev->netdev_ops = &stmmac_netdev_ops;
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-@@ -3300,7 +4175,12 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3301,7 +4176,12 @@ int stmmac_dvr_probe(struct device *devi
"Enable RX Mitigation via HW Watchdog Timer\n");
}
@@ -5419,7 +5419,7 @@
spin_lock_init(&priv->lock);
-@@ -3345,7 +4225,11 @@ error_netdev_register:
+@@ -3346,7 +4226,11 @@ error_netdev_register:
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
error_mdio_register:
@@ -5432,7 +5432,7 @@
error_hw_init:
free_netdev(ndev);
-@@ -3366,10 +4250,9 @@ int stmmac_dvr_remove(struct device *dev
+@@ -3367,10 +4251,9 @@ int stmmac_dvr_remove(struct device *dev
netdev_info(priv->dev, "%s: removing driver", __func__);
@@ -5445,7 +5445,7 @@
netif_carrier_off(ndev);
unregister_netdev(ndev);
if (priv->plat->stmmac_rst)
-@@ -3408,20 +4291,19 @@ int stmmac_suspend(struct device *dev)
+@@ -3409,20 +4292,19 @@ int stmmac_suspend(struct device *dev)
spin_lock_irqsave(&priv->lock, flags);
netif_device_detach(ndev);
@@ -5470,7 +5470,7 @@
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
clk_disable(priv->plat->pclk);
-@@ -3437,6 +4319,31 @@ int stmmac_suspend(struct device *dev)
+@@ -3438,6 +4320,31 @@ int stmmac_suspend(struct device *dev)
EXPORT_SYMBOL_GPL(stmmac_suspend);
/**
@@ -5502,7 +5502,7 @@
* stmmac_resume - resume callback
* @dev: device pointer
* Description: when resume this function is invoked to setup the DMA and CORE
-@@ -3476,10 +4383,8 @@ int stmmac_resume(struct device *dev)
+@@ -3477,10 +4384,8 @@ int stmmac_resume(struct device *dev)
spin_lock_irqsave(&priv->lock, flags);
@@ -5515,7 +5515,7 @@
/* reset private mss value to force mss context settings at
* next tso xmit (only used for gmac4).
*/
-@@ -3491,9 +4396,9 @@ int stmmac_resume(struct device *dev)
+@@ -3492,9 +4397,9 @@ int stmmac_resume(struct device *dev)
stmmac_init_tx_coalesce(priv);
stmmac_set_rx_mode(ndev);
diff --git a/target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch b/target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch
index f829b79dc3..161e9f0e6c 100644
--- a/target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch
+++ b/target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch
@@ -1491,7 +1491,7 @@
free_dma_tx_desc_resources(priv);
return ret;
-@@ -2895,8 +2893,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2896,8 +2894,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
priv->xstats.tx_set_ic_bit++;
}
@@ -1501,7 +1501,7 @@
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
-@@ -2974,7 +2971,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2975,7 +2972,7 @@ static netdev_tx_t stmmac_xmit(struct sk
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
@@ -1510,7 +1510,7 @@
return stmmac_tso_xmit(skb, dev);
}
-@@ -3105,8 +3102,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -3106,8 +3103,7 @@ static netdev_tx_t stmmac_xmit(struct sk
priv->xstats.tx_set_ic_bit++;
}
@@ -1520,7 +1520,7 @@
/* Ready to fill the first descriptor and set the OWN bit w/o any
* problems because all the descriptors are actually ready to be
-@@ -3983,7 +3979,9 @@ static int stmmac_hw_init(struct stmmac_
+@@ -3984,7 +3980,9 @@ static int stmmac_hw_init(struct stmmac_
struct mac_device_info *mac;
/* Identify the MAC HW device */
@@ -1531,7 +1531,7 @@
priv->dev->priv_flags |= IFF_UNICAST_FLT;
mac = dwmac1000_setup(priv->ioaddr,
priv->plat->multicast_filter_bins,
-@@ -4003,6 +4001,10 @@ static int stmmac_hw_init(struct stmmac_
+@@ -4004,6 +4002,10 @@ static int stmmac_hw_init(struct stmmac_
priv->hw = mac;
@@ -1542,7 +1542,7 @@
/* To use the chained or ring mode */
if (priv->synopsys_id >= DWMAC_CORE_4_00) {
priv->hw->mode = &dwmac4_ring_mode_ops;
-@@ -4131,8 +4133,15 @@ int stmmac_dvr_probe(struct device *devi
+@@ -4132,8 +4134,15 @@ int stmmac_dvr_probe(struct device *devi
if ((phyaddr >= 0) && (phyaddr <= 31))
priv->plat->phy_addr = phyaddr;
@@ -1559,7 +1559,7 @@
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
-@@ -4149,7 +4158,7 @@ int stmmac_dvr_probe(struct device *devi
+@@ -4150,7 +4159,7 @@ int stmmac_dvr_probe(struct device *devi
NETIF_F_RXCSUM;
if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
@@ -1568,7 +1568,7 @@
priv->tso = true;
dev_info(priv->device, "TSO feature enabled\n");
}
-@@ -4311,7 +4320,7 @@ int stmmac_suspend(struct device *dev)
+@@ -4312,7 +4321,7 @@ int stmmac_suspend(struct device *dev)
}
spin_unlock_irqrestore(&priv->lock, flags);