author     Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>   2018-01-18 13:51:13 +0000
committer  Hauke Mehrtens <hauke@hauke-m.de>                        2018-01-20 20:22:01 +0100
commit     d8565a06dc01b55ed1018d571e655c122b9d2a33 (patch)
tree       5f77a07078f849c8077162f38ac69a0af17d82a6 /target/linux/sunxi/patches-4.9
parent     9ddfac80155b899d1d2eca97dfae5a9e551e9abc (diff)
kernel: bump 4.9 to 4.9.77
Refresh patches.
Remove upstreamed patches:
target/linux/generic/backport-4.9/023-2-smsc75xx-use-skb_cow_head-to-deal-with-cloned-skbs.patch
target/linux/generic/backport-4.9/023-3-cx82310_eth-use-skb_cow_head-to-deal-with-cloned-skb.patch
target/linux/generic/backport-4.9/023-4-sr9700-use-skb_cow_head-to-deal-with-cloned-skbs.patch
target/linux/generic/backport-4.9/023-5-lan78xx-use-skb_cow_head-to-deal-with-cloned-skbs.patch
CVEs completely or partially addressed:
CVE-2017-5715
CVE-2017-5753
CVE-2017-17741
CVE-2017-1000410
Compile-tested: ar71xx Archer C7 v2
Run-tested: ar71xx Archer C7 v2
Signed-off-by: Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>
Tested-by: Koen Vandeputte <koen.vandeputte@ncentric.com>
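For context, the "refresh patches / remove upstreamed patches" steps described in the message above are normally driven by the OpenWrt buildroot's quilt integration rather than edited by hand. A minimal sketch of that workflow, assuming a standard buildroot checkout with the target already selected; the exact commands and the patch glob are illustrative and not part of this commit:

    # drop the backports that are already part of upstream 4.9.77
    rm target/linux/generic/backport-4.9/023-*-use-skb_cow_head-to-deal-with-cloned-skb*.patch

    # bump the kernel version and tarball hash in include/kernel-version.mk
    # (values omitted here), then let the build system re-apply and
    # regenerate every patch against the new kernel tree
    make target/linux/clean
    make target/linux/refresh V=s

    # rebuild and boot-test at least one affected target before committing
    make -j"$(nproc)"

The refresh step rewrites the hunk offsets in the stored patches, which is why the diff below consists almost entirely of shifted @@ line numbers in the sunxi stmmac patches rather than functional changes.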
Diffstat (limited to 'target/linux/sunxi/patches-4.9')
4 files changed, 200 insertions, 200 deletions
diff --git a/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch b/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch index ca2eb3ac8f..69bbb411d8 100644 --- a/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch +++ b/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch @@ -2131,9 +2131,9 @@ { + struct net_device *ndev = priv->dev; unsigned long flags; + int interface = priv->plat->interface; bool ret = false; - -@@ -295,7 +297,7 @@ bool stmmac_eee_init(struct stmmac_priv +@@ -301,7 +303,7 @@ bool stmmac_eee_init(struct stmmac_priv int tx_lpi_timer = priv->tx_lpi_timer; /* Check if the PHY supports EEE */ @@ -2142,7 +2142,7 @@ /* To manage at run-time if the EEE cannot be supported * anymore (for example because the lp caps have been * changed). -@@ -303,7 +305,7 @@ bool stmmac_eee_init(struct stmmac_priv +@@ -309,7 +311,7 @@ bool stmmac_eee_init(struct stmmac_priv */ spin_lock_irqsave(&priv->lock, flags); if (priv->eee_active) { @@ -2151,7 +2151,7 @@ del_timer_sync(&priv->eee_ctrl_timer); priv->hw->mac->set_eee_timer(priv->hw, 0, tx_lpi_timer); -@@ -327,12 +329,12 @@ bool stmmac_eee_init(struct stmmac_priv +@@ -333,12 +335,12 @@ bool stmmac_eee_init(struct stmmac_priv tx_lpi_timer); } /* Set HW EEE according to the speed */ @@ -2166,7 +2166,7 @@ } out: return ret; -@@ -450,8 +452,8 @@ static int stmmac_hwtstamp_ioctl(struct +@@ -456,8 +458,8 @@ static int stmmac_hwtstamp_ioctl(struct sizeof(struct hwtstamp_config))) return -EFAULT; @@ -2177,7 +2177,7 @@ /* reserved for future extensions */ if (config.flags) -@@ -697,7 +699,7 @@ static void stmmac_release_ptp(struct st +@@ -703,7 +705,7 @@ static void stmmac_release_ptp(struct st static void stmmac_adjust_link(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); @@ -2186,7 +2186,7 @@ unsigned long flags; int new_state = 0; unsigned int fc = priv->flow_ctrl, pause_time = priv->pause; -@@ -750,9 +752,9 @@ static void stmmac_adjust_link(struct ne +@@ -756,9 +758,9 @@ static void stmmac_adjust_link(struct ne stmmac_hw_fix_mac_speed(priv); break; default: @@ -2199,7 +2199,7 @@ break; } -@@ -805,10 +807,10 @@ static void stmmac_check_pcs_mode(struct +@@ -811,10 +813,10 @@ static void stmmac_check_pcs_mode(struct (interface == PHY_INTERFACE_MODE_RGMII_ID) || (interface == PHY_INTERFACE_MODE_RGMII_RXID) || (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { @@ -2212,7 +2212,7 @@ priv->hw->pcs = STMMAC_PCS_SGMII; } } -@@ -843,15 +845,15 @@ static int stmmac_init_phy(struct net_de +@@ -849,15 +851,15 @@ static int stmmac_init_phy(struct net_de snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, priv->plat->phy_addr); @@ -2231,7 +2231,7 @@ if (!phydev) return -ENODEV; -@@ -884,10 +886,8 @@ static int stmmac_init_phy(struct net_de +@@ -890,10 +892,8 @@ static int stmmac_init_phy(struct net_de if (phydev->is_pseudo_fixed_link) phydev->irq = PHY_POLL; @@ -2244,7 +2244,7 @@ return 0; } -@@ -973,7 +973,8 @@ static int stmmac_init_rx_buffers(struct +@@ -979,7 +979,8 @@ static int stmmac_init_rx_buffers(struct skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags); if (!skb) { @@ -2254,7 +2254,7 @@ return -ENOMEM; } priv->rx_skbuff[i] = skb; -@@ -981,15 +982,15 @@ static int stmmac_init_rx_buffers(struct +@@ -987,15 +988,15 @@ static int stmmac_init_rx_buffers(struct priv->dma_buf_sz, DMA_FROM_DEVICE); if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) { @@ -2273,7 +2273,7 @@ if ((priv->hw->mode->init_desc3) && (priv->dma_buf_sz == BUF_SIZE_16KiB)) -@@ -1031,13 +1032,14 @@ static 
int init_dma_desc_rings(struct ne +@@ -1037,13 +1038,14 @@ static int init_dma_desc_rings(struct ne priv->dma_buf_sz = bfsize; @@ -2294,7 +2294,7 @@ for (i = 0; i < DMA_RX_SIZE; i++) { struct dma_desc *p; if (priv->extend_desc) -@@ -1049,10 +1051,9 @@ static int init_dma_desc_rings(struct ne +@@ -1055,10 +1057,9 @@ static int init_dma_desc_rings(struct ne if (ret) goto err_init_rx_buffers; @@ -2308,7 +2308,7 @@ } priv->cur_rx = 0; priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); -@@ -1307,7 +1308,7 @@ static void stmmac_tx_clean(struct stmma +@@ -1313,7 +1314,7 @@ static void stmmac_tx_clean(struct stmma unsigned int bytes_compl = 0, pkts_compl = 0; unsigned int entry = priv->dirty_tx; @@ -2317,7 +2317,7 @@ priv->xstats.tx_clean++; -@@ -1378,22 +1379,17 @@ static void stmmac_tx_clean(struct stmma +@@ -1384,22 +1385,17 @@ static void stmmac_tx_clean(struct stmma netdev_completed_queue(priv->dev, pkts_compl, bytes_compl); if (unlikely(netif_queue_stopped(priv->dev) && @@ -2345,7 +2345,7 @@ } static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv) -@@ -1497,7 +1493,7 @@ static void stmmac_mmc_setup(struct stmm +@@ -1503,7 +1499,7 @@ static void stmmac_mmc_setup(struct stmm dwmac_mmc_ctrl(priv->mmcaddr, mode); memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); } else @@ -2354,7 +2354,7 @@ } /** -@@ -1510,18 +1506,18 @@ static void stmmac_mmc_setup(struct stmm +@@ -1516,18 +1512,18 @@ static void stmmac_mmc_setup(struct stmm static void stmmac_selec_desc_mode(struct stmmac_priv *priv) { if (priv->plat->enh_desc) { @@ -2377,7 +2377,7 @@ priv->hw->desc = &ndesc_ops; } } -@@ -1562,8 +1558,8 @@ static void stmmac_check_ether_addr(stru +@@ -1568,8 +1564,8 @@ static void stmmac_check_ether_addr(stru priv->dev->dev_addr, 0); if (!is_valid_ether_addr(priv->dev->dev_addr)) eth_hw_addr_random(priv->dev); @@ -2388,7 +2388,7 @@ } } -@@ -1577,16 +1573,12 @@ static void stmmac_check_ether_addr(stru +@@ -1583,16 +1579,12 @@ static void stmmac_check_ether_addr(stru */ static int stmmac_init_dma_engine(struct stmmac_priv *priv) { @@ -2408,7 +2408,7 @@ } if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) -@@ -1598,8 +1590,8 @@ static int stmmac_init_dma_engine(struct +@@ -1604,8 +1596,8 @@ static int stmmac_init_dma_engine(struct return ret; } @@ -2419,7 +2419,7 @@ if (priv->synopsys_id >= DWMAC_CORE_4_00) { priv->rx_tail_addr = priv->dma_rx_phy + -@@ -1671,7 +1663,8 @@ static int stmmac_hw_setup(struct net_de +@@ -1677,7 +1669,8 @@ static int stmmac_hw_setup(struct net_de /* DMA initialization and SW reset */ ret = stmmac_init_dma_engine(priv); if (ret < 0) { @@ -2429,7 +2429,7 @@ return ret; } -@@ -1700,7 +1693,7 @@ static int stmmac_hw_setup(struct net_de +@@ -1706,7 +1699,7 @@ static int stmmac_hw_setup(struct net_de ret = priv->hw->mac->rx_ipc(priv->hw); if (!ret) { @@ -2438,7 +2438,7 @@ priv->plat->rx_coe = STMMAC_RX_COE_NONE; priv->hw->rx_csum = 0; } -@@ -1725,10 +1718,11 @@ static int stmmac_hw_setup(struct net_de +@@ -1731,10 +1724,11 @@ static int stmmac_hw_setup(struct net_de #ifdef CONFIG_DEBUG_FS ret = stmmac_init_fs(dev); if (ret < 0) @@ -2452,7 +2452,7 @@ priv->hw->dma->start_tx(priv->ioaddr); priv->hw->dma->start_rx(priv->ioaddr); -@@ -1783,8 +1777,9 @@ static int stmmac_open(struct net_device +@@ -1789,8 +1783,9 @@ static int stmmac_open(struct net_device priv->hw->pcs != STMMAC_PCS_RTBI) { ret = stmmac_init_phy(dev); if (ret) { @@ -2464,7 +2464,7 @@ return ret; } } -@@ -1799,33 +1794,36 @@ static int stmmac_open(struct net_device +@@ -1805,33 +1800,36 @@ static int 
stmmac_open(struct net_device ret = alloc_dma_desc_resources(priv); if (ret < 0) { @@ -2508,7 +2508,7 @@ goto init_error; } -@@ -1834,8 +1832,9 @@ static int stmmac_open(struct net_device +@@ -1840,8 +1838,9 @@ static int stmmac_open(struct net_device ret = request_irq(priv->wol_irq, stmmac_interrupt, IRQF_SHARED, dev->name, dev); if (unlikely(ret < 0)) { @@ -2520,7 +2520,7 @@ goto wolirq_error; } } -@@ -1845,8 +1844,9 @@ static int stmmac_open(struct net_device +@@ -1851,8 +1850,9 @@ static int stmmac_open(struct net_device ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, dev->name, dev); if (unlikely(ret < 0)) { @@ -2532,7 +2532,7 @@ goto lpiirq_error; } } -@@ -1865,8 +1865,8 @@ wolirq_error: +@@ -1871,8 +1871,8 @@ wolirq_error: init_error: free_dma_desc_resources(priv); dma_desc_error: @@ -2543,7 +2543,7 @@ return ret; } -@@ -1885,10 +1885,9 @@ static int stmmac_release(struct net_dev +@@ -1891,10 +1891,9 @@ static int stmmac_release(struct net_dev del_timer_sync(&priv->eee_ctrl_timer); /* Stop and disconnect the PHY */ @@ -2557,7 +2557,7 @@ } netif_stop_queue(dev); -@@ -1948,13 +1947,13 @@ static void stmmac_tso_allocator(struct +@@ -1954,13 +1953,13 @@ static void stmmac_tso_allocator(struct priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); desc = priv->dma_tx + priv->cur_tx; @@ -2573,7 +2573,7 @@ 0, 0); tmp_len -= TSO_MAX_BUFF_SIZE; -@@ -1999,8 +1998,6 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2005,8 +2004,6 @@ static netdev_tx_t stmmac_tso_xmit(struc u8 proto_hdr_len; int i; @@ -2582,7 +2582,7 @@ /* Compute header lengths */ proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); -@@ -2010,9 +2007,10 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2016,9 +2013,10 @@ static netdev_tx_t stmmac_tso_xmit(struc if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); /* This is a hard error, log it. */ @@ -2595,7 +2595,7 @@ return NETDEV_TX_BUSY; } -@@ -2050,11 +2048,11 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2056,11 +2054,11 @@ static netdev_tx_t stmmac_tso_xmit(struc priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb); priv->tx_skbuff[first_entry] = skb; @@ -2609,7 +2609,7 @@ /* If needed take extra descriptors to fill the remaining payload */ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; -@@ -2083,8 +2081,8 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2089,8 +2087,8 @@ static netdev_tx_t stmmac_tso_xmit(struc priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { @@ -2620,7 +2620,7 @@ netif_stop_queue(dev); } -@@ -2128,7 +2126,7 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2134,7 +2132,7 @@ static netdev_tx_t stmmac_tso_xmit(struc * descriptor and then barrier is needed to make sure that * all is coherent before granting the DMA engine. 
*/ @@ -2629,7 +2629,7 @@ if (netif_msg_pktdata(priv)) { pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", -@@ -2147,11 +2145,9 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2153,11 +2151,9 @@ static netdev_tx_t stmmac_tso_xmit(struc priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, STMMAC_CHAN0); @@ -2641,7 +2641,7 @@ dev_err(priv->device, "Tx dma map failed\n"); dev_kfree_skb(skb); priv->dev->stats.tx_dropped++; -@@ -2183,14 +2179,13 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2189,14 +2185,13 @@ static netdev_tx_t stmmac_xmit(struct sk return stmmac_tso_xmit(skb, dev); } @@ -2659,7 +2659,7 @@ } return NETDEV_TX_BUSY; } -@@ -2243,13 +2238,11 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2249,13 +2244,11 @@ static netdev_tx_t stmmac_xmit(struct sk priv->tx_skbuff[entry] = NULL; @@ -2678,7 +2678,7 @@ priv->tx_skbuff_dma[entry].map_as_page = true; priv->tx_skbuff_dma[entry].len = len; -@@ -2267,9 +2260,10 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2273,9 +2266,10 @@ static netdev_tx_t stmmac_xmit(struct sk if (netif_msg_pktdata(priv)) { void *tx_head; @@ -2692,7 +2692,7 @@ if (priv->extend_desc) tx_head = (void *)priv->dma_etx; -@@ -2278,13 +2272,13 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2284,13 +2278,13 @@ static netdev_tx_t stmmac_xmit(struct sk priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false); @@ -2709,7 +2709,7 @@ netif_stop_queue(dev); } -@@ -2320,13 +2314,11 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2326,13 +2320,11 @@ static netdev_tx_t stmmac_xmit(struct sk if (dma_mapping_error(priv->device, des)) goto dma_map_err; @@ -2728,7 +2728,7 @@ priv->tx_skbuff_dma[first_entry].len = nopaged_len; priv->tx_skbuff_dma[first_entry].last_segment = last_segment; -@@ -2347,7 +2339,7 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2353,7 +2345,7 @@ static netdev_tx_t stmmac_xmit(struct sk * descriptor and then barrier is needed to make sure that * all is coherent before granting the DMA engine. 
*/ @@ -2737,7 +2737,7 @@ } netdev_sent_queue(dev, skb->len); -@@ -2358,12 +2350,10 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2364,12 +2356,10 @@ static netdev_tx_t stmmac_xmit(struct sk priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, STMMAC_CHAN0); @@ -2751,7 +2751,7 @@ dev_kfree_skb(skb); priv->dev->stats.tx_dropped++; return NETDEV_TX_OK; -@@ -2434,16 +2424,16 @@ static inline void stmmac_rx_refill(stru +@@ -2440,16 +2430,16 @@ static inline void stmmac_rx_refill(stru DMA_FROM_DEVICE); if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[entry])) { @@ -2771,7 +2771,7 @@ } if (priv->hw->mode->refill_desc3) priv->hw->mode->refill_desc3(priv, p); -@@ -2451,17 +2441,17 @@ static inline void stmmac_rx_refill(stru +@@ -2457,17 +2447,17 @@ static inline void stmmac_rx_refill(stru if (priv->rx_zeroc_thresh > 0) priv->rx_zeroc_thresh--; @@ -2793,7 +2793,7 @@ entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); } -@@ -2485,7 +2475,7 @@ static int stmmac_rx(struct stmmac_priv +@@ -2491,7 +2481,7 @@ static int stmmac_rx(struct stmmac_priv if (netif_msg_rx_status(priv)) { void *rx_head; @@ -2802,7 +2802,7 @@ if (priv->extend_desc) rx_head = (void *)priv->dma_erx; else -@@ -2547,9 +2537,9 @@ static int stmmac_rx(struct stmmac_priv +@@ -2553,9 +2543,9 @@ static int stmmac_rx(struct stmmac_priv unsigned int des; if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) @@ -2814,7 +2814,7 @@ frame_len = priv->hw->desc->get_rx_frame_len(p, coe); -@@ -2558,9 +2548,9 @@ static int stmmac_rx(struct stmmac_priv +@@ -2564,9 +2554,9 @@ static int stmmac_rx(struct stmmac_priv * ignored */ if (frame_len > priv->dma_buf_sz) { @@ -2827,7 +2827,7 @@ priv->dev->stats.rx_length_errors++; break; } -@@ -2572,11 +2562,11 @@ static int stmmac_rx(struct stmmac_priv +@@ -2578,11 +2568,11 @@ static int stmmac_rx(struct stmmac_priv frame_len -= ETH_FCS_LEN; if (netif_msg_rx_status(priv)) { @@ -2843,7 +2843,7 @@ } /* The zero-copy is always used for all the sizes -@@ -2613,8 +2603,9 @@ static int stmmac_rx(struct stmmac_priv +@@ -2619,8 +2609,9 @@ static int stmmac_rx(struct stmmac_priv } else { skb = priv->rx_skbuff[entry]; if (unlikely(!skb)) { @@ -2855,7 +2855,7 @@ priv->dev->stats.rx_dropped++; break; } -@@ -2630,7 +2621,8 @@ static int stmmac_rx(struct stmmac_priv +@@ -2636,7 +2627,8 @@ static int stmmac_rx(struct stmmac_priv } if (netif_msg_pktdata(priv)) { @@ -2865,7 +2865,7 @@ print_pkt(skb->data, frame_len); } -@@ -2733,7 +2725,7 @@ static int stmmac_change_mtu(struct net_ +@@ -2739,7 +2731,7 @@ static int stmmac_change_mtu(struct net_ int max_mtu; if (netif_running(dev)) { @@ -2874,7 +2874,7 @@ return -EBUSY; } -@@ -2825,7 +2817,7 @@ static irqreturn_t stmmac_interrupt(int +@@ -2831,7 +2823,7 @@ static irqreturn_t stmmac_interrupt(int pm_wakeup_event(priv->device, 0); if (unlikely(!dev)) { @@ -2883,7 +2883,7 @@ return IRQ_NONE; } -@@ -2883,7 +2875,6 @@ static void stmmac_poll_controller(struc +@@ -2889,7 +2881,6 @@ static void stmmac_poll_controller(struc */ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { @@ -2891,7 +2891,7 @@ int ret = -EOPNOTSUPP; if (!netif_running(dev)) -@@ -2893,9 +2884,9 @@ static int stmmac_ioctl(struct net_devic +@@ -2899,9 +2890,9 @@ static int stmmac_ioctl(struct net_devic case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: @@ -2903,7 +2903,7 @@ break; case SIOCSHWTSTAMP: ret = stmmac_hwtstamp_ioctl(dev, rq); -@@ -2923,14 +2914,17 @@ static void sysfs_display_ring(void *hea +@@ -2929,14 +2920,17 @@ static void sysfs_display_ring(void *hea x = 
*(u64 *) ep; seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), @@ -2924,7 +2924,7 @@ p++; } seq_printf(seq, "\n"); -@@ -2962,6 +2956,8 @@ static int stmmac_sysfs_ring_open(struct +@@ -2968,6 +2962,8 @@ static int stmmac_sysfs_ring_open(struct return single_open(file, stmmac_sysfs_ring_read, inode->i_private); } @@ -2933,7 +2933,7 @@ static const struct file_operations stmmac_rings_status_fops = { .owner = THIS_MODULE, .open = stmmac_sysfs_ring_open, -@@ -2984,11 +2980,11 @@ static int stmmac_sysfs_dma_cap_read(str +@@ -2990,11 +2986,11 @@ static int stmmac_sysfs_dma_cap_read(str seq_printf(seq, "\tDMA HW features\n"); seq_printf(seq, "==============================\n"); @@ -2948,7 +2948,7 @@ (priv->dma_cap.half_duplex) ? "Y" : "N"); seq_printf(seq, "\tHash Filter: %s\n", (priv->dma_cap.hash_filter) ? "Y" : "N"); -@@ -3006,9 +3002,9 @@ static int stmmac_sysfs_dma_cap_read(str +@@ -3012,9 +3008,9 @@ static int stmmac_sysfs_dma_cap_read(str (priv->dma_cap.rmon) ? "Y" : "N"); seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", (priv->dma_cap.time_stamp) ? "Y" : "N"); @@ -2960,7 +2960,7 @@ (priv->dma_cap.eee) ? "Y" : "N"); seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); seq_printf(seq, "\tChecksum Offload in TX: %s\n", -@@ -3055,8 +3051,7 @@ static int stmmac_init_fs(struct net_dev +@@ -3061,8 +3057,7 @@ static int stmmac_init_fs(struct net_dev priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) { @@ -2970,7 +2970,7 @@ return -ENOMEM; } -@@ -3068,7 +3063,7 @@ static int stmmac_init_fs(struct net_dev +@@ -3074,7 +3069,7 @@ static int stmmac_init_fs(struct net_dev &stmmac_rings_status_fops); if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) { @@ -2979,7 +2979,7 @@ debugfs_remove_recursive(priv->dbgfs_dir); return -ENOMEM; -@@ -3080,7 +3075,7 @@ static int stmmac_init_fs(struct net_dev +@@ -3086,7 +3081,7 @@ static int stmmac_init_fs(struct net_dev dev, &stmmac_dma_cap_fops); if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) { @@ -2988,7 +2988,7 @@ debugfs_remove_recursive(priv->dbgfs_dir); return -ENOMEM; -@@ -3152,11 +3147,11 @@ static int stmmac_hw_init(struct stmmac_ +@@ -3158,11 +3153,11 @@ static int stmmac_hw_init(struct stmmac_ } else { if (chain_mode) { priv->hw->mode = &chain_mode_ops; @@ -3002,7 +3002,7 @@ priv->mode = STMMAC_RING_MODE; } } -@@ -3164,7 +3159,7 @@ static int stmmac_hw_init(struct stmmac_ +@@ -3170,7 +3165,7 @@ static int stmmac_hw_init(struct stmmac_ /* Get the HW capability (new GMAC newer than 3.50a) */ priv->hw_cap_support = stmmac_get_hw_features(priv); if (priv->hw_cap_support) { @@ -3011,7 +3011,7 @@ /* We can override some gmac/dma configuration fields: e.g. * enh_desc, tx_coe (e.g. 
that are passed through the -@@ -3189,8 +3184,9 @@ static int stmmac_hw_init(struct stmmac_ +@@ -3195,8 +3190,9 @@ static int stmmac_hw_init(struct stmmac_ else if (priv->dma_cap.rx_coe_type1) priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; @@ -3023,7 +3023,7 @@ /* To use alternate (extended), normal or GMAC4 descriptor structures */ if (priv->synopsys_id >= DWMAC_CORE_4_00) -@@ -3200,20 +3196,20 @@ static int stmmac_hw_init(struct stmmac_ +@@ -3206,20 +3202,20 @@ static int stmmac_hw_init(struct stmmac_ if (priv->plat->rx_coe) { priv->hw->rx_csum = priv->plat->rx_coe; @@ -3049,7 +3049,7 @@ return 0; } -@@ -3272,8 +3268,8 @@ int stmmac_dvr_probe(struct device *devi +@@ -3278,8 +3274,8 @@ int stmmac_dvr_probe(struct device *devi priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME); if (IS_ERR(priv->stmmac_clk)) { @@ -3060,7 +3060,7 @@ /* If failed to obtain stmmac_clk and specific clk_csr value * is NOT passed from the platform, probe fail. */ -@@ -3322,7 +3318,7 @@ int stmmac_dvr_probe(struct device *devi +@@ -3328,7 +3324,7 @@ int stmmac_dvr_probe(struct device *devi if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { ndev->hw_features |= NETIF_F_TSO; priv->tso = true; @@ -3069,7 +3069,7 @@ } ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; ndev->watchdog_timeo = msecs_to_jiffies(watchdog); -@@ -3342,13 +3338,13 @@ int stmmac_dvr_probe(struct device *devi +@@ -3348,13 +3344,13 @@ int stmmac_dvr_probe(struct device *devi */ if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) { priv->use_riwt = 1; @@ -3085,7 +3085,7 @@ /* If a specific clk_csr value is passed from the platform * this means that the CSR Clock Range selection cannot be -@@ -3369,15 +3365,17 @@ int stmmac_dvr_probe(struct device *devi +@@ -3375,15 +3371,17 @@ int stmmac_dvr_probe(struct device *devi /* MDIO bus Registration */ ret = stmmac_mdio_register(ndev); if (ret < 0) { @@ -3107,7 +3107,7 @@ goto error_netdev_register; } -@@ -3388,7 +3386,7 @@ error_netdev_register: +@@ -3394,7 +3392,7 @@ error_netdev_register: priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); @@ -3116,7 +3116,7 @@ netif_napi_del(&priv->napi); error_hw_init: clk_disable_unprepare(priv->pclk); -@@ -3412,7 +3410,7 @@ int stmmac_dvr_remove(struct device *dev +@@ -3418,7 +3416,7 @@ int stmmac_dvr_remove(struct device *dev struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); @@ -3125,7 +3125,7 @@ priv->hw->dma->stop_rx(priv->ioaddr); priv->hw->dma->stop_tx(priv->ioaddr); -@@ -3450,8 +3448,8 @@ int stmmac_suspend(struct device *dev) +@@ -3456,8 +3454,8 @@ int stmmac_suspend(struct device *dev) if (!ndev || !netif_running(ndev)) return 0; @@ -3136,7 +3136,7 @@ spin_lock_irqsave(&priv->lock, flags); -@@ -3545,8 +3543,8 @@ int stmmac_resume(struct device *dev) +@@ -3551,8 +3549,8 @@ int stmmac_resume(struct device *dev) spin_unlock_irqrestore(&priv->lock, flags); diff --git a/target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch b/target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch index fc626cbb9b..31d045670b 100644 --- a/target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch +++ b/target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch @@ -1460,7 +1460,7 @@ } /** -@@ -415,7 +412,7 @@ static void stmmac_get_rx_hwtstamp(struc +@@ -421,7 +418,7 @@ static void stmmac_get_rx_hwtstamp(struc /** * stmmac_hwtstamp_ioctl - control hardware timestamping. * @dev: device pointer. 
@@ -1469,7 +1469,7 @@ * a proprietary structure used to pass information to the driver. * Description: * This function configures the MAC to enable/disable both outgoing(TX) -@@ -606,7 +603,7 @@ static int stmmac_hwtstamp_ioctl(struct +@@ -612,7 +609,7 @@ static int stmmac_hwtstamp_ioctl(struct /* program Sub Second Increment reg */ sec_inc = priv->hw->ptp->config_sub_second_increment( @@ -1478,7 +1478,7 @@ priv->plat->has_gmac4); temp = div_u64(1000000000ULL, sec_inc); -@@ -616,7 +613,7 @@ static int stmmac_hwtstamp_ioctl(struct +@@ -622,7 +619,7 @@ static int stmmac_hwtstamp_ioctl(struct * where, freq_div_ratio = 1e9ns/sec_inc */ temp = (u64)(temp << 32); @@ -1487,7 +1487,7 @@ priv->hw->ptp->config_addend(priv->ptpaddr, priv->default_addend); -@@ -644,18 +641,6 @@ static int stmmac_init_ptp(struct stmmac +@@ -650,18 +647,6 @@ static int stmmac_init_ptp(struct stmmac if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) return -EOPNOTSUPP; @@ -1506,7 +1506,7 @@ priv->adv_ts = 0; /* Check if adv_ts can be enabled for dwmac 4.x core */ if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp) -@@ -682,8 +667,8 @@ static int stmmac_init_ptp(struct stmmac +@@ -688,8 +673,8 @@ static int stmmac_init_ptp(struct stmmac static void stmmac_release_ptp(struct stmmac_priv *priv) { @@ -1517,7 +1517,7 @@ stmmac_ptp_unregister(priv); } -@@ -704,7 +689,7 @@ static void stmmac_adjust_link(struct ne +@@ -710,7 +695,7 @@ static void stmmac_adjust_link(struct ne int new_state = 0; unsigned int fc = priv->flow_ctrl, pause_time = priv->pause; @@ -1526,7 +1526,7 @@ return; spin_lock_irqsave(&priv->lock, flags); -@@ -731,33 +716,36 @@ static void stmmac_adjust_link(struct ne +@@ -737,33 +722,36 @@ static void stmmac_adjust_link(struct ne new_state = 1; switch (phydev->speed) { case 1000: @@ -1577,7 +1577,7 @@ priv->speed = phydev->speed; } -@@ -770,8 +758,8 @@ static void stmmac_adjust_link(struct ne +@@ -776,8 +764,8 @@ static void stmmac_adjust_link(struct ne } else if (priv->oldlink) { new_state = 1; priv->oldlink = 0; @@ -1588,7 +1588,7 @@ } if (new_state && netif_msg_link(priv)) -@@ -833,8 +821,8 @@ static int stmmac_init_phy(struct net_de +@@ -839,8 +827,8 @@ static int stmmac_init_phy(struct net_de int interface = priv->plat->interface; int max_speed = priv->plat->max_speed; priv->oldlink = 0; @@ -1599,7 +1599,7 @@ if (priv->plat->phy_node) { phydev = of_phy_connect(dev, priv->plat->phy_node, -@@ -886,9 +874,7 @@ static int stmmac_init_phy(struct net_de +@@ -892,9 +880,7 @@ static int stmmac_init_phy(struct net_de if (phydev->is_pseudo_fixed_link) phydev->irq = PHY_POLL; @@ -1610,7 +1610,7 @@ return 0; } -@@ -1014,7 +1000,7 @@ static void stmmac_free_rx_buffers(struc +@@ -1020,7 +1006,7 @@ static void stmmac_free_rx_buffers(struc * @dev: net device structure * @flags: gfp flag. * Description: this function initializes the DMA RX/TX descriptors @@ -1619,7 +1619,7 @@ * modes. 
*/ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) -@@ -1127,13 +1113,6 @@ static void dma_free_tx_skbufs(struct st +@@ -1133,13 +1119,6 @@ static void dma_free_tx_skbufs(struct st int i; for (i = 0; i < DMA_TX_SIZE; i++) { @@ -1633,7 +1633,7 @@ if (priv->tx_skbuff_dma[i].buf) { if (priv->tx_skbuff_dma[i].map_as_page) dma_unmap_page(priv->device, -@@ -1147,7 +1126,7 @@ static void dma_free_tx_skbufs(struct st +@@ -1153,7 +1132,7 @@ static void dma_free_tx_skbufs(struct st DMA_TO_DEVICE); } @@ -1642,7 +1642,7 @@ dev_kfree_skb_any(priv->tx_skbuff[i]); priv->tx_skbuff[i] = NULL; priv->tx_skbuff_dma[i].buf = 0; -@@ -1271,6 +1250,28 @@ static void free_dma_desc_resources(stru +@@ -1277,6 +1256,28 @@ static void free_dma_desc_resources(stru } /** @@ -1671,7 +1671,7 @@ * stmmac_dma_operation_mode - HW DMA operation mode * @priv: driver private structure * Description: it is used for configuring the DMA operation mode register in -@@ -1671,10 +1672,6 @@ static int stmmac_hw_setup(struct net_de +@@ -1677,10 +1678,6 @@ static int stmmac_hw_setup(struct net_de /* Copy the MAC addr into the HW */ priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0); @@ -1682,7 +1682,7 @@ /* PS and related bits will be programmed according to the speed */ if (priv->hw->pcs) { int speed = priv->plat->mac_port_sel_speed; -@@ -1691,6 +1688,10 @@ static int stmmac_hw_setup(struct net_de +@@ -1697,6 +1694,10 @@ static int stmmac_hw_setup(struct net_de /* Initialize the MAC Core */ priv->hw->mac->core_init(priv->hw, dev->mtu); @@ -1693,7 +1693,7 @@ ret = priv->hw->mac->rx_ipc(priv->hw); if (!ret) { netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); -@@ -1711,8 +1712,10 @@ static int stmmac_hw_setup(struct net_de +@@ -1717,8 +1718,10 @@ static int stmmac_hw_setup(struct net_de if (init_ptp) { ret = stmmac_init_ptp(priv); @@ -1706,7 +1706,7 @@ } #ifdef CONFIG_DEBUG_FS -@@ -1726,11 +1729,6 @@ static int stmmac_hw_setup(struct net_de +@@ -1732,11 +1735,6 @@ static int stmmac_hw_setup(struct net_de priv->hw->dma->start_tx(priv->ioaddr); priv->hw->dma->start_rx(priv->ioaddr); @@ -1718,7 +1718,7 @@ priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { -@@ -2520,7 +2518,7 @@ static int stmmac_rx(struct stmmac_priv +@@ -2526,7 +2524,7 @@ static int stmmac_rx(struct stmmac_priv if (unlikely(status == discard_frame)) { priv->dev->stats.rx_errors++; if (priv->hwts_rx_en && !priv->extend_desc) { @@ -1727,7 +1727,7 @@ * with timestamp value, hence reinitialize * them in stmmac_rx_refill() function so that * device can reuse it. -@@ -2543,7 +2541,7 @@ static int stmmac_rx(struct stmmac_priv +@@ -2549,7 +2547,7 @@ static int stmmac_rx(struct stmmac_priv frame_len = priv->hw->desc->get_rx_frame_len(p, coe); @@ -1736,7 +1736,7 @@ * (preallocated during init) then the packet is * ignored */ -@@ -2763,7 +2761,7 @@ static netdev_features_t stmmac_fix_feat +@@ -2769,7 +2767,7 @@ static netdev_features_t stmmac_fix_feat /* Some GMAC devices have a bugged Jumbo frame support that * needs to have the Tx COE disabled for oversized frames * (due to limited buffer sizes). 
In this case we disable @@ -1745,7 +1745,7 @@ */ if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) features &= ~NETIF_F_CSUM_MASK; -@@ -2909,9 +2907,7 @@ static void sysfs_display_ring(void *hea +@@ -2915,9 +2913,7 @@ static void sysfs_display_ring(void *hea struct dma_desc *p = (struct dma_desc *)head; for (i = 0; i < size; i++) { @@ -1755,7 +1755,7 @@ seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), le32_to_cpu(ep->basic.des0), -@@ -2920,7 +2916,6 @@ static void sysfs_display_ring(void *hea +@@ -2926,7 +2922,6 @@ static void sysfs_display_ring(void *hea le32_to_cpu(ep->basic.des3)); ep++; } else { @@ -1763,7 +1763,7 @@ seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), le32_to_cpu(p->des0), le32_to_cpu(p->des1), -@@ -2990,7 +2985,7 @@ static int stmmac_sysfs_dma_cap_read(str +@@ -2996,7 +2991,7 @@ static int stmmac_sysfs_dma_cap_read(str (priv->dma_cap.hash_filter) ? "Y" : "N"); seq_printf(seq, "\tMultiple MAC address registers: %s\n", (priv->dma_cap.multi_addr) ? "Y" : "N"); @@ -1772,7 +1772,7 @@ (priv->dma_cap.pcs) ? "Y" : "N"); seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", (priv->dma_cap.sma_mdio) ? "Y" : "N"); -@@ -3266,44 +3261,8 @@ int stmmac_dvr_probe(struct device *devi +@@ -3272,44 +3267,8 @@ int stmmac_dvr_probe(struct device *devi if ((phyaddr >= 0) && (phyaddr <= 31)) priv->plat->phy_addr = phyaddr; @@ -1819,7 +1819,7 @@ /* Init MAC and get the capabilities */ ret = stmmac_hw_init(priv); -@@ -3389,10 +3348,6 @@ error_netdev_register: +@@ -3395,10 +3354,6 @@ error_netdev_register: error_mdio_register: netif_napi_del(&priv->napi); error_hw_init: @@ -1830,7 +1830,7 @@ free_netdev(ndev); return ret; -@@ -3418,10 +3373,10 @@ int stmmac_dvr_remove(struct device *dev +@@ -3424,10 +3379,10 @@ int stmmac_dvr_remove(struct device *dev stmmac_set_mac(priv->ioaddr, false); netif_carrier_off(ndev); unregister_netdev(ndev); @@ -1845,7 +1845,7 @@ if (priv->hw->pcs != STMMAC_PCS_RGMII && priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) -@@ -3470,14 +3425,14 @@ int stmmac_suspend(struct device *dev) +@@ -3476,14 +3431,14 @@ int stmmac_suspend(struct device *dev) stmmac_set_mac(priv->ioaddr, false); pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ @@ -1864,7 +1864,7 @@ return 0; } EXPORT_SYMBOL_GPL(stmmac_suspend); -@@ -3510,9 +3465,9 @@ int stmmac_resume(struct device *dev) +@@ -3516,9 +3471,9 @@ int stmmac_resume(struct device *dev) priv->irq_wake = 0; } else { pinctrl_pm_select_default_state(priv->device); diff --git a/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch b/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch index 18f58e947c..1356a4b9e9 100644 --- a/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch +++ b/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch @@ -2742,7 +2742,7 @@ priv->hw->mac->set_eee_mode(priv->hw, priv->plat->en_tx_lpi_clockgating); } -@@ -359,14 +434,14 @@ static void stmmac_get_tx_hwtstamp(struc +@@ -365,14 +440,14 @@ static void stmmac_get_tx_hwtstamp(struc return; /* check tx tstamp status */ @@ -2759,7 +2759,7 @@ /* pass tstamp to stack */ skb_tstamp_tx(skb, &shhwtstamp); } -@@ -393,19 +468,19 @@ static void stmmac_get_rx_hwtstamp(struc +@@ -399,19 +474,19 @@ static void stmmac_get_rx_hwtstamp(struc return; /* Check if timestamp is available */ @@ -2782,7 +2782,7 @@ } } -@@ -471,7 +546,10 @@ static int stmmac_hwtstamp_ioctl(struct +@@ -477,7 +552,10 @@ static int 
stmmac_hwtstamp_ioctl(struct /* PTP v1, UDP, any kind of event packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; /* take time stamp for all event messages */ @@ -2794,7 +2794,7 @@ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; -@@ -503,7 +581,10 @@ static int stmmac_hwtstamp_ioctl(struct +@@ -509,7 +587,10 @@ static int stmmac_hwtstamp_ioctl(struct config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for all event messages */ @@ -2806,7 +2806,7 @@ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; -@@ -537,7 +618,10 @@ static int stmmac_hwtstamp_ioctl(struct +@@ -543,7 +624,10 @@ static int stmmac_hwtstamp_ioctl(struct config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for all event messages */ @@ -2818,7 +2818,7 @@ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; -@@ -673,6 +757,19 @@ static void stmmac_release_ptp(struct st +@@ -679,6 +763,19 @@ static void stmmac_release_ptp(struct st } /** @@ -2838,7 +2838,7 @@ * stmmac_adjust_link - adjusts the link parameters * @dev: net device structure * Description: this is the helper called by the physical abstraction layer -@@ -687,7 +784,6 @@ static void stmmac_adjust_link(struct ne +@@ -693,7 +790,6 @@ static void stmmac_adjust_link(struct ne struct phy_device *phydev = dev->phydev; unsigned long flags; int new_state = 0; @@ -2846,7 +2846,7 @@ if (!phydev) return; -@@ -709,8 +805,7 @@ static void stmmac_adjust_link(struct ne +@@ -715,8 +811,7 @@ static void stmmac_adjust_link(struct ne } /* Flow Control operation */ if (phydev->pause) @@ -2856,7 +2856,7 @@ if (phydev->speed != priv->speed) { new_state = 1; -@@ -878,22 +973,56 @@ static int stmmac_init_phy(struct net_de +@@ -884,22 +979,56 @@ static int stmmac_init_phy(struct net_de return 0; } @@ -2925,7 +2925,7 @@ } static int stmmac_set_bfsize(int mtu, int bufsize) -@@ -913,48 +1042,88 @@ static int stmmac_set_bfsize(int mtu, in +@@ -919,48 +1048,88 @@ static int stmmac_set_bfsize(int mtu, in } /** @@ -3024,7 +3024,7 @@ struct sk_buff *skb; skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags); -@@ -963,20 +1132,20 @@ static int stmmac_init_rx_buffers(struct +@@ -969,20 +1138,20 @@ static int stmmac_init_rx_buffers(struct "%s: Rx init fails; skb is NULL\n", __func__); return -ENOMEM; } @@ -3050,7 +3050,7 @@ if ((priv->hw->mode->init_desc3) && (priv->dma_buf_sz == BUF_SIZE_16KiB)) -@@ -985,30 +1154,71 @@ static int stmmac_init_rx_buffers(struct +@@ -991,30 +1160,71 @@ static int stmmac_init_rx_buffers(struct return 0; } @@ -3131,7 +3131,7 @@ if (priv->hw->mode->set_16kib_bfsize) bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); -@@ -1018,235 +1228,409 @@ static int init_dma_desc_rings(struct ne +@@ -1024,235 +1234,409 @@ static int init_dma_desc_rings(struct ne priv->dma_buf_sz = bfsize; @@ -3706,7 +3706,7 @@ } /** -@@ -1256,19 +1640,104 @@ static void free_dma_desc_resources(stru +@@ -1262,19 +1646,104 @@ static void free_dma_desc_resources(stru */ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) { @@ -3822,7 +3822,7 @@ } /** -@@ -1279,11 +1748,20 @@ static void stmmac_mac_enable_rx_queues( +@@ -1285,11 +1754,20 @@ static void stmmac_mac_enable_rx_queues( */ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) { @@ -3847,7 +3847,7 @@ /* * In case of GMAC, SF mode can be enabled * to perform the TX COE in HW. 
This depends on: -@@ -1291,37 +1769,53 @@ static void stmmac_dma_operation_mode(st +@@ -1297,37 +1775,53 @@ static void stmmac_dma_operation_mode(st * 2) There is no bugged Jumbo frame support * that needs to not insert csum in the TDES. */ @@ -3911,7 +3911,7 @@ status = priv->hw->desc->tx_status(&priv->dev->stats, &priv->xstats, p, -@@ -1342,48 +1836,51 @@ static void stmmac_tx_clean(struct stmma +@@ -1348,48 +1842,51 @@ static void stmmac_tx_clean(struct stmma stmmac_get_tx_hwtstamp(priv, p, skb); } @@ -3981,7 +3981,7 @@ } if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { -@@ -1393,45 +1890,76 @@ static void stmmac_tx_clean(struct stmma +@@ -1399,45 +1896,76 @@ static void stmmac_tx_clean(struct stmma netif_tx_unlock(priv->dev); } @@ -4073,7 +4073,7 @@ } /** -@@ -1443,31 +1971,43 @@ static void stmmac_tx_err(struct stmmac_ +@@ -1449,31 +1977,43 @@ static void stmmac_tx_err(struct stmmac_ */ static void stmmac_dma_interrupt(struct stmmac_priv *priv) { @@ -4138,7 +4138,7 @@ } /** -@@ -1574,6 +2114,13 @@ static void stmmac_check_ether_addr(stru +@@ -1580,6 +2120,13 @@ static void stmmac_check_ether_addr(stru */ static int stmmac_init_dma_engine(struct stmmac_priv *priv) { @@ -4152,7 +4152,7 @@ int atds = 0; int ret = 0; -@@ -1591,19 +2138,49 @@ static int stmmac_init_dma_engine(struct +@@ -1597,19 +2144,49 @@ static int stmmac_init_dma_engine(struct return ret; } @@ -4214,7 +4214,7 @@ } if (priv->plat->axi && priv->hw->dma->axi) -@@ -1621,8 +2198,12 @@ static int stmmac_init_dma_engine(struct +@@ -1627,8 +2204,12 @@ static int stmmac_init_dma_engine(struct static void stmmac_tx_timer(unsigned long data) { struct stmmac_priv *priv = (struct stmmac_priv *)data; @@ -4228,7 +4228,7 @@ } /** -@@ -1644,6 +2225,196 @@ static void stmmac_init_tx_coalesce(stru +@@ -1650,6 +2231,196 @@ static void stmmac_init_tx_coalesce(stru add_timer(&priv->txtimer); } @@ -4425,7 +4425,7 @@ /** * stmmac_hw_setup - setup mac in a usable state. * @dev : pointer to the device structure. -@@ -1659,6 +2430,9 @@ static void stmmac_init_tx_coalesce(stru +@@ -1665,6 +2436,9 @@ static void stmmac_init_tx_coalesce(stru static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) { struct stmmac_priv *priv = netdev_priv(dev); @@ -4435,7 +4435,7 @@ int ret; /* DMA initialization and SW reset */ -@@ -1688,9 +2462,9 @@ static int stmmac_hw_setup(struct net_de +@@ -1694,9 +2468,9 @@ static int stmmac_hw_setup(struct net_de /* Initialize the MAC Core */ priv->hw->mac->core_init(priv->hw, dev->mtu); @@ -4448,7 +4448,7 @@ ret = priv->hw->mac->rx_ipc(priv->hw); if (!ret) { -@@ -1700,10 +2474,7 @@ static int stmmac_hw_setup(struct net_de +@@ -1706,10 +2480,7 @@ static int stmmac_hw_setup(struct net_de } /* Enable the MAC Rx/Tx */ @@ -4460,7 +4460,7 @@ /* Set the HW DMA mode and the COE */ stmmac_dma_operation_mode(priv); -@@ -1711,6 +2482,10 @@ static int stmmac_hw_setup(struct net_de +@@ -1717,6 +2488,10 @@ static int stmmac_hw_setup(struct net_de stmmac_mmc_setup(priv); if (init_ptp) { @@ -4471,7 +4471,7 @@ ret = stmmac_init_ptp(priv); if (ret == -EOPNOTSUPP) netdev_warn(priv->dev, "PTP not supported by HW\n"); -@@ -1725,35 +2500,37 @@ static int stmmac_hw_setup(struct net_de +@@ -1731,35 +2506,37 @@ static int stmmac_hw_setup(struct net_de __func__); #endif /* Start the ball rolling... */ @@ -4523,7 +4523,7 @@ /** * stmmac_open - open entry point of the driver * @dev : pointer to the device structure. 
-@@ -1822,7 +2599,7 @@ static int stmmac_open(struct net_device +@@ -1828,7 +2605,7 @@ static int stmmac_open(struct net_device netdev_err(priv->dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n", __func__, dev->irq, ret); @@ -4532,7 +4532,7 @@ } /* Request the Wake IRQ in case of another line is used for WoL */ -@@ -1849,8 +2626,8 @@ static int stmmac_open(struct net_device +@@ -1855,8 +2632,8 @@ static int stmmac_open(struct net_device } } @@ -4543,7 +4543,7 @@ return 0; -@@ -1859,7 +2636,12 @@ lpiirq_error: +@@ -1865,7 +2642,12 @@ lpiirq_error: free_irq(priv->wol_irq, dev); wolirq_error: free_irq(dev->irq, dev); @@ -4556,7 +4556,7 @@ init_error: free_dma_desc_resources(priv); dma_desc_error: -@@ -1888,9 +2670,9 @@ static int stmmac_release(struct net_dev +@@ -1894,9 +2676,9 @@ static int stmmac_release(struct net_dev phy_disconnect(dev->phydev); } @@ -4568,7 +4568,7 @@ del_timer_sync(&priv->txtimer); -@@ -1902,14 +2684,13 @@ static int stmmac_release(struct net_dev +@@ -1908,14 +2690,13 @@ static int stmmac_release(struct net_dev free_irq(priv->lpi_irq, dev); /* Stop TX/RX DMA and clear the descriptors */ @@ -4585,7 +4585,7 @@ netif_carrier_off(dev); -@@ -1928,22 +2709,24 @@ static int stmmac_release(struct net_dev +@@ -1934,22 +2715,24 @@ static int stmmac_release(struct net_dev * @des: buffer start address * @total_len: total length to fill in descriptors * @last_segmant: condition for the last descriptor @@ -4614,7 +4614,7 @@ desc->des0 = cpu_to_le32(des + (total_len - tmp_len)); buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? -@@ -1951,7 +2734,7 @@ static void stmmac_tso_allocator(struct +@@ -1957,7 +2740,7 @@ static void stmmac_tso_allocator(struct priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, 0, 1, @@ -4623,7 +4623,7 @@ 0, 0); tmp_len -= TSO_MAX_BUFF_SIZE; -@@ -1987,23 +2770,28 @@ static void stmmac_tso_allocator(struct +@@ -1993,23 +2776,28 @@ static void stmmac_tso_allocator(struct */ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -4658,7 +4658,7 @@ /* This is a hard error, log it. 
*/ netdev_err(priv->dev, "%s: Tx Ring full when queue awake\n", -@@ -2018,10 +2806,10 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2024,10 +2812,10 @@ static netdev_tx_t stmmac_tso_xmit(struc /* set new MSS value if needed */ if (mss != priv->mss) { @@ -4671,7 +4671,7 @@ } if (netif_msg_tx_queued(priv)) { -@@ -2031,9 +2819,9 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2037,9 +2825,9 @@ static netdev_tx_t stmmac_tso_xmit(struc skb->data_len); } @@ -4683,7 +4683,7 @@ first = desc; /* first descriptor: fill Headers on Buf1 */ -@@ -2042,9 +2830,8 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2048,9 +2836,8 @@ static netdev_tx_t stmmac_tso_xmit(struc if (dma_mapping_error(priv->device, des)) goto dma_map_err; @@ -4695,7 +4695,7 @@ first->des0 = cpu_to_le32(des); -@@ -2055,7 +2842,7 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2061,7 +2848,7 @@ static netdev_tx_t stmmac_tso_xmit(struc /* If needed take extra descriptors to fill the remaining payload */ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; @@ -4704,7 +4704,7 @@ /* Prepare fragments */ for (i = 0; i < nfrags; i++) { -@@ -2064,24 +2851,34 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2070,24 +2857,34 @@ static netdev_tx_t stmmac_tso_xmit(struc des = skb_frag_dma_map(priv->device, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); @@ -4748,7 +4748,7 @@ } dev->stats.tx_bytes += skb->len; -@@ -2113,7 +2910,7 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2119,7 +2916,7 @@ static netdev_tx_t stmmac_tso_xmit(struc priv->hw->desc->prepare_tso_tx_desc(first, 1, proto_hdr_len, pay_len, @@ -4757,7 +4757,7 @@ tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); /* If context desc is used to change MSS */ -@@ -2128,20 +2925,20 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2134,20 +2931,20 @@ static netdev_tx_t stmmac_tso_xmit(struc if (netif_msg_pktdata(priv)) { pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", @@ -4784,7 +4784,7 @@ return NETDEV_TX_OK; -@@ -2165,21 +2962,27 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2171,21 +2968,27 @@ static netdev_tx_t stmmac_xmit(struct sk struct stmmac_priv *priv = netdev_priv(dev); unsigned int nopaged_len = skb_headlen(skb); int i, csum_insertion = 0, is_jumbo = 0; @@ -4816,7 +4816,7 @@ /* This is a hard error, log it. 
*/ netdev_err(priv->dev, "%s: Tx Ring full when queue awake\n", -@@ -2191,20 +2994,18 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2197,20 +3000,18 @@ static netdev_tx_t stmmac_xmit(struct sk if (priv->tx_path_in_lpi_mode) stmmac_disable_eee_mode(priv); @@ -4840,7 +4840,7 @@ enh_desc = priv->plat->enh_desc; /* To program the descriptors according to the size of the frame */ if (enh_desc) -@@ -2212,7 +3013,7 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2218,7 +3019,7 @@ static netdev_tx_t stmmac_xmit(struct sk if (unlikely(is_jumbo) && likely(priv->synopsys_id < DWMAC_CORE_4_00)) { @@ -4849,7 +4849,7 @@ if (unlikely(entry < 0)) goto dma_map_err; } -@@ -2225,48 +3026,56 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2231,48 +3032,56 @@ static netdev_tx_t stmmac_xmit(struct sk entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); if (likely(priv->extend_desc)) @@ -4919,7 +4919,7 @@ priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false); -@@ -2274,10 +3083,10 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2280,10 +3089,10 @@ static netdev_tx_t stmmac_xmit(struct sk print_pkt(skb->data, skb->len); } @@ -4932,7 +4932,7 @@ } dev->stats.tx_bytes += skb->len; -@@ -2312,14 +3121,14 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2318,14 +3127,14 @@ static netdev_tx_t stmmac_xmit(struct sk if (dma_mapping_error(priv->device, des)) goto dma_map_err; @@ -4950,7 +4950,7 @@ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)) { -@@ -2331,7 +3140,7 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2337,7 +3146,7 @@ static netdev_tx_t stmmac_xmit(struct sk /* Prepare the first descriptor setting the OWN bit too */ priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len, csum_insertion, priv->mode, 1, @@ -4959,7 +4959,7 @@ /* The own bit must be the latest setting done when prepare the * descriptor and then barrier is needed to make sure that -@@ -2340,13 +3149,13 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2346,13 +3155,13 @@ static netdev_tx_t stmmac_xmit(struct sk dma_wmb(); } @@ -4976,7 +4976,7 @@ return NETDEV_TX_OK; -@@ -2374,9 +3183,9 @@ static void stmmac_rx_vlan(struct net_de +@@ -2380,9 +3189,9 @@ static void stmmac_rx_vlan(struct net_de } @@ -4988,7 +4988,7 @@ return 0; return 1; -@@ -2385,30 +3194,33 @@ static inline int stmmac_rx_threshold_co +@@ -2391,30 +3200,33 @@ static inline int stmmac_rx_threshold_co /** * stmmac_rx_refill - refill used skb preallocated buffers * @priv: driver private structure @@ -5029,7 +5029,7 @@ if (unlikely(net_ratelimit())) dev_err(priv->device, "fail to alloc skb entry %d\n", -@@ -2416,28 +3228,28 @@ static inline void stmmac_rx_refill(stru +@@ -2422,28 +3234,28 @@ static inline void stmmac_rx_refill(stru break; } @@ -5066,7 +5066,7 @@ netif_dbg(priv, rx_status, priv->dev, "refill entry #%d\n", entry); -@@ -2453,31 +3265,33 @@ static inline void stmmac_rx_refill(stru +@@ -2459,31 +3271,33 @@ static inline void stmmac_rx_refill(stru entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); } @@ -5107,7 +5107,7 @@ priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true); } -@@ -2487,9 +3301,9 @@ static int stmmac_rx(struct stmmac_priv +@@ -2493,9 +3307,9 @@ static int stmmac_rx(struct stmmac_priv struct dma_desc *np; if (priv->extend_desc) @@ -5119,7 +5119,7 @@ /* read the status of the incoming frame */ status = priv->hw->desc->rx_status(&priv->dev->stats, -@@ -2500,20 +3314,20 @@ static int stmmac_rx(struct stmmac_priv +@@ -2506,20 +3320,20 @@ static int stmmac_rx(struct stmmac_priv count++; @@ -5145,7 +5145,7 @@ entry); if 
(unlikely(status == discard_frame)) { priv->dev->stats.rx_errors++; -@@ -2523,9 +3337,9 @@ static int stmmac_rx(struct stmmac_priv +@@ -2529,9 +3343,9 @@ static int stmmac_rx(struct stmmac_priv * them in stmmac_rx_refill() function so that * device can reuse it. */ @@ -5157,7 +5157,7 @@ priv->dma_buf_sz, DMA_FROM_DEVICE); } -@@ -2573,7 +3387,7 @@ static int stmmac_rx(struct stmmac_priv +@@ -2579,7 +3393,7 @@ static int stmmac_rx(struct stmmac_priv */ if (unlikely(!priv->plat->has_gmac4 && ((frame_len < priv->rx_copybreak) || @@ -5166,7 +5166,7 @@ skb = netdev_alloc_skb_ip_align(priv->dev, frame_len); if (unlikely(!skb)) { -@@ -2585,21 +3399,21 @@ static int stmmac_rx(struct stmmac_priv +@@ -2591,21 +3405,21 @@ static int stmmac_rx(struct stmmac_priv } dma_sync_single_for_cpu(priv->device, @@ -5192,7 +5192,7 @@ if (unlikely(!skb)) { netdev_err(priv->dev, "%s: Inconsistent Rx chain\n", -@@ -2608,12 +3422,12 @@ static int stmmac_rx(struct stmmac_priv +@@ -2614,12 +3428,12 @@ static int stmmac_rx(struct stmmac_priv break; } prefetch(skb->data - NET_IP_ALIGN); @@ -5208,7 +5208,7 @@ priv->dma_buf_sz, DMA_FROM_DEVICE); } -@@ -2635,7 +3449,7 @@ static int stmmac_rx(struct stmmac_priv +@@ -2641,7 +3455,7 @@ static int stmmac_rx(struct stmmac_priv else skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -5217,7 +5217,7 @@ priv->dev->stats.rx_packets++; priv->dev->stats.rx_bytes += frame_len; -@@ -2643,7 +3457,7 @@ static int stmmac_rx(struct stmmac_priv +@@ -2649,7 +3463,7 @@ static int stmmac_rx(struct stmmac_priv entry = next_entry; } @@ -5226,7 +5226,7 @@ priv->xstats.rx_pkt_n += count; -@@ -2660,16 +3474,24 @@ static int stmmac_rx(struct stmmac_priv +@@ -2666,16 +3480,24 @@ static int stmmac_rx(struct stmmac_priv */ static int stmmac_poll(struct napi_struct *napi, int budget) { @@ -5255,7 +5255,7 @@ } return work_done; } -@@ -2685,9 +3507,12 @@ static int stmmac_poll(struct napi_struc +@@ -2691,9 +3513,12 @@ static int stmmac_poll(struct napi_struc static void stmmac_tx_timeout(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); @@ -5269,7 +5269,7 @@ } /** -@@ -2810,6 +3635,12 @@ static irqreturn_t stmmac_interrupt(int +@@ -2816,6 +3641,12 @@ static irqreturn_t stmmac_interrupt(int { struct net_device *dev = (struct net_device *)dev_id; struct stmmac_priv *priv = netdev_priv(dev); @@ -5282,7 +5282,7 @@ if (priv->irq_wake) pm_wakeup_event(priv->device, 0); -@@ -2823,16 +3654,30 @@ static irqreturn_t stmmac_interrupt(int +@@ -2829,16 +3660,30 @@ static irqreturn_t stmmac_interrupt(int if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { int status = priv->hw->mac->host_irq_status(priv->hw, &priv->xstats); @@ -5317,7 +5317,7 @@ } /* PCS link status */ -@@ -2917,7 +3762,7 @@ static void sysfs_display_ring(void *hea +@@ -2923,7 +3768,7 @@ static void sysfs_display_ring(void *hea ep++; } else { seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", @@ -5326,7 +5326,7 @@ le32_to_cpu(p->des0), le32_to_cpu(p->des1), le32_to_cpu(p->des2), le32_to_cpu(p->des3)); p++; -@@ -2930,17 +3775,40 @@ static int stmmac_sysfs_ring_read(struct +@@ -2936,17 +3781,40 @@ static int stmmac_sysfs_ring_read(struct { struct net_device *dev = seq->private; struct stmmac_priv *priv = netdev_priv(dev); @@ -5377,7 +5377,7 @@ } return 0; -@@ -3223,11 +4091,14 @@ int stmmac_dvr_probe(struct device *devi +@@ -3229,11 +4097,14 @@ int stmmac_dvr_probe(struct device *devi struct plat_stmmacenet_data *plat_dat, struct stmmac_resources *res) { @@ -5394,7 +5394,7 @@ if (!ndev) return -ENOMEM; -@@ -3269,6 +4140,10 @@ int 
stmmac_dvr_probe(struct device *devi +@@ -3275,6 +4146,10 @@ int stmmac_dvr_probe(struct device *devi if (ret) goto error_hw_init; @@ -5405,7 +5405,7 @@ ndev->netdev_ops = &stmmac_netdev_ops; ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | -@@ -3301,7 +4176,12 @@ int stmmac_dvr_probe(struct device *devi +@@ -3307,7 +4182,12 @@ int stmmac_dvr_probe(struct device *devi "Enable RX Mitigation via HW Watchdog Timer\n"); } @@ -5419,7 +5419,7 @@ spin_lock_init(&priv->lock); -@@ -3346,7 +4226,11 @@ error_netdev_register: +@@ -3352,7 +4232,11 @@ error_netdev_register: priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); error_mdio_register: @@ -5432,7 +5432,7 @@ error_hw_init: free_netdev(ndev); -@@ -3367,10 +4251,9 @@ int stmmac_dvr_remove(struct device *dev +@@ -3373,10 +4257,9 @@ int stmmac_dvr_remove(struct device *dev netdev_info(priv->dev, "%s: removing driver", __func__); @@ -5445,7 +5445,7 @@ netif_carrier_off(ndev); unregister_netdev(ndev); if (priv->plat->stmmac_rst) -@@ -3409,20 +4292,19 @@ int stmmac_suspend(struct device *dev) +@@ -3415,20 +4298,19 @@ int stmmac_suspend(struct device *dev) spin_lock_irqsave(&priv->lock, flags); netif_device_detach(ndev); @@ -5470,7 +5470,7 @@ pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ clk_disable(priv->plat->pclk); -@@ -3438,6 +4320,31 @@ int stmmac_suspend(struct device *dev) +@@ -3444,6 +4326,31 @@ int stmmac_suspend(struct device *dev) EXPORT_SYMBOL_GPL(stmmac_suspend); /** @@ -5502,7 +5502,7 @@ * stmmac_resume - resume callback * @dev: device pointer * Description: when resume this function is invoked to setup the DMA and CORE -@@ -3477,10 +4384,8 @@ int stmmac_resume(struct device *dev) +@@ -3483,10 +4390,8 @@ int stmmac_resume(struct device *dev) spin_lock_irqsave(&priv->lock, flags); @@ -5515,7 +5515,7 @@ /* reset private mss value to force mss context settings at * next tso xmit (only used for gmac4). */ -@@ -3492,9 +4397,9 @@ int stmmac_resume(struct device *dev) +@@ -3498,9 +4403,9 @@ int stmmac_resume(struct device *dev) stmmac_init_tx_coalesce(priv); stmmac_set_rx_mode(ndev); diff --git a/target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch b/target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch index 161e9f0e6c..a5d860ddc4 100644 --- a/target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch +++ b/target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch @@ -1347,7 +1347,7 @@ } static void print_pkt(unsigned char *buf, int len) -@@ -783,7 +794,7 @@ static void stmmac_adjust_link(struct ne +@@ -789,7 +800,7 @@ static void stmmac_adjust_link(struct ne struct stmmac_priv *priv = netdev_priv(dev); struct phy_device *phydev = dev->phydev; unsigned long flags; @@ -1356,7 +1356,7 @@ if (!phydev) return; -@@ -796,8 +807,8 @@ static void stmmac_adjust_link(struct ne +@@ -802,8 +813,8 @@ static void stmmac_adjust_link(struct ne /* Now we make sure that we can be in full duplex mode. * If not, we operate in half-duplex mode. 
*/ if (phydev->duplex != priv->oldduplex) { @@ -1367,7 +1367,7 @@ ctrl &= ~priv->hw->link.duplex; else ctrl |= priv->hw->link.duplex; -@@ -808,30 +819,17 @@ static void stmmac_adjust_link(struct ne +@@ -814,30 +825,17 @@ static void stmmac_adjust_link(struct ne stmmac_mac_flow_ctrl(priv, phydev->duplex); if (phydev->speed != priv->speed) { @@ -1406,7 +1406,7 @@ break; default: netif_warn(priv, link, priv->dev, -@@ -847,12 +845,12 @@ static void stmmac_adjust_link(struct ne +@@ -853,12 +851,12 @@ static void stmmac_adjust_link(struct ne writel(ctrl, priv->ioaddr + MAC_CTRL_REG); if (!priv->oldlink) { @@ -1423,7 +1423,7 @@ priv->speed = SPEED_UNKNOWN; priv->oldduplex = DUPLEX_UNKNOWN; } -@@ -915,7 +913,7 @@ static int stmmac_init_phy(struct net_de +@@ -921,7 +919,7 @@ static int stmmac_init_phy(struct net_de char bus_id[MII_BUS_ID_SIZE]; int interface = priv->plat->interface; int max_speed = priv->plat->max_speed; @@ -1432,7 +1432,7 @@ priv->speed = SPEED_UNKNOWN; priv->oldduplex = DUPLEX_UNKNOWN; -@@ -1450,7 +1448,7 @@ static void free_dma_rx_desc_resources(s +@@ -1456,7 +1454,7 @@ static void free_dma_rx_desc_resources(s static void free_dma_tx_desc_resources(struct stmmac_priv *priv) { u32 tx_count = priv->plat->tx_queues_to_use; @@ -1441,7 +1441,7 @@ /* Free TX queue resources */ for (queue = 0; queue < tx_count; queue++) { -@@ -1499,7 +1497,7 @@ static int alloc_dma_rx_desc_resources(s +@@ -1505,7 +1503,7 @@ static int alloc_dma_rx_desc_resources(s sizeof(dma_addr_t), GFP_KERNEL); if (!rx_q->rx_skbuff_dma) @@ -1450,7 +1450,7 @@ rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *), -@@ -1562,13 +1560,13 @@ static int alloc_dma_tx_desc_resources(s +@@ -1568,13 +1566,13 @@ static int alloc_dma_tx_desc_resources(s sizeof(*tx_q->tx_skbuff_dma), GFP_KERNEL); if (!tx_q->tx_skbuff_dma) @@ -1466,7 +1466,7 @@ if (priv->extend_desc) { tx_q->dma_etx = dma_zalloc_coherent(priv->device, -@@ -1578,7 +1576,7 @@ static int alloc_dma_tx_desc_resources(s +@@ -1584,7 +1582,7 @@ static int alloc_dma_tx_desc_resources(s &tx_q->dma_tx_phy, GFP_KERNEL); if (!tx_q->dma_etx) @@ -1475,7 +1475,7 @@ } else { tx_q->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * -@@ -1587,13 +1585,13 @@ static int alloc_dma_tx_desc_resources(s +@@ -1593,13 +1591,13 @@ static int alloc_dma_tx_desc_resources(s &tx_q->dma_tx_phy, GFP_KERNEL); if (!tx_q->dma_tx) @@ -1491,7 +1491,7 @@ free_dma_tx_desc_resources(priv); return ret; -@@ -2896,8 +2894,7 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2902,8 +2900,7 @@ static netdev_tx_t stmmac_tso_xmit(struc priv->xstats.tx_set_ic_bit++; } @@ -1501,7 +1501,7 @@ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)) { -@@ -2975,7 +2972,7 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2981,7 +2978,7 @@ static netdev_tx_t stmmac_xmit(struct sk /* Manage oversized TCP frames for GMAC4 device */ if (skb_is_gso(skb) && priv->tso) { @@ -1510,7 +1510,7 @@ return stmmac_tso_xmit(skb, dev); } -@@ -3106,8 +3103,7 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -3112,8 +3109,7 @@ static netdev_tx_t stmmac_xmit(struct sk priv->xstats.tx_set_ic_bit++; } @@ -1520,7 +1520,7 @@ /* Ready to fill the first descriptor and set the OWN bit w/o any * problems because all the descriptors are actually ready to be -@@ -3984,7 +3980,9 @@ static int stmmac_hw_init(struct stmmac_ +@@ -3990,7 +3986,9 @@ static int stmmac_hw_init(struct stmmac_ struct mac_device_info *mac; /* Identify the MAC HW device */ @@ -1531,7 +1531,7 @@ priv->dev->priv_flags |= IFF_UNICAST_FLT; 
mac = dwmac1000_setup(priv->ioaddr, priv->plat->multicast_filter_bins, -@@ -4004,6 +4002,10 @@ static int stmmac_hw_init(struct stmmac_ +@@ -4010,6 +4008,10 @@ static int stmmac_hw_init(struct stmmac_ priv->hw = mac; @@ -1542,7 +1542,7 @@ /* To use the chained or ring mode */ if (priv->synopsys_id >= DWMAC_CORE_4_00) { priv->hw->mode = &dwmac4_ring_mode_ops; -@@ -4132,8 +4134,15 @@ int stmmac_dvr_probe(struct device *devi +@@ -4138,8 +4140,15 @@ int stmmac_dvr_probe(struct device *devi if ((phyaddr >= 0) && (phyaddr <= 31)) priv->plat->phy_addr = phyaddr; @@ -1559,7 +1559,7 @@ /* Init MAC and get the capabilities */ ret = stmmac_hw_init(priv); -@@ -4150,7 +4159,7 @@ int stmmac_dvr_probe(struct device *devi +@@ -4156,7 +4165,7 @@ int stmmac_dvr_probe(struct device *devi NETIF_F_RXCSUM; if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { @@ -1568,7 +1568,7 @@ priv->tso = true; dev_info(priv->device, "TSO feature enabled\n"); } -@@ -4312,7 +4321,7 @@ int stmmac_suspend(struct device *dev) +@@ -4318,7 +4327,7 @@ int stmmac_suspend(struct device *dev) } spin_unlock_irqrestore(&priv->lock, flags); |