Diffstat (limited to 'target/linux/sunxi')
 target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch
 target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch
 target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch
 target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch
4 files changed, 309 insertions, 331 deletions
diff --git a/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch b/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch index e6ca1447ff..7549d4995a 100644 --- a/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch +++ b/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch @@ -2317,7 +2317,7 @@ priv->xstats.tx_clean++; -@@ -1393,22 +1394,17 @@ static void stmmac_tx_clean(struct stmma +@@ -1398,22 +1399,17 @@ static void stmmac_tx_clean(struct stmma netdev_completed_queue(priv->dev, pkts_compl, bytes_compl); if (unlikely(netif_queue_stopped(priv->dev) && @@ -2345,7 +2345,7 @@ } static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv) -@@ -1512,7 +1508,7 @@ static void stmmac_mmc_setup(struct stmm +@@ -1517,7 +1513,7 @@ static void stmmac_mmc_setup(struct stmm dwmac_mmc_ctrl(priv->mmcaddr, mode); memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); } else @@ -2354,7 +2354,7 @@ } /** -@@ -1525,18 +1521,18 @@ static void stmmac_mmc_setup(struct stmm +@@ -1530,18 +1526,18 @@ static void stmmac_mmc_setup(struct stmm static void stmmac_selec_desc_mode(struct stmmac_priv *priv) { if (priv->plat->enh_desc) { @@ -2377,7 +2377,7 @@ priv->hw->desc = &ndesc_ops; } } -@@ -1577,8 +1573,8 @@ static void stmmac_check_ether_addr(stru +@@ -1582,8 +1578,8 @@ static void stmmac_check_ether_addr(stru priv->dev->dev_addr, 0); if (!is_valid_ether_addr(priv->dev->dev_addr)) eth_hw_addr_random(priv->dev); @@ -2388,7 +2388,7 @@ } } -@@ -1592,16 +1588,12 @@ static void stmmac_check_ether_addr(stru +@@ -1597,16 +1593,12 @@ static void stmmac_check_ether_addr(stru */ static int stmmac_init_dma_engine(struct stmmac_priv *priv) { @@ -2408,7 +2408,7 @@ } if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) -@@ -1613,8 +1605,8 @@ static int stmmac_init_dma_engine(struct +@@ -1618,8 +1610,8 @@ static int stmmac_init_dma_engine(struct return ret; } @@ -2419,7 +2419,7 @@ if (priv->synopsys_id >= DWMAC_CORE_4_00) { priv->rx_tail_addr = priv->dma_rx_phy + -@@ -1686,7 +1678,8 @@ static int stmmac_hw_setup(struct net_de +@@ -1691,7 +1683,8 @@ static int stmmac_hw_setup(struct net_de /* DMA initialization and SW reset */ ret = stmmac_init_dma_engine(priv); if (ret < 0) { @@ -2429,7 +2429,7 @@ return ret; } -@@ -1715,7 +1708,7 @@ static int stmmac_hw_setup(struct net_de +@@ -1720,7 +1713,7 @@ static int stmmac_hw_setup(struct net_de ret = priv->hw->mac->rx_ipc(priv->hw); if (!ret) { @@ -2438,7 +2438,7 @@ priv->plat->rx_coe = STMMAC_RX_COE_NONE; priv->hw->rx_csum = 0; } -@@ -1740,10 +1733,11 @@ static int stmmac_hw_setup(struct net_de +@@ -1745,10 +1738,11 @@ static int stmmac_hw_setup(struct net_de #ifdef CONFIG_DEBUG_FS ret = stmmac_init_fs(dev); if (ret < 0) @@ -2452,7 +2452,7 @@ priv->hw->dma->start_tx(priv->ioaddr); priv->hw->dma->start_rx(priv->ioaddr); -@@ -1798,8 +1792,9 @@ static int stmmac_open(struct net_device +@@ -1803,8 +1797,9 @@ static int stmmac_open(struct net_device priv->hw->pcs != STMMAC_PCS_RTBI) { ret = stmmac_init_phy(dev); if (ret) { @@ -2464,7 +2464,7 @@ return ret; } } -@@ -1814,33 +1809,36 @@ static int stmmac_open(struct net_device +@@ -1819,33 +1814,36 @@ static int stmmac_open(struct net_device ret = alloc_dma_desc_resources(priv); if (ret < 0) { @@ -2508,7 +2508,7 @@ goto init_error; } -@@ -1849,8 +1847,9 @@ static int stmmac_open(struct net_device +@@ -1854,8 +1852,9 @@ static int stmmac_open(struct net_device ret = request_irq(priv->wol_irq, stmmac_interrupt, IRQF_SHARED, dev->name, dev); if (unlikely(ret < 0)) { @@ -2520,7 +2520,7 @@ goto wolirq_error; } } 
-@@ -1860,8 +1859,9 @@ static int stmmac_open(struct net_device +@@ -1865,8 +1864,9 @@ static int stmmac_open(struct net_device ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, dev->name, dev); if (unlikely(ret < 0)) { @@ -2532,7 +2532,7 @@ goto lpiirq_error; } } -@@ -1880,8 +1880,8 @@ wolirq_error: +@@ -1885,8 +1885,8 @@ wolirq_error: init_error: free_dma_desc_resources(priv); dma_desc_error: @@ -2543,7 +2543,7 @@ return ret; } -@@ -1900,10 +1900,9 @@ static int stmmac_release(struct net_dev +@@ -1905,10 +1905,9 @@ static int stmmac_release(struct net_dev del_timer_sync(&priv->eee_ctrl_timer); /* Stop and disconnect the PHY */ @@ -2557,7 +2557,7 @@ } netif_stop_queue(dev); -@@ -1963,13 +1962,13 @@ static void stmmac_tso_allocator(struct +@@ -1968,13 +1967,13 @@ static void stmmac_tso_allocator(struct priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); desc = priv->dma_tx + priv->cur_tx; @@ -2573,7 +2573,7 @@ 0, 0); tmp_len -= TSO_MAX_BUFF_SIZE; -@@ -2014,8 +2013,6 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2019,8 +2018,6 @@ static netdev_tx_t stmmac_tso_xmit(struc u8 proto_hdr_len; int i; @@ -2582,7 +2582,7 @@ /* Compute header lengths */ proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); -@@ -2025,9 +2022,10 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2030,9 +2027,10 @@ static netdev_tx_t stmmac_tso_xmit(struc if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); /* This is a hard error, log it. */ @@ -2595,7 +2595,7 @@ return NETDEV_TX_BUSY; } -@@ -2065,11 +2063,11 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2070,11 +2068,11 @@ static netdev_tx_t stmmac_tso_xmit(struc priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb); priv->tx_skbuff[first_entry] = skb; @@ -2609,7 +2609,7 @@ /* If needed take extra descriptors to fill the remaining payload */ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; -@@ -2098,8 +2096,8 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2103,8 +2101,8 @@ static netdev_tx_t stmmac_tso_xmit(struc priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { @@ -2620,7 +2620,7 @@ netif_stop_queue(dev); } -@@ -2143,7 +2141,7 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2155,7 +2153,7 @@ static netdev_tx_t stmmac_tso_xmit(struc * descriptor and then barrier is needed to make sure that * all is coherent before granting the DMA engine. 
*/ @@ -2629,7 +2629,7 @@ if (netif_msg_pktdata(priv)) { pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", -@@ -2162,11 +2160,9 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2174,11 +2172,9 @@ static netdev_tx_t stmmac_tso_xmit(struc priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, STMMAC_CHAN0); @@ -2641,7 +2641,7 @@ dev_err(priv->device, "Tx dma map failed\n"); dev_kfree_skb(skb); priv->dev->stats.tx_dropped++; -@@ -2198,14 +2194,13 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2210,14 +2206,13 @@ static netdev_tx_t stmmac_xmit(struct sk return stmmac_tso_xmit(skb, dev); } @@ -2659,7 +2659,7 @@ } return NETDEV_TX_BUSY; } -@@ -2258,13 +2253,11 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2270,13 +2265,11 @@ static netdev_tx_t stmmac_xmit(struct sk priv->tx_skbuff[entry] = NULL; @@ -2678,7 +2678,7 @@ priv->tx_skbuff_dma[entry].map_as_page = true; priv->tx_skbuff_dma[entry].len = len; -@@ -2282,9 +2275,10 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2294,9 +2287,10 @@ static netdev_tx_t stmmac_xmit(struct sk if (netif_msg_pktdata(priv)) { void *tx_head; @@ -2692,7 +2692,7 @@ if (priv->extend_desc) tx_head = (void *)priv->dma_etx; -@@ -2293,13 +2287,13 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2305,13 +2299,13 @@ static netdev_tx_t stmmac_xmit(struct sk priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false); @@ -2709,7 +2709,7 @@ netif_stop_queue(dev); } -@@ -2335,13 +2329,11 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2347,13 +2341,11 @@ static netdev_tx_t stmmac_xmit(struct sk if (dma_mapping_error(priv->device, des)) goto dma_map_err; @@ -2728,7 +2728,7 @@ priv->tx_skbuff_dma[first_entry].len = nopaged_len; priv->tx_skbuff_dma[first_entry].last_segment = last_segment; -@@ -2362,7 +2354,7 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2374,7 +2366,7 @@ static netdev_tx_t stmmac_xmit(struct sk * descriptor and then barrier is needed to make sure that * all is coherent before granting the DMA engine. 
*/ @@ -2737,7 +2737,7 @@ } netdev_sent_queue(dev, skb->len); -@@ -2373,12 +2365,10 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2385,12 +2377,10 @@ static netdev_tx_t stmmac_xmit(struct sk priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, STMMAC_CHAN0); @@ -2751,7 +2751,7 @@ dev_kfree_skb(skb); priv->dev->stats.tx_dropped++; return NETDEV_TX_OK; -@@ -2449,16 +2439,16 @@ static inline void stmmac_rx_refill(stru +@@ -2461,16 +2451,16 @@ static inline void stmmac_rx_refill(stru DMA_FROM_DEVICE); if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[entry])) { @@ -2771,7 +2771,7 @@ } if (priv->hw->mode->refill_desc3) priv->hw->mode->refill_desc3(priv, p); -@@ -2466,17 +2456,17 @@ static inline void stmmac_rx_refill(stru +@@ -2478,17 +2468,17 @@ static inline void stmmac_rx_refill(stru if (priv->rx_zeroc_thresh > 0) priv->rx_zeroc_thresh--; @@ -2793,7 +2793,7 @@ entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); } -@@ -2500,7 +2490,7 @@ static int stmmac_rx(struct stmmac_priv +@@ -2512,7 +2502,7 @@ static int stmmac_rx(struct stmmac_priv if (netif_msg_rx_status(priv)) { void *rx_head; @@ -2802,7 +2802,7 @@ if (priv->extend_desc) rx_head = (void *)priv->dma_erx; else -@@ -2562,9 +2552,9 @@ static int stmmac_rx(struct stmmac_priv +@@ -2574,9 +2564,9 @@ static int stmmac_rx(struct stmmac_priv unsigned int des; if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) @@ -2814,7 +2814,7 @@ frame_len = priv->hw->desc->get_rx_frame_len(p, coe); -@@ -2573,9 +2563,9 @@ static int stmmac_rx(struct stmmac_priv +@@ -2585,9 +2575,9 @@ static int stmmac_rx(struct stmmac_priv * ignored */ if (frame_len > priv->dma_buf_sz) { @@ -2827,7 +2827,7 @@ priv->dev->stats.rx_length_errors++; break; } -@@ -2587,11 +2577,11 @@ static int stmmac_rx(struct stmmac_priv +@@ -2599,11 +2589,11 @@ static int stmmac_rx(struct stmmac_priv frame_len -= ETH_FCS_LEN; if (netif_msg_rx_status(priv)) { @@ -2843,7 +2843,7 @@ } /* The zero-copy is always used for all the sizes -@@ -2628,8 +2618,9 @@ static int stmmac_rx(struct stmmac_priv +@@ -2640,8 +2630,9 @@ static int stmmac_rx(struct stmmac_priv } else { skb = priv->rx_skbuff[entry]; if (unlikely(!skb)) { @@ -2855,7 +2855,7 @@ priv->dev->stats.rx_dropped++; break; } -@@ -2645,7 +2636,8 @@ static int stmmac_rx(struct stmmac_priv +@@ -2657,7 +2648,8 @@ static int stmmac_rx(struct stmmac_priv } if (netif_msg_pktdata(priv)) { @@ -2865,7 +2865,7 @@ print_pkt(skb->data, frame_len); } -@@ -2748,7 +2740,7 @@ static int stmmac_change_mtu(struct net_ +@@ -2760,7 +2752,7 @@ static int stmmac_change_mtu(struct net_ int max_mtu; if (netif_running(dev)) { @@ -2874,7 +2874,7 @@ return -EBUSY; } -@@ -2840,7 +2832,7 @@ static irqreturn_t stmmac_interrupt(int +@@ -2852,7 +2844,7 @@ static irqreturn_t stmmac_interrupt(int pm_wakeup_event(priv->device, 0); if (unlikely(!dev)) { @@ -2883,7 +2883,7 @@ return IRQ_NONE; } -@@ -2898,7 +2890,6 @@ static void stmmac_poll_controller(struc +@@ -2910,7 +2902,6 @@ static void stmmac_poll_controller(struc */ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { @@ -2891,7 +2891,7 @@ int ret = -EOPNOTSUPP; if (!netif_running(dev)) -@@ -2908,9 +2899,9 @@ static int stmmac_ioctl(struct net_devic +@@ -2920,9 +2911,9 @@ static int stmmac_ioctl(struct net_devic case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: @@ -2903,7 +2903,7 @@ break; case SIOCSHWTSTAMP: ret = stmmac_hwtstamp_ioctl(dev, rq); -@@ -2938,14 +2929,17 @@ static void sysfs_display_ring(void *hea +@@ -2950,14 +2941,17 @@ static void sysfs_display_ring(void *hea x = 
*(u64 *) ep; seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), @@ -2924,7 +2924,7 @@ p++; } seq_printf(seq, "\n"); -@@ -2977,6 +2971,8 @@ static int stmmac_sysfs_ring_open(struct +@@ -2989,6 +2983,8 @@ static int stmmac_sysfs_ring_open(struct return single_open(file, stmmac_sysfs_ring_read, inode->i_private); } @@ -2933,7 +2933,7 @@ static const struct file_operations stmmac_rings_status_fops = { .owner = THIS_MODULE, .open = stmmac_sysfs_ring_open, -@@ -2999,11 +2995,11 @@ static int stmmac_sysfs_dma_cap_read(str +@@ -3011,11 +3007,11 @@ static int stmmac_sysfs_dma_cap_read(str seq_printf(seq, "\tDMA HW features\n"); seq_printf(seq, "==============================\n"); @@ -2948,7 +2948,7 @@ (priv->dma_cap.half_duplex) ? "Y" : "N"); seq_printf(seq, "\tHash Filter: %s\n", (priv->dma_cap.hash_filter) ? "Y" : "N"); -@@ -3021,9 +3017,9 @@ static int stmmac_sysfs_dma_cap_read(str +@@ -3033,9 +3029,9 @@ static int stmmac_sysfs_dma_cap_read(str (priv->dma_cap.rmon) ? "Y" : "N"); seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", (priv->dma_cap.time_stamp) ? "Y" : "N"); @@ -2960,7 +2960,7 @@ (priv->dma_cap.eee) ? "Y" : "N"); seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); seq_printf(seq, "\tChecksum Offload in TX: %s\n", -@@ -3070,8 +3066,7 @@ static int stmmac_init_fs(struct net_dev +@@ -3082,8 +3078,7 @@ static int stmmac_init_fs(struct net_dev priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) { @@ -2970,7 +2970,7 @@ return -ENOMEM; } -@@ -3083,7 +3078,7 @@ static int stmmac_init_fs(struct net_dev +@@ -3095,7 +3090,7 @@ static int stmmac_init_fs(struct net_dev &stmmac_rings_status_fops); if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) { @@ -2979,7 +2979,7 @@ debugfs_remove_recursive(priv->dbgfs_dir); return -ENOMEM; -@@ -3095,7 +3090,7 @@ static int stmmac_init_fs(struct net_dev +@@ -3107,7 +3102,7 @@ static int stmmac_init_fs(struct net_dev dev, &stmmac_dma_cap_fops); if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) { @@ -2988,7 +2988,7 @@ debugfs_remove_recursive(priv->dbgfs_dir); return -ENOMEM; -@@ -3167,11 +3162,11 @@ static int stmmac_hw_init(struct stmmac_ +@@ -3179,11 +3174,11 @@ static int stmmac_hw_init(struct stmmac_ } else { if (chain_mode) { priv->hw->mode = &chain_mode_ops; @@ -3002,7 +3002,7 @@ priv->mode = STMMAC_RING_MODE; } } -@@ -3179,7 +3174,7 @@ static int stmmac_hw_init(struct stmmac_ +@@ -3191,7 +3186,7 @@ static int stmmac_hw_init(struct stmmac_ /* Get the HW capability (new GMAC newer than 3.50a) */ priv->hw_cap_support = stmmac_get_hw_features(priv); if (priv->hw_cap_support) { @@ -3011,7 +3011,7 @@ /* We can override some gmac/dma configuration fields: e.g. * enh_desc, tx_coe (e.g. 
that are passed through the -@@ -3204,8 +3199,9 @@ static int stmmac_hw_init(struct stmmac_ +@@ -3216,8 +3211,9 @@ static int stmmac_hw_init(struct stmmac_ else if (priv->dma_cap.rx_coe_type1) priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; @@ -3023,7 +3023,7 @@ /* To use alternate (extended), normal or GMAC4 descriptor structures */ if (priv->synopsys_id >= DWMAC_CORE_4_00) -@@ -3215,20 +3211,20 @@ static int stmmac_hw_init(struct stmmac_ +@@ -3227,20 +3223,20 @@ static int stmmac_hw_init(struct stmmac_ if (priv->plat->rx_coe) { priv->hw->rx_csum = priv->plat->rx_coe; @@ -3049,7 +3049,7 @@ return 0; } -@@ -3287,8 +3283,8 @@ int stmmac_dvr_probe(struct device *devi +@@ -3299,8 +3295,8 @@ int stmmac_dvr_probe(struct device *devi priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME); if (IS_ERR(priv->stmmac_clk)) { @@ -3060,7 +3060,7 @@ /* If failed to obtain stmmac_clk and specific clk_csr value * is NOT passed from the platform, probe fail. */ -@@ -3337,7 +3333,7 @@ int stmmac_dvr_probe(struct device *devi +@@ -3349,7 +3345,7 @@ int stmmac_dvr_probe(struct device *devi if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { ndev->hw_features |= NETIF_F_TSO; priv->tso = true; @@ -3069,7 +3069,7 @@ } ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; ndev->watchdog_timeo = msecs_to_jiffies(watchdog); -@@ -3357,13 +3353,13 @@ int stmmac_dvr_probe(struct device *devi +@@ -3369,13 +3365,13 @@ int stmmac_dvr_probe(struct device *devi */ if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) { priv->use_riwt = 1; @@ -3085,7 +3085,7 @@ /* If a specific clk_csr value is passed from the platform * this means that the CSR Clock Range selection cannot be -@@ -3384,15 +3380,17 @@ int stmmac_dvr_probe(struct device *devi +@@ -3396,15 +3392,17 @@ int stmmac_dvr_probe(struct device *devi /* MDIO bus Registration */ ret = stmmac_mdio_register(ndev); if (ret < 0) { @@ -3107,7 +3107,7 @@ goto error_netdev_register; } -@@ -3403,7 +3401,7 @@ error_netdev_register: +@@ -3415,7 +3413,7 @@ error_netdev_register: priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); @@ -3116,7 +3116,7 @@ netif_napi_del(&priv->napi); error_hw_init: clk_disable_unprepare(priv->pclk); -@@ -3427,7 +3425,7 @@ int stmmac_dvr_remove(struct device *dev +@@ -3439,7 +3437,7 @@ int stmmac_dvr_remove(struct device *dev struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); @@ -3125,7 +3125,7 @@ priv->hw->dma->stop_rx(priv->ioaddr); priv->hw->dma->stop_tx(priv->ioaddr); -@@ -3465,8 +3463,8 @@ int stmmac_suspend(struct device *dev) +@@ -3477,8 +3475,8 @@ int stmmac_suspend(struct device *dev) if (!ndev || !netif_running(ndev)) return 0; @@ -3136,7 +3136,7 @@ spin_lock_irqsave(&priv->lock, flags); -@@ -3560,8 +3558,8 @@ int stmmac_resume(struct device *dev) +@@ -3572,8 +3570,8 @@ int stmmac_resume(struct device *dev) spin_unlock_irqrestore(&priv->lock, flags); diff --git a/target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch b/target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch index 95eb3deeb0..cd3e52a212 100644 --- a/target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch +++ b/target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch @@ -418,7 +418,7 @@ }; static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg, -@@ -179,11 +177,19 @@ static int meson8b_init_prg_eth(struct m +@@ -181,11 +179,19 @@ static int meson8b_init_prg_eth(struct m { int ret; unsigned long clk_rate; @@ -439,7 +439,7 @@ case 
PHY_INTERFACE_MODE_RGMII_TXID: /* Generate a 25MHz clock for the PHY */ clk_rate = 25 * 1000 * 1000; -@@ -196,9 +202,8 @@ static int meson8b_init_prg_eth(struct m +@@ -198,9 +204,8 @@ static int meson8b_init_prg_eth(struct m meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_INVERTED_RMII_CLK, 0); @@ -450,7 +450,7 @@ break; case PHY_INTERFACE_MODE_RMII: -@@ -284,6 +289,11 @@ static int meson8b_dwmac_probe(struct pl +@@ -286,6 +291,11 @@ static int meson8b_dwmac_probe(struct pl goto err_remove_config_dt; } @@ -1671,7 +1671,7 @@ * stmmac_dma_operation_mode - HW DMA operation mode * @priv: driver private structure * Description: it is used for configuring the DMA operation mode register in -@@ -1686,10 +1687,6 @@ static int stmmac_hw_setup(struct net_de +@@ -1691,10 +1692,6 @@ static int stmmac_hw_setup(struct net_de /* Copy the MAC addr into the HW */ priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0); @@ -1682,7 +1682,7 @@ /* PS and related bits will be programmed according to the speed */ if (priv->hw->pcs) { int speed = priv->plat->mac_port_sel_speed; -@@ -1706,6 +1703,10 @@ static int stmmac_hw_setup(struct net_de +@@ -1711,6 +1708,10 @@ static int stmmac_hw_setup(struct net_de /* Initialize the MAC Core */ priv->hw->mac->core_init(priv->hw, dev->mtu); @@ -1693,7 +1693,7 @@ ret = priv->hw->mac->rx_ipc(priv->hw); if (!ret) { netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); -@@ -1726,8 +1727,10 @@ static int stmmac_hw_setup(struct net_de +@@ -1731,8 +1732,10 @@ static int stmmac_hw_setup(struct net_de if (init_ptp) { ret = stmmac_init_ptp(priv); @@ -1706,7 +1706,7 @@ } #ifdef CONFIG_DEBUG_FS -@@ -1741,11 +1744,6 @@ static int stmmac_hw_setup(struct net_de +@@ -1746,11 +1749,6 @@ static int stmmac_hw_setup(struct net_de priv->hw->dma->start_tx(priv->ioaddr); priv->hw->dma->start_rx(priv->ioaddr); @@ -1718,7 +1718,7 @@ priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { -@@ -2535,7 +2533,7 @@ static int stmmac_rx(struct stmmac_priv +@@ -2547,7 +2545,7 @@ static int stmmac_rx(struct stmmac_priv if (unlikely(status == discard_frame)) { priv->dev->stats.rx_errors++; if (priv->hwts_rx_en && !priv->extend_desc) { @@ -1727,7 +1727,7 @@ * with timestamp value, hence reinitialize * them in stmmac_rx_refill() function so that * device can reuse it. -@@ -2558,7 +2556,7 @@ static int stmmac_rx(struct stmmac_priv +@@ -2570,7 +2568,7 @@ static int stmmac_rx(struct stmmac_priv frame_len = priv->hw->desc->get_rx_frame_len(p, coe); @@ -1736,7 +1736,7 @@ * (preallocated during init) then the packet is * ignored */ -@@ -2778,7 +2776,7 @@ static netdev_features_t stmmac_fix_feat +@@ -2790,7 +2788,7 @@ static netdev_features_t stmmac_fix_feat /* Some GMAC devices have a bugged Jumbo frame support that * needs to have the Tx COE disabled for oversized frames * (due to limited buffer sizes). 
In this case we disable @@ -1745,7 +1745,7 @@ */ if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) features &= ~NETIF_F_CSUM_MASK; -@@ -2924,9 +2922,7 @@ static void sysfs_display_ring(void *hea +@@ -2936,9 +2934,7 @@ static void sysfs_display_ring(void *hea struct dma_desc *p = (struct dma_desc *)head; for (i = 0; i < size; i++) { @@ -1755,7 +1755,7 @@ seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), le32_to_cpu(ep->basic.des0), -@@ -2935,7 +2931,6 @@ static void sysfs_display_ring(void *hea +@@ -2947,7 +2943,6 @@ static void sysfs_display_ring(void *hea le32_to_cpu(ep->basic.des3)); ep++; } else { @@ -1763,7 +1763,7 @@ seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), le32_to_cpu(p->des0), le32_to_cpu(p->des1), -@@ -3005,7 +3000,7 @@ static int stmmac_sysfs_dma_cap_read(str +@@ -3017,7 +3012,7 @@ static int stmmac_sysfs_dma_cap_read(str (priv->dma_cap.hash_filter) ? "Y" : "N"); seq_printf(seq, "\tMultiple MAC address registers: %s\n", (priv->dma_cap.multi_addr) ? "Y" : "N"); @@ -1772,7 +1772,7 @@ (priv->dma_cap.pcs) ? "Y" : "N"); seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", (priv->dma_cap.sma_mdio) ? "Y" : "N"); -@@ -3281,44 +3276,8 @@ int stmmac_dvr_probe(struct device *devi +@@ -3293,44 +3288,8 @@ int stmmac_dvr_probe(struct device *devi if ((phyaddr >= 0) && (phyaddr <= 31)) priv->plat->phy_addr = phyaddr; @@ -1819,7 +1819,7 @@ /* Init MAC and get the capabilities */ ret = stmmac_hw_init(priv); -@@ -3404,10 +3363,6 @@ error_netdev_register: +@@ -3416,10 +3375,6 @@ error_netdev_register: error_mdio_register: netif_napi_del(&priv->napi); error_hw_init: @@ -1830,7 +1830,7 @@ free_netdev(ndev); return ret; -@@ -3433,10 +3388,10 @@ int stmmac_dvr_remove(struct device *dev +@@ -3445,10 +3400,10 @@ int stmmac_dvr_remove(struct device *dev stmmac_set_mac(priv->ioaddr, false); netif_carrier_off(ndev); unregister_netdev(ndev); @@ -1845,7 +1845,7 @@ if (priv->hw->pcs != STMMAC_PCS_RGMII && priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) -@@ -3485,14 +3440,14 @@ int stmmac_suspend(struct device *dev) +@@ -3497,14 +3452,14 @@ int stmmac_suspend(struct device *dev) stmmac_set_mac(priv->ioaddr, false); pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ @@ -1864,7 +1864,7 @@ return 0; } EXPORT_SYMBOL_GPL(stmmac_suspend); -@@ -3525,9 +3480,9 @@ int stmmac_resume(struct device *dev) +@@ -3537,9 +3492,9 @@ int stmmac_resume(struct device *dev) priv->irq_wake = 0; } else { pinctrl_pm_select_default_state(priv->device); diff --git a/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch b/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch index 3d9bbb2295..a1b018186f 100644 --- a/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch +++ b/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch @@ -3095,7 +3095,7 @@ if (priv->hw->mode->set_16kib_bfsize) bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); -@@ -1033,257 +1234,516 @@ static int init_dma_desc_rings(struct ne +@@ -1033,235 +1234,409 @@ static int init_dma_desc_rings(struct ne priv->dma_buf_sz = bfsize; @@ -3351,10 +3351,17 @@ - priv->tx_skbuff_dma[i].buf, - priv->tx_skbuff_dma[i].len, - DMA_TO_DEVICE); +- } + for (i = 0; i < DMA_TX_SIZE; i++) + stmmac_free_tx_buffer(priv, queue, i); +} -+ + +- if (priv->tx_skbuff[i]) { +- dev_kfree_skb_any(priv->tx_skbuff[i]); +- priv->tx_skbuff[i] = NULL; +- priv->tx_skbuff_dma[i].buf = 0; +- priv->tx_skbuff_dma[i].map_as_page = false; +- } +/** + 
* free_dma_rx_desc_resources - free RX dma desc resources + * @priv: private structure @@ -3383,10 +3390,11 @@ + + kfree(rx_q->rx_skbuff_dma); + kfree(rx_q->rx_skbuff); -+ } -+} -+ -+/** + } + } + + /** +- * alloc_dma_desc_resources - alloc TX/RX resources. + * free_dma_tx_desc_resources - free TX dma desc resources + * @priv: private structure + */ @@ -3419,36 +3427,90 @@ + +/** + * alloc_dma_rx_desc_resources - alloc RX resources. -+ * @priv: private structure -+ * Description: according to which descriptor can be used (extend or basic) -+ * this function allocates the resources for TX and RX paths. In case of -+ * reception, for example, it pre-allocated the RX socket buffer in order to -+ * allow zero-copy mechanism. -+ */ + * @priv: private structure + * Description: according to which descriptor can be used (extend or basic) + * this function allocates the resources for TX and RX paths. In case of + * reception, for example, it pre-allocated the RX socket buffer in order to + * allow zero-copy mechanism. + */ +-static int alloc_dma_desc_resources(struct stmmac_priv *priv) +static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) -+{ + { + u32 rx_count = priv->plat->rx_queues_to_use; -+ int ret = -ENOMEM; + int ret = -ENOMEM; + u32 queue; -+ + +- priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t), +- GFP_KERNEL); +- if (!priv->rx_skbuff_dma) +- return -ENOMEM; + /* RX queues buffers and DMA */ + for (queue = 0; queue < rx_count; queue++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ + +- priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *), +- GFP_KERNEL); +- if (!priv->rx_skbuff) +- goto err_rx_skbuff; +- +- priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, +- sizeof(*priv->tx_skbuff_dma), +- GFP_KERNEL); +- if (!priv->tx_skbuff_dma) +- goto err_tx_skbuff_dma; +- +- priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *), +- GFP_KERNEL); +- if (!priv->tx_skbuff) +- goto err_tx_skbuff; +- +- if (priv->extend_desc) { +- priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct +- dma_extended_desc), +- &priv->dma_rx_phy, +- GFP_KERNEL); +- if (!priv->dma_erx) +- goto err_dma; + rx_q->queue_index = queue; + rx_q->priv_data = priv; -+ + +- priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * +- sizeof(struct +- dma_extended_desc), +- &priv->dma_tx_phy, + rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, + sizeof(dma_addr_t), -+ GFP_KERNEL); + GFP_KERNEL); +- if (!priv->dma_etx) { +- dma_free_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct dma_extended_desc), +- priv->dma_erx, priv->dma_rx_phy); +- goto err_dma; +- } +- } else { +- priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct dma_desc), +- &priv->dma_rx_phy, +- GFP_KERNEL); +- if (!priv->dma_rx) +- goto err_dma; + if (!rx_q->rx_skbuff_dma) + return -ENOMEM; -+ + +- priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * +- sizeof(struct dma_desc), +- &priv->dma_tx_phy, +- GFP_KERNEL); +- if (!priv->dma_tx) { +- dma_free_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct dma_desc), +- priv->dma_rx, priv->dma_rx_phy); + rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE, + sizeof(struct sk_buff *), + GFP_KERNEL); + if (!rx_q->rx_skbuff) -+ goto err_dma; + goto err_dma; + + if (priv->extend_desc) { + rx_q->dma_erx = dma_zalloc_coherent(priv->device, @@ -3469,12 +3531,19 @@ + GFP_KERNEL); + if (!rx_q->dma_rx) + goto err_dma; -+ } -+ } -+ -+ return 0; -+ -+err_dma: + } + } + + return 0; + + 
err_dma: +- kfree(priv->tx_skbuff); +-err_tx_skbuff: +- kfree(priv->tx_skbuff_dma); +-err_tx_skbuff_dma: +- kfree(priv->rx_skbuff); +-err_rx_skbuff: +- kfree(priv->rx_skbuff_dma); + free_dma_rx_desc_resources(priv); + + return ret; @@ -3531,7 +3600,7 @@ + GFP_KERNEL); + if (!tx_q->dma_tx) + goto err_dma_buffers; - } ++ } + } + + return 0; @@ -3560,126 +3629,85 @@ + + ret = alloc_dma_tx_desc_resources(priv); + -+ return ret; -+} -+ + return ret; + } + +/** + * free_dma_desc_resources - free dma desc resources + * @priv: private structure + */ -+static void free_dma_desc_resources(struct stmmac_priv *priv) -+{ + static void free_dma_desc_resources(struct stmmac_priv *priv) + { +- /* Release the DMA TX/RX socket buffers */ +- dma_free_rx_skbufs(priv); +- dma_free_tx_skbufs(priv); +- +- /* Free DMA regions of consistent memory previously allocated */ +- if (!priv->extend_desc) { +- dma_free_coherent(priv->device, +- DMA_TX_SIZE * sizeof(struct dma_desc), +- priv->dma_tx, priv->dma_tx_phy); +- dma_free_coherent(priv->device, +- DMA_RX_SIZE * sizeof(struct dma_desc), +- priv->dma_rx, priv->dma_rx_phy); +- } else { +- dma_free_coherent(priv->device, DMA_TX_SIZE * +- sizeof(struct dma_extended_desc), +- priv->dma_etx, priv->dma_tx_phy); +- dma_free_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct dma_extended_desc), +- priv->dma_erx, priv->dma_rx_phy); +- } +- kfree(priv->rx_skbuff_dma); +- kfree(priv->rx_skbuff); +- kfree(priv->tx_skbuff_dma); +- kfree(priv->tx_skbuff); + /* Release the DMA RX socket buffers */ + free_dma_rx_desc_resources(priv); + + /* Release the DMA TX socket buffers */ + free_dma_tx_desc_resources(priv); -+} -+ -+/** -+ * stmmac_mac_enable_rx_queues - Enable MAC rx queues -+ * @priv: driver private structure -+ * Description: It is used for enabling the rx queues in the MAC -+ */ -+static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) -+{ + } + + /** +@@ -1271,19 +1646,104 @@ static void free_dma_desc_resources(stru + */ + static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) + { +- int rx_count = priv->dma_cap.number_rx_queues; +- int queue = 0; + u32 rx_queues_count = priv->plat->rx_queues_to_use; + int queue; + u8 mode; -- if (priv->tx_skbuff[i]) { -- dev_kfree_skb_any(priv->tx_skbuff[i]); -- priv->tx_skbuff[i] = NULL; -- priv->tx_skbuff_dma[i].buf = 0; -- priv->tx_skbuff_dma[i].map_as_page = false; -- } +- /* If GMAC does not have multiple queues, then this is not necessary*/ +- if (rx_count == 1) +- return; + for (queue = 0; queue < rx_queues_count; queue++) { + mode = priv->plat->rx_queues_cfg[queue].mode_to_use; + priv->hw->mac->rx_queue_enable(priv->hw, mode, queue); - } - } ++ } ++} - /** -- * alloc_dma_desc_resources - alloc TX/RX resources. -- * @priv: private structure -- * Description: according to which descriptor can be used (extend or basic) -- * this function allocates the resources for TX and RX paths. In case of -- * reception, for example, it pre-allocated the RX socket buffer in order to -- * allow zero-copy mechanism. +- /** +- * If the core is synthesized with multiple rx queues / multiple +- * dma channels, then rx queues will be disabled by default. +- * For now only rx queue 0 is enabled. 
+- */ +- priv->hw->mac->rx_queue_enable(priv->hw, queue); ++/** + * stmmac_start_rx_dma - start RX DMA channel + * @priv: driver private structure + * @chan: RX channel index + * Description: + * This starts a RX DMA channel - */ --static int alloc_dma_desc_resources(struct stmmac_priv *priv) ++ */ +static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) - { -- int ret = -ENOMEM; -- -- priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t), -- GFP_KERNEL); -- if (!priv->rx_skbuff_dma) -- return -ENOMEM; -- -- priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *), -- GFP_KERNEL); -- if (!priv->rx_skbuff) -- goto err_rx_skbuff; -- -- priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, -- sizeof(*priv->tx_skbuff_dma), -- GFP_KERNEL); -- if (!priv->tx_skbuff_dma) -- goto err_tx_skbuff_dma; -- -- priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *), -- GFP_KERNEL); -- if (!priv->tx_skbuff) -- goto err_tx_skbuff; -- -- if (priv->extend_desc) { -- priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * -- sizeof(struct -- dma_extended_desc), -- &priv->dma_rx_phy, -- GFP_KERNEL); -- if (!priv->dma_erx) -- goto err_dma; -- -- priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * -- sizeof(struct -- dma_extended_desc), -- &priv->dma_tx_phy, -- GFP_KERNEL); -- if (!priv->dma_etx) { -- dma_free_coherent(priv->device, DMA_RX_SIZE * -- sizeof(struct dma_extended_desc), -- priv->dma_erx, priv->dma_rx_phy); -- goto err_dma; -- } -- } else { -- priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * -- sizeof(struct dma_desc), -- &priv->dma_rx_phy, -- GFP_KERNEL); -- if (!priv->dma_rx) -- goto err_dma; ++{ + netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); + priv->hw->dma->start_rx(priv->ioaddr, chan); +} - -- priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * -- sizeof(struct dma_desc), -- &priv->dma_tx_phy, -- GFP_KERNEL); -- if (!priv->dma_tx) { -- dma_free_coherent(priv->device, DMA_RX_SIZE * -- sizeof(struct dma_desc), -- priv->dma_rx, priv->dma_rx_phy); -- goto err_dma; -- } -- } ++ +/** + * stmmac_start_tx_dma - start TX DMA channel + * @priv: driver private structure @@ -3692,8 +3720,7 @@ + netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); + priv->hw->dma->start_tx(priv->ioaddr, chan); +} - -- return 0; ++ +/** + * stmmac_stop_rx_dma - stop RX DMA channel + * @priv: driver private structure @@ -3706,16 +3733,7 @@ + netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); + priv->hw->dma->stop_rx(priv->ioaddr, chan); +} - --err_dma: -- kfree(priv->tx_skbuff); --err_tx_skbuff: -- kfree(priv->tx_skbuff_dma); --err_tx_skbuff_dma: -- kfree(priv->rx_skbuff); --err_rx_skbuff: -- kfree(priv->rx_skbuff_dma); -- return ret; ++ +/** + * stmmac_stop_tx_dma - stop TX DMA channel + * @priv: driver private structure @@ -3727,9 +3745,8 @@ +{ + netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); + priv->hw->dma->stop_tx(priv->ioaddr, chan); - } - --static void free_dma_desc_resources(struct stmmac_priv *priv) ++} ++ +/** + * stmmac_start_all_dma - start all RX and TX DMA channels + * @priv: driver private structure @@ -3737,31 +3754,7 @@ + * This starts all the RX and TX DMA channels + */ +static void stmmac_start_all_dma(struct stmmac_priv *priv) - { -- /* Release the DMA TX/RX socket buffers */ -- dma_free_rx_skbufs(priv); -- dma_free_tx_skbufs(priv); -- -- /* Free DMA regions of consistent memory previously allocated */ -- if 
(!priv->extend_desc) { -- dma_free_coherent(priv->device, -- DMA_TX_SIZE * sizeof(struct dma_desc), -- priv->dma_tx, priv->dma_tx_phy); -- dma_free_coherent(priv->device, -- DMA_RX_SIZE * sizeof(struct dma_desc), -- priv->dma_rx, priv->dma_rx_phy); -- } else { -- dma_free_coherent(priv->device, DMA_TX_SIZE * -- sizeof(struct dma_extended_desc), -- priv->dma_etx, priv->dma_tx_phy); -- dma_free_coherent(priv->device, DMA_RX_SIZE * -- sizeof(struct dma_extended_desc), -- priv->dma_erx, priv->dma_rx_phy); -- } -- kfree(priv->rx_skbuff_dma); -- kfree(priv->rx_skbuff); -- kfree(priv->tx_skbuff_dma); -- kfree(priv->tx_skbuff); ++{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 chan = 0; @@ -3771,38 +3764,23 @@ + + for (chan = 0; chan < tx_channels_count; chan++) + stmmac_start_tx_dma(priv, chan); - } - - /** -- * stmmac_mac_enable_rx_queues - Enable MAC rx queues -- * @priv: driver private structure -- * Description: It is used for enabling the rx queues in the MAC ++} ++ ++/** + * stmmac_stop_all_dma - stop all RX and TX DMA channels + * @priv: driver private structure + * Description: + * This stops the RX and TX DMA channels - */ --static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) ++ */ +static void stmmac_stop_all_dma(struct stmmac_priv *priv) - { -- int rx_count = priv->dma_cap.number_rx_queues; -- int queue = 0; ++{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 chan = 0; - -- /* If GMAC does not have multiple queues, then this is not necessary*/ -- if (rx_count == 1) -- return; ++ + for (chan = 0; chan < rx_channels_count; chan++) + stmmac_stop_rx_dma(priv, chan); - -- /** -- * If the core is synthesized with multiple rx queues / multiple -- * dma channels, then rx queues will be disabled by default. -- * For now only rx queue 0 is enabled. 
-- */ -- priv->hw->mac->rx_queue_enable(priv->hw, queue); ++ + for (chan = 0; chan < tx_channels_count; chan++) + stmmac_stop_tx_dma(priv, chan); } @@ -3897,7 +3875,7 @@ status = priv->hw->desc->tx_status(&priv->dev->stats, &priv->xstats, p, -@@ -1357,48 +1842,51 @@ static void stmmac_tx_clean(struct stmma +@@ -1362,48 +1847,51 @@ static void stmmac_tx_clean(struct stmma stmmac_get_tx_hwtstamp(priv, p, skb); } @@ -3967,7 +3945,7 @@ } if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { -@@ -1408,45 +1896,76 @@ static void stmmac_tx_clean(struct stmma +@@ -1413,45 +1901,76 @@ static void stmmac_tx_clean(struct stmma netif_tx_unlock(priv->dev); } @@ -4059,7 +4037,7 @@ } /** -@@ -1458,31 +1977,43 @@ static void stmmac_tx_err(struct stmmac_ +@@ -1463,31 +1982,43 @@ static void stmmac_tx_err(struct stmmac_ */ static void stmmac_dma_interrupt(struct stmmac_priv *priv) { @@ -4124,7 +4102,7 @@ } /** -@@ -1589,6 +2120,13 @@ static void stmmac_check_ether_addr(stru +@@ -1594,6 +2125,13 @@ static void stmmac_check_ether_addr(stru */ static int stmmac_init_dma_engine(struct stmmac_priv *priv) { @@ -4138,7 +4116,7 @@ int atds = 0; int ret = 0; -@@ -1606,19 +2144,49 @@ static int stmmac_init_dma_engine(struct +@@ -1611,19 +2149,49 @@ static int stmmac_init_dma_engine(struct return ret; } @@ -4200,7 +4178,7 @@ } if (priv->plat->axi && priv->hw->dma->axi) -@@ -1636,8 +2204,12 @@ static int stmmac_init_dma_engine(struct +@@ -1641,8 +2209,12 @@ static int stmmac_init_dma_engine(struct static void stmmac_tx_timer(unsigned long data) { struct stmmac_priv *priv = (struct stmmac_priv *)data; @@ -4214,7 +4192,7 @@ } /** -@@ -1659,6 +2231,196 @@ static void stmmac_init_tx_coalesce(stru +@@ -1664,6 +2236,196 @@ static void stmmac_init_tx_coalesce(stru add_timer(&priv->txtimer); } @@ -4411,7 +4389,7 @@ /** * stmmac_hw_setup - setup mac in a usable state. * @dev : pointer to the device structure. -@@ -1674,6 +2436,9 @@ static void stmmac_init_tx_coalesce(stru +@@ -1679,6 +2441,9 @@ static void stmmac_init_tx_coalesce(stru static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) { struct stmmac_priv *priv = netdev_priv(dev); @@ -4421,7 +4399,7 @@ int ret; /* DMA initialization and SW reset */ -@@ -1703,9 +2468,9 @@ static int stmmac_hw_setup(struct net_de +@@ -1708,9 +2473,9 @@ static int stmmac_hw_setup(struct net_de /* Initialize the MAC Core */ priv->hw->mac->core_init(priv->hw, dev->mtu); @@ -4434,7 +4412,7 @@ ret = priv->hw->mac->rx_ipc(priv->hw); if (!ret) { -@@ -1715,10 +2480,7 @@ static int stmmac_hw_setup(struct net_de +@@ -1720,10 +2485,7 @@ static int stmmac_hw_setup(struct net_de } /* Enable the MAC Rx/Tx */ @@ -4446,7 +4424,7 @@ /* Set the HW DMA mode and the COE */ stmmac_dma_operation_mode(priv); -@@ -1726,6 +2488,10 @@ static int stmmac_hw_setup(struct net_de +@@ -1731,6 +2493,10 @@ static int stmmac_hw_setup(struct net_de stmmac_mmc_setup(priv); if (init_ptp) { @@ -4457,7 +4435,7 @@ ret = stmmac_init_ptp(priv); if (ret == -EOPNOTSUPP) netdev_warn(priv->dev, "PTP not supported by HW\n"); -@@ -1740,35 +2506,37 @@ static int stmmac_hw_setup(struct net_de +@@ -1745,35 +2511,37 @@ static int stmmac_hw_setup(struct net_de __func__); #endif /* Start the ball rolling... */ @@ -4509,7 +4487,7 @@ /** * stmmac_open - open entry point of the driver * @dev : pointer to the device structure. 
-@@ -1837,7 +2605,7 @@ static int stmmac_open(struct net_device +@@ -1842,7 +2610,7 @@ static int stmmac_open(struct net_device netdev_err(priv->dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n", __func__, dev->irq, ret); @@ -4518,7 +4496,7 @@ } /* Request the Wake IRQ in case of another line is used for WoL */ -@@ -1864,8 +2632,8 @@ static int stmmac_open(struct net_device +@@ -1869,8 +2637,8 @@ static int stmmac_open(struct net_device } } @@ -4529,7 +4507,7 @@ return 0; -@@ -1874,7 +2642,12 @@ lpiirq_error: +@@ -1879,7 +2647,12 @@ lpiirq_error: free_irq(priv->wol_irq, dev); wolirq_error: free_irq(dev->irq, dev); @@ -4542,7 +4520,7 @@ init_error: free_dma_desc_resources(priv); dma_desc_error: -@@ -1903,9 +2676,9 @@ static int stmmac_release(struct net_dev +@@ -1908,9 +2681,9 @@ static int stmmac_release(struct net_dev phy_disconnect(dev->phydev); } @@ -4554,7 +4532,7 @@ del_timer_sync(&priv->txtimer); -@@ -1917,14 +2690,13 @@ static int stmmac_release(struct net_dev +@@ -1922,14 +2695,13 @@ static int stmmac_release(struct net_dev free_irq(priv->lpi_irq, dev); /* Stop TX/RX DMA and clear the descriptors */ @@ -4571,7 +4549,7 @@ netif_carrier_off(dev); -@@ -1943,22 +2715,24 @@ static int stmmac_release(struct net_dev +@@ -1948,22 +2720,24 @@ static int stmmac_release(struct net_dev * @des: buffer start address * @total_len: total length to fill in descriptors * @last_segmant: condition for the last descriptor @@ -4600,7 +4578,7 @@ desc->des0 = cpu_to_le32(des + (total_len - tmp_len)); buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? -@@ -1966,7 +2740,7 @@ static void stmmac_tso_allocator(struct +@@ -1971,7 +2745,7 @@ static void stmmac_tso_allocator(struct priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, 0, 1, @@ -4609,7 +4587,7 @@ 0, 0); tmp_len -= TSO_MAX_BUFF_SIZE; -@@ -2002,23 +2776,28 @@ static void stmmac_tso_allocator(struct +@@ -2007,23 +2781,28 @@ static void stmmac_tso_allocator(struct */ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -4644,7 +4622,7 @@ /* This is a hard error, log it. 
*/ netdev_err(priv->dev, "%s: Tx Ring full when queue awake\n", -@@ -2033,10 +2812,10 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2038,10 +2817,10 @@ static netdev_tx_t stmmac_tso_xmit(struc /* set new MSS value if needed */ if (mss != priv->mss) { @@ -4657,7 +4635,7 @@ } if (netif_msg_tx_queued(priv)) { -@@ -2046,9 +2825,9 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2051,9 +2830,9 @@ static netdev_tx_t stmmac_tso_xmit(struc skb->data_len); } @@ -4669,7 +4647,7 @@ first = desc; /* first descriptor: fill Headers on Buf1 */ -@@ -2057,9 +2836,8 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2062,9 +2841,8 @@ static netdev_tx_t stmmac_tso_xmit(struc if (dma_mapping_error(priv->device, des)) goto dma_map_err; @@ -4681,7 +4659,7 @@ first->des0 = cpu_to_le32(des); -@@ -2070,7 +2848,7 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2075,7 +2853,7 @@ static netdev_tx_t stmmac_tso_xmit(struc /* If needed take extra descriptors to fill the remaining payload */ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; @@ -4690,7 +4668,7 @@ /* Prepare fragments */ for (i = 0; i < nfrags; i++) { -@@ -2079,24 +2857,34 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2084,24 +2862,34 @@ static netdev_tx_t stmmac_tso_xmit(struc des = skb_frag_dma_map(priv->device, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); @@ -4734,7 +4712,7 @@ } dev->stats.tx_bytes += skb->len; -@@ -2128,7 +2916,7 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2133,7 +2921,7 @@ static netdev_tx_t stmmac_tso_xmit(struc priv->hw->desc->prepare_tso_tx_desc(first, 1, proto_hdr_len, pay_len, @@ -4743,7 +4721,7 @@ tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); /* If context desc is used to change MSS */ -@@ -2143,20 +2931,20 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2155,20 +2943,20 @@ static netdev_tx_t stmmac_tso_xmit(struc if (netif_msg_pktdata(priv)) { pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", @@ -4770,7 +4748,7 @@ return NETDEV_TX_OK; -@@ -2180,21 +2968,27 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2192,21 +2980,27 @@ static netdev_tx_t stmmac_xmit(struct sk struct stmmac_priv *priv = netdev_priv(dev); unsigned int nopaged_len = skb_headlen(skb); int i, csum_insertion = 0, is_jumbo = 0; @@ -4802,7 +4780,7 @@ /* This is a hard error, log it. 
*/ netdev_err(priv->dev, "%s: Tx Ring full when queue awake\n", -@@ -2206,20 +3000,18 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2218,20 +3012,18 @@ static netdev_tx_t stmmac_xmit(struct sk if (priv->tx_path_in_lpi_mode) stmmac_disable_eee_mode(priv); @@ -4826,7 +4804,7 @@ enh_desc = priv->plat->enh_desc; /* To program the descriptors according to the size of the frame */ if (enh_desc) -@@ -2227,7 +3019,7 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2239,7 +3031,7 @@ static netdev_tx_t stmmac_xmit(struct sk if (unlikely(is_jumbo) && likely(priv->synopsys_id < DWMAC_CORE_4_00)) { @@ -4835,7 +4813,7 @@ if (unlikely(entry < 0)) goto dma_map_err; } -@@ -2240,48 +3032,56 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2252,48 +3044,56 @@ static netdev_tx_t stmmac_xmit(struct sk entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); if (likely(priv->extend_desc)) @@ -4905,7 +4883,7 @@ priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false); -@@ -2289,10 +3089,10 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2301,10 +3101,10 @@ static netdev_tx_t stmmac_xmit(struct sk print_pkt(skb->data, skb->len); } @@ -4918,7 +4896,7 @@ } dev->stats.tx_bytes += skb->len; -@@ -2327,14 +3127,14 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2339,14 +3139,14 @@ static netdev_tx_t stmmac_xmit(struct sk if (dma_mapping_error(priv->device, des)) goto dma_map_err; @@ -4936,7 +4914,7 @@ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)) { -@@ -2346,7 +3146,7 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2358,7 +3158,7 @@ static netdev_tx_t stmmac_xmit(struct sk /* Prepare the first descriptor setting the OWN bit too */ priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len, csum_insertion, priv->mode, 1, @@ -4945,7 +4923,7 @@ /* The own bit must be the latest setting done when prepare the * descriptor and then barrier is needed to make sure that -@@ -2355,13 +3155,13 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2367,13 +3167,13 @@ static netdev_tx_t stmmac_xmit(struct sk dma_wmb(); } @@ -4962,7 +4940,7 @@ return NETDEV_TX_OK; -@@ -2389,9 +3189,9 @@ static void stmmac_rx_vlan(struct net_de +@@ -2401,9 +3201,9 @@ static void stmmac_rx_vlan(struct net_de } @@ -4974,7 +4952,7 @@ return 0; return 1; -@@ -2400,30 +3200,33 @@ static inline int stmmac_rx_threshold_co +@@ -2412,30 +3212,33 @@ static inline int stmmac_rx_threshold_co /** * stmmac_rx_refill - refill used skb preallocated buffers * @priv: driver private structure @@ -5015,7 +4993,7 @@ if (unlikely(net_ratelimit())) dev_err(priv->device, "fail to alloc skb entry %d\n", -@@ -2431,28 +3234,28 @@ static inline void stmmac_rx_refill(stru +@@ -2443,28 +3246,28 @@ static inline void stmmac_rx_refill(stru break; } @@ -5052,7 +5030,7 @@ netif_dbg(priv, rx_status, priv->dev, "refill entry #%d\n", entry); -@@ -2468,31 +3271,33 @@ static inline void stmmac_rx_refill(stru +@@ -2480,31 +3283,33 @@ static inline void stmmac_rx_refill(stru entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); } @@ -5093,7 +5071,7 @@ priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true); } -@@ -2502,9 +3307,9 @@ static int stmmac_rx(struct stmmac_priv +@@ -2514,9 +3319,9 @@ static int stmmac_rx(struct stmmac_priv struct dma_desc *np; if (priv->extend_desc) @@ -5105,7 +5083,7 @@ /* read the status of the incoming frame */ status = priv->hw->desc->rx_status(&priv->dev->stats, -@@ -2515,20 +3320,20 @@ static int stmmac_rx(struct stmmac_priv +@@ -2527,20 +3332,20 @@ static int stmmac_rx(struct stmmac_priv count++; @@ -5131,7 +5109,7 @@ entry); if 
(unlikely(status == discard_frame)) { priv->dev->stats.rx_errors++; -@@ -2538,9 +3343,9 @@ static int stmmac_rx(struct stmmac_priv +@@ -2550,9 +3355,9 @@ static int stmmac_rx(struct stmmac_priv * them in stmmac_rx_refill() function so that * device can reuse it. */ @@ -5143,7 +5121,7 @@ priv->dma_buf_sz, DMA_FROM_DEVICE); } -@@ -2588,7 +3393,7 @@ static int stmmac_rx(struct stmmac_priv +@@ -2600,7 +3405,7 @@ static int stmmac_rx(struct stmmac_priv */ if (unlikely(!priv->plat->has_gmac4 && ((frame_len < priv->rx_copybreak) || @@ -5152,7 +5130,7 @@ skb = netdev_alloc_skb_ip_align(priv->dev, frame_len); if (unlikely(!skb)) { -@@ -2600,21 +3405,21 @@ static int stmmac_rx(struct stmmac_priv +@@ -2612,21 +3417,21 @@ static int stmmac_rx(struct stmmac_priv } dma_sync_single_for_cpu(priv->device, @@ -5178,7 +5156,7 @@ if (unlikely(!skb)) { netdev_err(priv->dev, "%s: Inconsistent Rx chain\n", -@@ -2623,12 +3428,12 @@ static int stmmac_rx(struct stmmac_priv +@@ -2635,12 +3440,12 @@ static int stmmac_rx(struct stmmac_priv break; } prefetch(skb->data - NET_IP_ALIGN); @@ -5194,7 +5172,7 @@ priv->dma_buf_sz, DMA_FROM_DEVICE); } -@@ -2650,7 +3455,7 @@ static int stmmac_rx(struct stmmac_priv +@@ -2662,7 +3467,7 @@ static int stmmac_rx(struct stmmac_priv else skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -5203,7 +5181,7 @@ priv->dev->stats.rx_packets++; priv->dev->stats.rx_bytes += frame_len; -@@ -2658,7 +3463,7 @@ static int stmmac_rx(struct stmmac_priv +@@ -2670,7 +3475,7 @@ static int stmmac_rx(struct stmmac_priv entry = next_entry; } @@ -5212,7 +5190,7 @@ priv->xstats.rx_pkt_n += count; -@@ -2675,16 +3480,24 @@ static int stmmac_rx(struct stmmac_priv +@@ -2687,16 +3492,24 @@ static int stmmac_rx(struct stmmac_priv */ static int stmmac_poll(struct napi_struct *napi, int budget) { @@ -5241,7 +5219,7 @@ } return work_done; } -@@ -2700,9 +3513,12 @@ static int stmmac_poll(struct napi_struc +@@ -2712,9 +3525,12 @@ static int stmmac_poll(struct napi_struc static void stmmac_tx_timeout(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); @@ -5255,7 +5233,7 @@ } /** -@@ -2825,6 +3641,12 @@ static irqreturn_t stmmac_interrupt(int +@@ -2837,6 +3653,12 @@ static irqreturn_t stmmac_interrupt(int { struct net_device *dev = (struct net_device *)dev_id; struct stmmac_priv *priv = netdev_priv(dev); @@ -5268,7 +5246,7 @@ if (priv->irq_wake) pm_wakeup_event(priv->device, 0); -@@ -2838,16 +3660,30 @@ static irqreturn_t stmmac_interrupt(int +@@ -2850,16 +3672,30 @@ static irqreturn_t stmmac_interrupt(int if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { int status = priv->hw->mac->host_irq_status(priv->hw, &priv->xstats); @@ -5303,7 +5281,7 @@ } /* PCS link status */ -@@ -2932,7 +3768,7 @@ static void sysfs_display_ring(void *hea +@@ -2944,7 +3780,7 @@ static void sysfs_display_ring(void *hea ep++; } else { seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", @@ -5312,7 +5290,7 @@ le32_to_cpu(p->des0), le32_to_cpu(p->des1), le32_to_cpu(p->des2), le32_to_cpu(p->des3)); p++; -@@ -2945,17 +3781,40 @@ static int stmmac_sysfs_ring_read(struct +@@ -2957,17 +3793,40 @@ static int stmmac_sysfs_ring_read(struct { struct net_device *dev = seq->private; struct stmmac_priv *priv = netdev_priv(dev); @@ -5363,7 +5341,7 @@ } return 0; -@@ -3238,11 +4097,14 @@ int stmmac_dvr_probe(struct device *devi +@@ -3250,11 +4109,14 @@ int stmmac_dvr_probe(struct device *devi struct plat_stmmacenet_data *plat_dat, struct stmmac_resources *res) { @@ -5380,7 +5358,7 @@ if (!ndev) return -ENOMEM; -@@ -3284,6 +4146,10 @@ int 
stmmac_dvr_probe(struct device *devi +@@ -3296,6 +4158,10 @@ int stmmac_dvr_probe(struct device *devi if (ret) goto error_hw_init; @@ -5391,7 +5369,7 @@ ndev->netdev_ops = &stmmac_netdev_ops; ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | -@@ -3316,7 +4182,12 @@ int stmmac_dvr_probe(struct device *devi +@@ -3328,7 +4194,12 @@ int stmmac_dvr_probe(struct device *devi "Enable RX Mitigation via HW Watchdog Timer\n"); } @@ -5405,7 +5383,7 @@ spin_lock_init(&priv->lock); -@@ -3361,7 +4232,11 @@ error_netdev_register: +@@ -3373,7 +4244,11 @@ error_netdev_register: priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); error_mdio_register: @@ -5418,7 +5396,7 @@ error_hw_init: free_netdev(ndev); -@@ -3382,10 +4257,9 @@ int stmmac_dvr_remove(struct device *dev +@@ -3394,10 +4269,9 @@ int stmmac_dvr_remove(struct device *dev netdev_info(priv->dev, "%s: removing driver", __func__); @@ -5431,7 +5409,7 @@ netif_carrier_off(ndev); unregister_netdev(ndev); if (priv->plat->stmmac_rst) -@@ -3424,20 +4298,19 @@ int stmmac_suspend(struct device *dev) +@@ -3436,20 +4310,19 @@ int stmmac_suspend(struct device *dev) spin_lock_irqsave(&priv->lock, flags); netif_device_detach(ndev); @@ -5456,7 +5434,7 @@ pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ clk_disable(priv->plat->pclk); -@@ -3453,6 +4326,31 @@ int stmmac_suspend(struct device *dev) +@@ -3465,6 +4338,31 @@ int stmmac_suspend(struct device *dev) EXPORT_SYMBOL_GPL(stmmac_suspend); /** @@ -5488,7 +5466,7 @@ * stmmac_resume - resume callback * @dev: device pointer * Description: when resume this function is invoked to setup the DMA and CORE -@@ -3492,10 +4390,8 @@ int stmmac_resume(struct device *dev) +@@ -3504,10 +4402,8 @@ int stmmac_resume(struct device *dev) spin_lock_irqsave(&priv->lock, flags); @@ -5501,7 +5479,7 @@ /* reset private mss value to force mss context settings at * next tso xmit (only used for gmac4). 
*/ -@@ -3507,9 +4403,9 @@ int stmmac_resume(struct device *dev) +@@ -3519,9 +4415,9 @@ int stmmac_resume(struct device *dev) stmmac_init_tx_coalesce(priv); stmmac_set_rx_mode(ndev); diff --git a/target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch b/target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch index a5d860ddc4..4ee62321d5 100644 --- a/target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch +++ b/target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch @@ -1491,7 +1491,7 @@ free_dma_tx_desc_resources(priv); return ret; -@@ -2902,8 +2900,7 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2907,8 +2905,7 @@ static netdev_tx_t stmmac_tso_xmit(struc priv->xstats.tx_set_ic_bit++; } @@ -1501,7 +1501,7 @@ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)) { -@@ -2981,7 +2978,7 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2993,7 +2990,7 @@ static netdev_tx_t stmmac_xmit(struct sk /* Manage oversized TCP frames for GMAC4 device */ if (skb_is_gso(skb) && priv->tso) { @@ -1510,7 +1510,7 @@ return stmmac_tso_xmit(skb, dev); } -@@ -3112,8 +3109,7 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -3124,8 +3121,7 @@ static netdev_tx_t stmmac_xmit(struct sk priv->xstats.tx_set_ic_bit++; } @@ -1520,7 +1520,7 @@ /* Ready to fill the first descriptor and set the OWN bit w/o any * problems because all the descriptors are actually ready to be -@@ -3990,7 +3986,9 @@ static int stmmac_hw_init(struct stmmac_ +@@ -4002,7 +3998,9 @@ static int stmmac_hw_init(struct stmmac_ struct mac_device_info *mac; /* Identify the MAC HW device */ @@ -1531,7 +1531,7 @@ priv->dev->priv_flags |= IFF_UNICAST_FLT; mac = dwmac1000_setup(priv->ioaddr, priv->plat->multicast_filter_bins, -@@ -4010,6 +4008,10 @@ static int stmmac_hw_init(struct stmmac_ +@@ -4022,6 +4020,10 @@ static int stmmac_hw_init(struct stmmac_ priv->hw = mac; @@ -1542,7 +1542,7 @@ /* To use the chained or ring mode */ if (priv->synopsys_id >= DWMAC_CORE_4_00) { priv->hw->mode = &dwmac4_ring_mode_ops; -@@ -4138,8 +4140,15 @@ int stmmac_dvr_probe(struct device *devi +@@ -4150,8 +4152,15 @@ int stmmac_dvr_probe(struct device *devi if ((phyaddr >= 0) && (phyaddr <= 31)) priv->plat->phy_addr = phyaddr; @@ -1559,7 +1559,7 @@ /* Init MAC and get the capabilities */ ret = stmmac_hw_init(priv); -@@ -4156,7 +4165,7 @@ int stmmac_dvr_probe(struct device *devi +@@ -4168,7 +4177,7 @@ int stmmac_dvr_probe(struct device *devi NETIF_F_RXCSUM; if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { @@ -1568,7 +1568,7 @@ priv->tso = true; dev_info(priv->device, "TSO feature enabled\n"); } -@@ -4318,7 +4327,7 @@ int stmmac_suspend(struct device *dev) +@@ -4330,7 +4339,7 @@ int stmmac_suspend(struct device *dev) } spin_unlock_irqrestore(&priv->lock, flags); |