Diffstat (limited to 'target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch')
-rw-r--r-- | target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch | 100
1 file changed, 50 insertions, 50 deletions
diff --git a/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch b/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch
index 285e4d2762..18f58e947c 100644
--- a/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch
+++ b/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch
@@ -4523,7 +4523,7 @@
/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
-@@ -1821,7 +2598,7 @@ static int stmmac_open(struct net_device
+@@ -1822,7 +2599,7 @@ static int stmmac_open(struct net_device
netdev_err(priv->dev,
"%s: ERROR: allocating the IRQ %d (error: %d)\n",
__func__, dev->irq, ret);
@@ -4532,7 +4532,7 @@
}
/* Request the Wake IRQ in case of another line is used for WoL */
-@@ -1848,8 +2625,8 @@ static int stmmac_open(struct net_device
+@@ -1849,8 +2626,8 @@ static int stmmac_open(struct net_device
}
}
@@ -4543,7 +4543,7 @@
return 0;
-@@ -1858,7 +2635,12 @@ lpiirq_error:
+@@ -1859,7 +2636,12 @@ lpiirq_error:
free_irq(priv->wol_irq, dev);
wolirq_error:
free_irq(dev->irq, dev);
@@ -4556,7 +4556,7 @@
init_error:
free_dma_desc_resources(priv);
dma_desc_error:
-@@ -1887,9 +2669,9 @@ static int stmmac_release(struct net_dev
+@@ -1888,9 +2670,9 @@ static int stmmac_release(struct net_dev
phy_disconnect(dev->phydev);
}
@@ -4568,7 +4568,7 @@
del_timer_sync(&priv->txtimer);
-@@ -1901,14 +2683,13 @@ static int stmmac_release(struct net_dev
+@@ -1902,14 +2684,13 @@ static int stmmac_release(struct net_dev
free_irq(priv->lpi_irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
@@ -4585,7 +4585,7 @@
netif_carrier_off(dev);
-@@ -1927,22 +2708,24 @@ static int stmmac_release(struct net_dev
+@@ -1928,22 +2709,24 @@ static int stmmac_release(struct net_dev
* @des: buffer start address
* @total_len: total length to fill in descriptors
* @last_segmant: condition for the last descriptor
@@ -4614,7 +4614,7 @@
desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
-@@ -1950,7 +2733,7 @@ static void stmmac_tso_allocator(struct
+@@ -1951,7 +2734,7 @@ static void stmmac_tso_allocator(struct
priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
0, 1,
@@ -4623,7 +4623,7 @@
0, 0);
tmp_len -= TSO_MAX_BUFF_SIZE;
-@@ -1986,23 +2769,28 @@ static void stmmac_tso_allocator(struct
+@@ -1987,23 +2770,28 @@ static void stmmac_tso_allocator(struct
*/
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -4658,7 +4658,7 @@
/* This is a hard error, log it. */
netdev_err(priv->dev,
"%s: Tx Ring full when queue awake\n",
-@@ -2017,10 +2805,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2018,10 +2806,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
/* set new MSS value if needed */
if (mss != priv->mss) {
@@ -4671,7 +4671,7 @@
}
if (netif_msg_tx_queued(priv)) {
-@@ -2030,9 +2818,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2031,9 +2819,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
skb->data_len);
}
@@ -4683,7 +4683,7 @@
first = desc;
/* first descriptor: fill Headers on Buf1 */
-@@ -2041,9 +2829,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2042,9 +2830,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
@@ -4695,7 +4695,7 @@
first->des0 = cpu_to_le32(des);
-@@ -2054,7 +2841,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2055,7 +2842,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
/* If needed take extra descriptors to fill the remaining payload */
tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
@@ -4704,7 +4704,7 @@
/* Prepare fragments */
for (i = 0; i < nfrags; i++) {
-@@ -2063,24 +2850,34 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2064,24 +2851,34 @@ static netdev_tx_t stmmac_tso_xmit(struc
des = skb_frag_dma_map(priv->device, frag, 0,
skb_frag_size(frag),
DMA_TO_DEVICE);
@@ -4748,7 +4748,7 @@
}
dev->stats.tx_bytes += skb->len;
-@@ -2112,7 +2909,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2113,7 +2910,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
priv->hw->desc->prepare_tso_tx_desc(first, 1,
proto_hdr_len,
pay_len,
@@ -4757,7 +4757,7 @@
tcp_hdrlen(skb) / 4,
(skb->len - proto_hdr_len));
/* If context desc is used to change MSS */
-@@ -2127,20 +2924,20 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2128,20 +2925,20 @@ static netdev_tx_t stmmac_tso_xmit(struc
if (netif_msg_pktdata(priv)) {
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
@@ -4784,7 +4784,7 @@
return NETDEV_TX_OK;
-@@ -2164,21 +2961,27 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2165,21 +2962,27 @@ static netdev_tx_t stmmac_xmit(struct sk
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int nopaged_len = skb_headlen(skb);
int i, csum_insertion = 0, is_jumbo = 0;
@@ -4816,7 +4816,7 @@
/* This is a hard error, log it. */
netdev_err(priv->dev,
"%s: Tx Ring full when queue awake\n",
-@@ -2190,20 +2993,18 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2191,20 +2994,18 @@ static netdev_tx_t stmmac_xmit(struct sk
if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);
@@ -4840,7 +4840,7 @@
enh_desc = priv->plat->enh_desc;
/* To program the descriptors according to the size of the frame */
if (enh_desc)
-@@ -2211,7 +3012,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2212,7 +3013,7 @@ static netdev_tx_t stmmac_xmit(struct sk
if (unlikely(is_jumbo) && likely(priv->synopsys_id <
DWMAC_CORE_4_00)) {
@@ -4849,7 +4849,7 @@
if (unlikely(entry < 0))
goto dma_map_err;
}
-@@ -2224,48 +3025,56 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2225,48 +3026,56 @@ static netdev_tx_t stmmac_xmit(struct sk
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
if (likely(priv->extend_desc))
@@ -4919,7 +4919,7 @@
priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
-@@ -2273,10 +3082,10 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2274,10 +3083,10 @@ static netdev_tx_t stmmac_xmit(struct sk
print_pkt(skb->data, skb->len);
}
@@ -4932,7 +4932,7 @@
}
dev->stats.tx_bytes += skb->len;
-@@ -2311,14 +3120,14 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2312,14 +3121,14 @@ static netdev_tx_t stmmac_xmit(struct sk
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
@@ -4950,7 +4950,7 @@
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
-@@ -2330,7 +3139,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2331,7 +3140,7 @@ static netdev_tx_t stmmac_xmit(struct sk
/* Prepare the first descriptor setting the OWN bit too */
priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
csum_insertion, priv->mode, 1,
@@ -4959,7 +4959,7 @@
/* The own bit must be the latest setting done when prepare the
* descriptor and then barrier is needed to make sure that
-@@ -2339,13 +3148,13 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2340,13 +3149,13 @@ static netdev_tx_t stmmac_xmit(struct sk
dma_wmb();
}
@@ -4976,7 +4976,7 @@
return NETDEV_TX_OK;
-@@ -2373,9 +3182,9 @@ static void stmmac_rx_vlan(struct net_de
+@@ -2374,9 +3183,9 @@ static void stmmac_rx_vlan(struct net_de
}
@@ -4988,7 +4988,7 @@
return 0;
return 1;
-@@ -2384,30 +3193,33 @@ static inline int stmmac_rx_threshold_co
+@@ -2385,30 +3194,33 @@ static inline int stmmac_rx_threshold_co
/**
* stmmac_rx_refill - refill used skb preallocated buffers
* @priv: driver private structure
@@ -5029,7 +5029,7 @@
if (unlikely(net_ratelimit()))
dev_err(priv->device,
"fail to alloc skb entry %d\n",
-@@ -2415,28 +3227,28 @@ static inline void stmmac_rx_refill(stru
+@@ -2416,28 +3228,28 @@ static inline void stmmac_rx_refill(stru
break;
}
@@ -5066,7 +5066,7 @@
netif_dbg(priv, rx_status, priv->dev,
"refill entry #%d\n", entry);
-@@ -2452,31 +3264,33 @@ static inline void stmmac_rx_refill(stru
+@@ -2453,31 +3265,33 @@ static inline void stmmac_rx_refill(stru
entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
}
@@ -5107,7 +5107,7 @@
priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
}
-@@ -2486,9 +3300,9 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2487,9 +3301,9 @@ static int stmmac_rx(struct stmmac_priv
struct dma_desc *np;
@@ -5119,7 +5119,7 @@
/* read the status of the incoming frame */
status = priv->hw->desc->rx_status(&priv->dev->stats,
-@@ -2499,20 +3313,20 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2500,20 +3314,20 @@ static int stmmac_rx(struct stmmac_priv
count++;
@@ -5145,7 +5145,7 @@
entry);
if (unlikely(status == discard_frame)) {
priv->dev->stats.rx_errors++;
-@@ -2522,9 +3336,9 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2523,9 +3337,9 @@ static int stmmac_rx(struct stmmac_priv
* them in stmmac_rx_refill() function so that
* device can reuse it.
*/
@@ -5157,7 +5157,7 @@
priv->dma_buf_sz, DMA_FROM_DEVICE);
}
-@@ -2572,7 +3386,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2573,7 +3387,7 @@ static int stmmac_rx(struct stmmac_priv
*/
if (unlikely(!priv->plat->has_gmac4 &&
((frame_len < priv->rx_copybreak) ||
@@ -5166,7 +5166,7 @@
skb = netdev_alloc_skb_ip_align(priv->dev,
frame_len);
if (unlikely(!skb)) {
-@@ -2584,21 +3398,21 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2585,21 +3399,21 @@ static int stmmac_rx(struct stmmac_priv
}
dma_sync_single_for_cpu(priv->device,
@@ -5192,7 +5192,7 @@
if (unlikely(!skb)) {
netdev_err(priv->dev,
"%s: Inconsistent Rx chain\n",
-@@ -2607,12 +3421,12 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2608,12 +3422,12 @@ static int stmmac_rx(struct stmmac_priv
break;
}
prefetch(skb->data - NET_IP_ALIGN);
@@ -5208,7 +5208,7 @@
priv->dma_buf_sz, DMA_FROM_DEVICE);
}
-@@ -2634,7 +3448,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2635,7 +3449,7 @@ static int stmmac_rx(struct stmmac_priv
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -5217,7 +5217,7 @@
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
-@@ -2642,7 +3456,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2643,7 +3457,7 @@ static int stmmac_rx(struct stmmac_priv
entry = next_entry;
}
@@ -5226,7 +5226,7 @@
priv->xstats.rx_pkt_n += count;
-@@ -2659,16 +3473,24 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2660,16 +3474,24 @@ static int stmmac_rx(struct stmmac_priv
*/
static int stmmac_poll(struct napi_struct *napi, int budget)
{
@@ -5255,7 +5255,7 @@
}
return work_done;
}
-@@ -2684,9 +3506,12 @@ static int stmmac_poll(struct napi_struc
+@@ -2685,9 +3507,12 @@ static int stmmac_poll(struct napi_struc
static void stmmac_tx_timeout(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -5269,7 +5269,7 @@
}
/**
-@@ -2809,6 +3634,12 @@ static irqreturn_t stmmac_interrupt(int
+@@ -2810,6 +3635,12 @@ static irqreturn_t stmmac_interrupt(int
{
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
@@ -5282,7 +5282,7 @@
if (priv->irq_wake)
pm_wakeup_event(priv->device, 0);
-@@ -2822,16 +3653,30 @@ static irqreturn_t stmmac_interrupt(int
+@@ -2823,16 +3654,30 @@ static irqreturn_t stmmac_interrupt(int
if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
int status = priv->hw->mac->host_irq_status(priv->hw,
&priv->xstats);
@@ -5317,7 +5317,7 @@
}
/* PCS link status */
-@@ -2916,7 +3761,7 @@ static void sysfs_display_ring(void *hea
+@@ -2917,7 +3762,7 @@ static void sysfs_display_ring(void *hea
ep++;
} else {
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
@@ -5326,7 +5326,7 @@
le32_to_cpu(p->des0), le32_to_cpu(p->des1),
le32_to_cpu(p->des2), le32_to_cpu(p->des3));
p++;
-@@ -2929,17 +3774,40 @@ static int stmmac_sysfs_ring_read(struct
+@@ -2930,17 +3775,40 @@ static int stmmac_sysfs_ring_read(struct
{
struct net_device *dev = seq->private;
struct stmmac_priv *priv = netdev_priv(dev);
@@ -5377,7 +5377,7 @@
}
return 0;
-@@ -3222,11 +4090,14 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3223,11 +4091,14 @@ int stmmac_dvr_probe(struct device *devi
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res)
{
@@ -5394,7 +5394,7 @@
if (!ndev)
return -ENOMEM;
-@@ -3268,6 +4139,10 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3269,6 +4140,10 @@ int stmmac_dvr_probe(struct device *devi
if (ret)
goto error_hw_init;
@@ -5405,7 +5405,7 @@
ndev->netdev_ops = &stmmac_netdev_ops;
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-@@ -3300,7 +4175,12 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3301,7 +4176,12 @@ int stmmac_dvr_probe(struct device *devi
"Enable RX Mitigation via HW Watchdog Timer\n");
}
@@ -5419,7 +5419,7 @@
spin_lock_init(&priv->lock);
-@@ -3345,7 +4225,11 @@ error_netdev_register:
+@@ -3346,7 +4226,11 @@ error_netdev_register:
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
error_mdio_register:
@@ -5432,7 +5432,7 @@
error_hw_init:
free_netdev(ndev);
-@@ -3366,10 +4250,9 @@ int stmmac_dvr_remove(struct device *dev
+@@ -3367,10 +4251,9 @@ int stmmac_dvr_remove(struct device *dev
netdev_info(priv->dev, "%s: removing driver", __func__);
@@ -5445,7 +5445,7 @@
netif_carrier_off(ndev);
unregister_netdev(ndev);
if (priv->plat->stmmac_rst)
-@@ -3408,20 +4291,19 @@ int stmmac_suspend(struct device *dev)
+@@ -3409,20 +4292,19 @@ int stmmac_suspend(struct device *dev)
spin_lock_irqsave(&priv->lock, flags);
netif_device_detach(ndev);
@@ -5470,7 +5470,7 @@
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
clk_disable(priv->plat->pclk);
-@@ -3437,6 +4319,31 @@ int stmmac_suspend(struct device *dev)
+@@ -3438,6 +4320,31 @@ int stmmac_suspend(struct device *dev)
EXPORT_SYMBOL_GPL(stmmac_suspend);
/**
@@ -5502,7 +5502,7 @@
* stmmac_resume - resume callback
* @dev: device pointer
* Description: when resume this function is invoked to setup the DMA and CORE
-@@ -3476,10 +4383,8 @@ int stmmac_resume(struct device *dev)
+@@ -3477,10 +4384,8 @@ int stmmac_resume(struct device *dev)
spin_lock_irqsave(&priv->lock, flags);
@@ -5515,7 +5515,7 @@
/* reset private mss value to force mss context settings at
* next tso xmit (only used for gmac4).
*/
-@@ -3491,9 +4396,9 @@ int stmmac_resume(struct device *dev)
+@@ -3492,9 +4397,9 @@ int stmmac_resume(struct device *dev)
stmmac_init_tx_coalesce(priv);
stmmac_set_rx_mode(ndev);