Diffstat (limited to 'target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch')
-rw-r--r-- | target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch | 468
1 file changed, 223 insertions, 245 deletions
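For orientation before the raw hunks: the refreshed patch tracks the upstream 4.12 stmmac rework that moves the driver from single global RX/TX descriptor rings to per-queue ring state (`struct stmmac_rx_queue` / `struct stmmac_tx_queue`), with matching per-queue alloc/free helpers and unwind-on-failure paths. The sketch below is a minimal, self-contained userspace model of that allocation pattern — `calloc()`/`free()` stand in for the kernel's `kmalloc_array()` and `dma_zalloc_coherent()`, the field names (`queue_index`, `priv_data`, `rx_skbuff_dma`, `dma_rx`) mirror names visible in the patch, and the types, sizes, and queue limit are illustrative assumptions, not the driver's real definitions.

```c
/* Userspace model of the per-queue RX allocation the patch introduces.
 * calloc()/free() stand in for kmalloc_array()/dma_zalloc_coherent();
 * field names follow the patch, types and sizes are assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

#define DMA_RX_SIZE 512
#define MAX_RX_QUEUES 8 /* illustrative cap, not the driver's */

struct stmmac_rx_queue {
	unsigned int queue_index;
	void *priv_data;
	unsigned long *rx_skbuff_dma; /* per-descriptor DMA addresses */
	void **rx_skbuff;             /* per-descriptor buffer pointers */
	void *dma_rx;                 /* the descriptor ring itself */
};

struct stmmac_priv {
	unsigned int rx_queues_to_use;
	struct stmmac_rx_queue rx_queue[MAX_RX_QUEUES];
};

/* Mirrors free_dma_rx_desc_resources(): walks every queue; free(NULL)
 * is harmless, so this also serves as the error-unwind path. */
static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	for (unsigned int q = 0; q < priv->rx_queues_to_use; q++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[q];

		free(rx_q->dma_rx);
		free(rx_q->rx_skbuff);
		free(rx_q->rx_skbuff_dma);
	}
}

/* Mirrors alloc_dma_rx_desc_resources(): allocate per queue, and on any
 * failure unwind everything allocated so far. */
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	for (unsigned int q = 0; q < priv->rx_queues_to_use; q++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[q];

		rx_q->queue_index = q;
		rx_q->priv_data = priv;

		rx_q->rx_skbuff_dma = calloc(DMA_RX_SIZE, sizeof(unsigned long));
		rx_q->rx_skbuff = calloc(DMA_RX_SIZE, sizeof(void *));
		rx_q->dma_rx = calloc(DMA_RX_SIZE, 64 /* assumed desc size */);
		if (!rx_q->rx_skbuff_dma || !rx_q->rx_skbuff || !rx_q->dma_rx)
			goto err_dma;
	}
	return 0;

err_dma:
	free_dma_rx_desc_resources(priv);
	return -1; /* -ENOMEM in the kernel */
}

int main(void)
{
	struct stmmac_priv priv = { .rx_queues_to_use = 4 };

	if (alloc_dma_rx_desc_resources(&priv))
		return 1;
	printf("allocated %u RX queues\n", priv.rx_queues_to_use);
	free_dma_rx_desc_resources(&priv);
	return 0;
}
```

The same shape repeats for the TX side in the hunks below, and `alloc_dma_desc_resources()` becomes a thin wrapper that calls the RX allocator and then the TX one.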
diff --git a/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch b/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch index 3d9bbb2295..a1b018186f 100644 --- a/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch +++ b/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch @@ -3095,7 +3095,7 @@ if (priv->hw->mode->set_16kib_bfsize) bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); -@@ -1033,257 +1234,516 @@ static int init_dma_desc_rings(struct ne +@@ -1033,235 +1234,409 @@ static int init_dma_desc_rings(struct ne priv->dma_buf_sz = bfsize; @@ -3351,10 +3351,17 @@ - priv->tx_skbuff_dma[i].buf, - priv->tx_skbuff_dma[i].len, - DMA_TO_DEVICE); +- } + for (i = 0; i < DMA_TX_SIZE; i++) + stmmac_free_tx_buffer(priv, queue, i); +} -+ + +- if (priv->tx_skbuff[i]) { +- dev_kfree_skb_any(priv->tx_skbuff[i]); +- priv->tx_skbuff[i] = NULL; +- priv->tx_skbuff_dma[i].buf = 0; +- priv->tx_skbuff_dma[i].map_as_page = false; +- } +/** + * free_dma_rx_desc_resources - free RX dma desc resources + * @priv: private structure @@ -3383,10 +3390,11 @@ + + kfree(rx_q->rx_skbuff_dma); + kfree(rx_q->rx_skbuff); -+ } -+} -+ -+/** + } + } + + /** +- * alloc_dma_desc_resources - alloc TX/RX resources. + * free_dma_tx_desc_resources - free TX dma desc resources + * @priv: private structure + */ @@ -3419,36 +3427,90 @@ + +/** + * alloc_dma_rx_desc_resources - alloc RX resources. -+ * @priv: private structure -+ * Description: according to which descriptor can be used (extend or basic) -+ * this function allocates the resources for TX and RX paths. In case of -+ * reception, for example, it pre-allocated the RX socket buffer in order to -+ * allow zero-copy mechanism. -+ */ + * @priv: private structure + * Description: according to which descriptor can be used (extend or basic) + * this function allocates the resources for TX and RX paths. In case of + * reception, for example, it pre-allocated the RX socket buffer in order to + * allow zero-copy mechanism. 
+ */ +-static int alloc_dma_desc_resources(struct stmmac_priv *priv) +static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) -+{ + { + u32 rx_count = priv->plat->rx_queues_to_use; -+ int ret = -ENOMEM; + int ret = -ENOMEM; + u32 queue; -+ + +- priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t), +- GFP_KERNEL); +- if (!priv->rx_skbuff_dma) +- return -ENOMEM; + /* RX queues buffers and DMA */ + for (queue = 0; queue < rx_count; queue++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ + +- priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *), +- GFP_KERNEL); +- if (!priv->rx_skbuff) +- goto err_rx_skbuff; +- +- priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, +- sizeof(*priv->tx_skbuff_dma), +- GFP_KERNEL); +- if (!priv->tx_skbuff_dma) +- goto err_tx_skbuff_dma; +- +- priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *), +- GFP_KERNEL); +- if (!priv->tx_skbuff) +- goto err_tx_skbuff; +- +- if (priv->extend_desc) { +- priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct +- dma_extended_desc), +- &priv->dma_rx_phy, +- GFP_KERNEL); +- if (!priv->dma_erx) +- goto err_dma; + rx_q->queue_index = queue; + rx_q->priv_data = priv; -+ + +- priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * +- sizeof(struct +- dma_extended_desc), +- &priv->dma_tx_phy, + rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, + sizeof(dma_addr_t), -+ GFP_KERNEL); + GFP_KERNEL); +- if (!priv->dma_etx) { +- dma_free_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct dma_extended_desc), +- priv->dma_erx, priv->dma_rx_phy); +- goto err_dma; +- } +- } else { +- priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct dma_desc), +- &priv->dma_rx_phy, +- GFP_KERNEL); +- if (!priv->dma_rx) +- goto err_dma; + if (!rx_q->rx_skbuff_dma) + return -ENOMEM; -+ + +- priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * +- sizeof(struct dma_desc), +- &priv->dma_tx_phy, +- GFP_KERNEL); +- if (!priv->dma_tx) { +- dma_free_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct dma_desc), +- priv->dma_rx, priv->dma_rx_phy); + rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE, + sizeof(struct sk_buff *), + GFP_KERNEL); + if (!rx_q->rx_skbuff) -+ goto err_dma; + goto err_dma; + + if (priv->extend_desc) { + rx_q->dma_erx = dma_zalloc_coherent(priv->device, @@ -3469,12 +3531,19 @@ + GFP_KERNEL); + if (!rx_q->dma_rx) + goto err_dma; -+ } -+ } -+ -+ return 0; -+ -+err_dma: + } + } + + return 0; + + err_dma: +- kfree(priv->tx_skbuff); +-err_tx_skbuff: +- kfree(priv->tx_skbuff_dma); +-err_tx_skbuff_dma: +- kfree(priv->rx_skbuff); +-err_rx_skbuff: +- kfree(priv->rx_skbuff_dma); + free_dma_rx_desc_resources(priv); + + return ret; @@ -3531,7 +3600,7 @@ + GFP_KERNEL); + if (!tx_q->dma_tx) + goto err_dma_buffers; - } ++ } + } + + return 0; @@ -3560,126 +3629,85 @@ + + ret = alloc_dma_tx_desc_resources(priv); + -+ return ret; -+} -+ + return ret; + } + +/** + * free_dma_desc_resources - free dma desc resources + * @priv: private structure + */ -+static void free_dma_desc_resources(struct stmmac_priv *priv) -+{ + static void free_dma_desc_resources(struct stmmac_priv *priv) + { +- /* Release the DMA TX/RX socket buffers */ +- dma_free_rx_skbufs(priv); +- dma_free_tx_skbufs(priv); +- +- /* Free DMA regions of consistent memory previously allocated */ +- if (!priv->extend_desc) { +- dma_free_coherent(priv->device, +- DMA_TX_SIZE * sizeof(struct dma_desc), +- priv->dma_tx, priv->dma_tx_phy); +- 
dma_free_coherent(priv->device, +- DMA_RX_SIZE * sizeof(struct dma_desc), +- priv->dma_rx, priv->dma_rx_phy); +- } else { +- dma_free_coherent(priv->device, DMA_TX_SIZE * +- sizeof(struct dma_extended_desc), +- priv->dma_etx, priv->dma_tx_phy); +- dma_free_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct dma_extended_desc), +- priv->dma_erx, priv->dma_rx_phy); +- } +- kfree(priv->rx_skbuff_dma); +- kfree(priv->rx_skbuff); +- kfree(priv->tx_skbuff_dma); +- kfree(priv->tx_skbuff); + /* Release the DMA RX socket buffers */ + free_dma_rx_desc_resources(priv); + + /* Release the DMA TX socket buffers */ + free_dma_tx_desc_resources(priv); -+} -+ -+/** -+ * stmmac_mac_enable_rx_queues - Enable MAC rx queues -+ * @priv: driver private structure -+ * Description: It is used for enabling the rx queues in the MAC -+ */ -+static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) -+{ + } + + /** +@@ -1271,19 +1646,104 @@ static void free_dma_desc_resources(stru + */ + static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) + { +- int rx_count = priv->dma_cap.number_rx_queues; +- int queue = 0; + u32 rx_queues_count = priv->plat->rx_queues_to_use; + int queue; + u8 mode; -- if (priv->tx_skbuff[i]) { -- dev_kfree_skb_any(priv->tx_skbuff[i]); -- priv->tx_skbuff[i] = NULL; -- priv->tx_skbuff_dma[i].buf = 0; -- priv->tx_skbuff_dma[i].map_as_page = false; -- } +- /* If GMAC does not have multiple queues, then this is not necessary*/ +- if (rx_count == 1) +- return; + for (queue = 0; queue < rx_queues_count; queue++) { + mode = priv->plat->rx_queues_cfg[queue].mode_to_use; + priv->hw->mac->rx_queue_enable(priv->hw, mode, queue); - } - } ++ } ++} - /** -- * alloc_dma_desc_resources - alloc TX/RX resources. -- * @priv: private structure -- * Description: according to which descriptor can be used (extend or basic) -- * this function allocates the resources for TX and RX paths. In case of -- * reception, for example, it pre-allocated the RX socket buffer in order to -- * allow zero-copy mechanism. +- /** +- * If the core is synthesized with multiple rx queues / multiple +- * dma channels, then rx queues will be disabled by default. +- * For now only rx queue 0 is enabled. 
+- */ +- priv->hw->mac->rx_queue_enable(priv->hw, queue); ++/** + * stmmac_start_rx_dma - start RX DMA channel + * @priv: driver private structure + * @chan: RX channel index + * Description: + * This starts a RX DMA channel - */ --static int alloc_dma_desc_resources(struct stmmac_priv *priv) ++ */ +static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) - { -- int ret = -ENOMEM; -- -- priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t), -- GFP_KERNEL); -- if (!priv->rx_skbuff_dma) -- return -ENOMEM; -- -- priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *), -- GFP_KERNEL); -- if (!priv->rx_skbuff) -- goto err_rx_skbuff; -- -- priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, -- sizeof(*priv->tx_skbuff_dma), -- GFP_KERNEL); -- if (!priv->tx_skbuff_dma) -- goto err_tx_skbuff_dma; -- -- priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *), -- GFP_KERNEL); -- if (!priv->tx_skbuff) -- goto err_tx_skbuff; -- -- if (priv->extend_desc) { -- priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * -- sizeof(struct -- dma_extended_desc), -- &priv->dma_rx_phy, -- GFP_KERNEL); -- if (!priv->dma_erx) -- goto err_dma; -- -- priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * -- sizeof(struct -- dma_extended_desc), -- &priv->dma_tx_phy, -- GFP_KERNEL); -- if (!priv->dma_etx) { -- dma_free_coherent(priv->device, DMA_RX_SIZE * -- sizeof(struct dma_extended_desc), -- priv->dma_erx, priv->dma_rx_phy); -- goto err_dma; -- } -- } else { -- priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * -- sizeof(struct dma_desc), -- &priv->dma_rx_phy, -- GFP_KERNEL); -- if (!priv->dma_rx) -- goto err_dma; ++{ + netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); + priv->hw->dma->start_rx(priv->ioaddr, chan); +} - -- priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * -- sizeof(struct dma_desc), -- &priv->dma_tx_phy, -- GFP_KERNEL); -- if (!priv->dma_tx) { -- dma_free_coherent(priv->device, DMA_RX_SIZE * -- sizeof(struct dma_desc), -- priv->dma_rx, priv->dma_rx_phy); -- goto err_dma; -- } -- } ++ +/** + * stmmac_start_tx_dma - start TX DMA channel + * @priv: driver private structure @@ -3692,8 +3720,7 @@ + netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); + priv->hw->dma->start_tx(priv->ioaddr, chan); +} - -- return 0; ++ +/** + * stmmac_stop_rx_dma - stop RX DMA channel + * @priv: driver private structure @@ -3706,16 +3733,7 @@ + netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); + priv->hw->dma->stop_rx(priv->ioaddr, chan); +} - --err_dma: -- kfree(priv->tx_skbuff); --err_tx_skbuff: -- kfree(priv->tx_skbuff_dma); --err_tx_skbuff_dma: -- kfree(priv->rx_skbuff); --err_rx_skbuff: -- kfree(priv->rx_skbuff_dma); -- return ret; ++ +/** + * stmmac_stop_tx_dma - stop TX DMA channel + * @priv: driver private structure @@ -3727,9 +3745,8 @@ +{ + netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); + priv->hw->dma->stop_tx(priv->ioaddr, chan); - } - --static void free_dma_desc_resources(struct stmmac_priv *priv) ++} ++ +/** + * stmmac_start_all_dma - start all RX and TX DMA channels + * @priv: driver private structure @@ -3737,31 +3754,7 @@ + * This starts all the RX and TX DMA channels + */ +static void stmmac_start_all_dma(struct stmmac_priv *priv) - { -- /* Release the DMA TX/RX socket buffers */ -- dma_free_rx_skbufs(priv); -- dma_free_tx_skbufs(priv); -- -- /* Free DMA regions of consistent memory previously allocated */ -- if 
(!priv->extend_desc) { -- dma_free_coherent(priv->device, -- DMA_TX_SIZE * sizeof(struct dma_desc), -- priv->dma_tx, priv->dma_tx_phy); -- dma_free_coherent(priv->device, -- DMA_RX_SIZE * sizeof(struct dma_desc), -- priv->dma_rx, priv->dma_rx_phy); -- } else { -- dma_free_coherent(priv->device, DMA_TX_SIZE * -- sizeof(struct dma_extended_desc), -- priv->dma_etx, priv->dma_tx_phy); -- dma_free_coherent(priv->device, DMA_RX_SIZE * -- sizeof(struct dma_extended_desc), -- priv->dma_erx, priv->dma_rx_phy); -- } -- kfree(priv->rx_skbuff_dma); -- kfree(priv->rx_skbuff); -- kfree(priv->tx_skbuff_dma); -- kfree(priv->tx_skbuff); ++{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 chan = 0; @@ -3771,38 +3764,23 @@ + + for (chan = 0; chan < tx_channels_count; chan++) + stmmac_start_tx_dma(priv, chan); - } - - /** -- * stmmac_mac_enable_rx_queues - Enable MAC rx queues -- * @priv: driver private structure -- * Description: It is used for enabling the rx queues in the MAC ++} ++ ++/** + * stmmac_stop_all_dma - stop all RX and TX DMA channels + * @priv: driver private structure + * Description: + * This stops the RX and TX DMA channels - */ --static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) ++ */ +static void stmmac_stop_all_dma(struct stmmac_priv *priv) - { -- int rx_count = priv->dma_cap.number_rx_queues; -- int queue = 0; ++{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 chan = 0; - -- /* If GMAC does not have multiple queues, then this is not necessary*/ -- if (rx_count == 1) -- return; ++ + for (chan = 0; chan < rx_channels_count; chan++) + stmmac_stop_rx_dma(priv, chan); - -- /** -- * If the core is synthesized with multiple rx queues / multiple -- * dma channels, then rx queues will be disabled by default. -- * For now only rx queue 0 is enabled. 
-- */ -- priv->hw->mac->rx_queue_enable(priv->hw, queue); ++ + for (chan = 0; chan < tx_channels_count; chan++) + stmmac_stop_tx_dma(priv, chan); } @@ -3897,7 +3875,7 @@ status = priv->hw->desc->tx_status(&priv->dev->stats, &priv->xstats, p, -@@ -1357,48 +1842,51 @@ static void stmmac_tx_clean(struct stmma +@@ -1362,48 +1847,51 @@ static void stmmac_tx_clean(struct stmma stmmac_get_tx_hwtstamp(priv, p, skb); } @@ -3967,7 +3945,7 @@ } if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { -@@ -1408,45 +1896,76 @@ static void stmmac_tx_clean(struct stmma +@@ -1413,45 +1901,76 @@ static void stmmac_tx_clean(struct stmma netif_tx_unlock(priv->dev); } @@ -4059,7 +4037,7 @@ } /** -@@ -1458,31 +1977,43 @@ static void stmmac_tx_err(struct stmmac_ +@@ -1463,31 +1982,43 @@ static void stmmac_tx_err(struct stmmac_ */ static void stmmac_dma_interrupt(struct stmmac_priv *priv) { @@ -4124,7 +4102,7 @@ } /** -@@ -1589,6 +2120,13 @@ static void stmmac_check_ether_addr(stru +@@ -1594,6 +2125,13 @@ static void stmmac_check_ether_addr(stru */ static int stmmac_init_dma_engine(struct stmmac_priv *priv) { @@ -4138,7 +4116,7 @@ int atds = 0; int ret = 0; -@@ -1606,19 +2144,49 @@ static int stmmac_init_dma_engine(struct +@@ -1611,19 +2149,49 @@ static int stmmac_init_dma_engine(struct return ret; } @@ -4200,7 +4178,7 @@ } if (priv->plat->axi && priv->hw->dma->axi) -@@ -1636,8 +2204,12 @@ static int stmmac_init_dma_engine(struct +@@ -1641,8 +2209,12 @@ static int stmmac_init_dma_engine(struct static void stmmac_tx_timer(unsigned long data) { struct stmmac_priv *priv = (struct stmmac_priv *)data; @@ -4214,7 +4192,7 @@ } /** -@@ -1659,6 +2231,196 @@ static void stmmac_init_tx_coalesce(stru +@@ -1664,6 +2236,196 @@ static void stmmac_init_tx_coalesce(stru add_timer(&priv->txtimer); } @@ -4411,7 +4389,7 @@ /** * stmmac_hw_setup - setup mac in a usable state. * @dev : pointer to the device structure. -@@ -1674,6 +2436,9 @@ static void stmmac_init_tx_coalesce(stru +@@ -1679,6 +2441,9 @@ static void stmmac_init_tx_coalesce(stru static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) { struct stmmac_priv *priv = netdev_priv(dev); @@ -4421,7 +4399,7 @@ int ret; /* DMA initialization and SW reset */ -@@ -1703,9 +2468,9 @@ static int stmmac_hw_setup(struct net_de +@@ -1708,9 +2473,9 @@ static int stmmac_hw_setup(struct net_de /* Initialize the MAC Core */ priv->hw->mac->core_init(priv->hw, dev->mtu); @@ -4434,7 +4412,7 @@ ret = priv->hw->mac->rx_ipc(priv->hw); if (!ret) { -@@ -1715,10 +2480,7 @@ static int stmmac_hw_setup(struct net_de +@@ -1720,10 +2485,7 @@ static int stmmac_hw_setup(struct net_de } /* Enable the MAC Rx/Tx */ @@ -4446,7 +4424,7 @@ /* Set the HW DMA mode and the COE */ stmmac_dma_operation_mode(priv); -@@ -1726,6 +2488,10 @@ static int stmmac_hw_setup(struct net_de +@@ -1731,6 +2493,10 @@ static int stmmac_hw_setup(struct net_de stmmac_mmc_setup(priv); if (init_ptp) { @@ -4457,7 +4435,7 @@ ret = stmmac_init_ptp(priv); if (ret == -EOPNOTSUPP) netdev_warn(priv->dev, "PTP not supported by HW\n"); -@@ -1740,35 +2506,37 @@ static int stmmac_hw_setup(struct net_de +@@ -1745,35 +2511,37 @@ static int stmmac_hw_setup(struct net_de __func__); #endif /* Start the ball rolling... */ @@ -4509,7 +4487,7 @@ /** * stmmac_open - open entry point of the driver * @dev : pointer to the device structure. 
-@@ -1837,7 +2605,7 @@ static int stmmac_open(struct net_device +@@ -1842,7 +2610,7 @@ static int stmmac_open(struct net_device netdev_err(priv->dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n", __func__, dev->irq, ret); @@ -4518,7 +4496,7 @@ } /* Request the Wake IRQ in case of another line is used for WoL */ -@@ -1864,8 +2632,8 @@ static int stmmac_open(struct net_device +@@ -1869,8 +2637,8 @@ static int stmmac_open(struct net_device } } @@ -4529,7 +4507,7 @@ return 0; -@@ -1874,7 +2642,12 @@ lpiirq_error: +@@ -1879,7 +2647,12 @@ lpiirq_error: free_irq(priv->wol_irq, dev); wolirq_error: free_irq(dev->irq, dev); @@ -4542,7 +4520,7 @@ init_error: free_dma_desc_resources(priv); dma_desc_error: -@@ -1903,9 +2676,9 @@ static int stmmac_release(struct net_dev +@@ -1908,9 +2681,9 @@ static int stmmac_release(struct net_dev phy_disconnect(dev->phydev); } @@ -4554,7 +4532,7 @@ del_timer_sync(&priv->txtimer); -@@ -1917,14 +2690,13 @@ static int stmmac_release(struct net_dev +@@ -1922,14 +2695,13 @@ static int stmmac_release(struct net_dev free_irq(priv->lpi_irq, dev); /* Stop TX/RX DMA and clear the descriptors */ @@ -4571,7 +4549,7 @@ netif_carrier_off(dev); -@@ -1943,22 +2715,24 @@ static int stmmac_release(struct net_dev +@@ -1948,22 +2720,24 @@ static int stmmac_release(struct net_dev * @des: buffer start address * @total_len: total length to fill in descriptors * @last_segmant: condition for the last descriptor @@ -4600,7 +4578,7 @@ desc->des0 = cpu_to_le32(des + (total_len - tmp_len)); buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? -@@ -1966,7 +2740,7 @@ static void stmmac_tso_allocator(struct +@@ -1971,7 +2745,7 @@ static void stmmac_tso_allocator(struct priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, 0, 1, @@ -4609,7 +4587,7 @@ 0, 0); tmp_len -= TSO_MAX_BUFF_SIZE; -@@ -2002,23 +2776,28 @@ static void stmmac_tso_allocator(struct +@@ -2007,23 +2781,28 @@ static void stmmac_tso_allocator(struct */ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -4644,7 +4622,7 @@ /* This is a hard error, log it. 
*/ netdev_err(priv->dev, "%s: Tx Ring full when queue awake\n", -@@ -2033,10 +2812,10 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2038,10 +2817,10 @@ static netdev_tx_t stmmac_tso_xmit(struc /* set new MSS value if needed */ if (mss != priv->mss) { @@ -4657,7 +4635,7 @@ } if (netif_msg_tx_queued(priv)) { -@@ -2046,9 +2825,9 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2051,9 +2830,9 @@ static netdev_tx_t stmmac_tso_xmit(struc skb->data_len); } @@ -4669,7 +4647,7 @@ first = desc; /* first descriptor: fill Headers on Buf1 */ -@@ -2057,9 +2836,8 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2062,9 +2841,8 @@ static netdev_tx_t stmmac_tso_xmit(struc if (dma_mapping_error(priv->device, des)) goto dma_map_err; @@ -4681,7 +4659,7 @@ first->des0 = cpu_to_le32(des); -@@ -2070,7 +2848,7 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2075,7 +2853,7 @@ static netdev_tx_t stmmac_tso_xmit(struc /* If needed take extra descriptors to fill the remaining payload */ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; @@ -4690,7 +4668,7 @@ /* Prepare fragments */ for (i = 0; i < nfrags; i++) { -@@ -2079,24 +2857,34 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2084,24 +2862,34 @@ static netdev_tx_t stmmac_tso_xmit(struc des = skb_frag_dma_map(priv->device, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); @@ -4734,7 +4712,7 @@ } dev->stats.tx_bytes += skb->len; -@@ -2128,7 +2916,7 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2133,7 +2921,7 @@ static netdev_tx_t stmmac_tso_xmit(struc priv->hw->desc->prepare_tso_tx_desc(first, 1, proto_hdr_len, pay_len, @@ -4743,7 +4721,7 @@ tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); /* If context desc is used to change MSS */ -@@ -2143,20 +2931,20 @@ static netdev_tx_t stmmac_tso_xmit(struc +@@ -2155,20 +2943,20 @@ static netdev_tx_t stmmac_tso_xmit(struc if (netif_msg_pktdata(priv)) { pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", @@ -4770,7 +4748,7 @@ return NETDEV_TX_OK; -@@ -2180,21 +2968,27 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2192,21 +2980,27 @@ static netdev_tx_t stmmac_xmit(struct sk struct stmmac_priv *priv = netdev_priv(dev); unsigned int nopaged_len = skb_headlen(skb); int i, csum_insertion = 0, is_jumbo = 0; @@ -4802,7 +4780,7 @@ /* This is a hard error, log it. 
*/ netdev_err(priv->dev, "%s: Tx Ring full when queue awake\n", -@@ -2206,20 +3000,18 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2218,20 +3012,18 @@ static netdev_tx_t stmmac_xmit(struct sk if (priv->tx_path_in_lpi_mode) stmmac_disable_eee_mode(priv); @@ -4826,7 +4804,7 @@ enh_desc = priv->plat->enh_desc; /* To program the descriptors according to the size of the frame */ if (enh_desc) -@@ -2227,7 +3019,7 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2239,7 +3031,7 @@ static netdev_tx_t stmmac_xmit(struct sk if (unlikely(is_jumbo) && likely(priv->synopsys_id < DWMAC_CORE_4_00)) { @@ -4835,7 +4813,7 @@ if (unlikely(entry < 0)) goto dma_map_err; } -@@ -2240,48 +3032,56 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2252,48 +3044,56 @@ static netdev_tx_t stmmac_xmit(struct sk entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); if (likely(priv->extend_desc)) @@ -4905,7 +4883,7 @@ priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false); -@@ -2289,10 +3089,10 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2301,10 +3101,10 @@ static netdev_tx_t stmmac_xmit(struct sk print_pkt(skb->data, skb->len); } @@ -4918,7 +4896,7 @@ } dev->stats.tx_bytes += skb->len; -@@ -2327,14 +3127,14 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2339,14 +3139,14 @@ static netdev_tx_t stmmac_xmit(struct sk if (dma_mapping_error(priv->device, des)) goto dma_map_err; @@ -4936,7 +4914,7 @@ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)) { -@@ -2346,7 +3146,7 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2358,7 +3158,7 @@ static netdev_tx_t stmmac_xmit(struct sk /* Prepare the first descriptor setting the OWN bit too */ priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len, csum_insertion, priv->mode, 1, @@ -4945,7 +4923,7 @@ /* The own bit must be the latest setting done when prepare the * descriptor and then barrier is needed to make sure that -@@ -2355,13 +3155,13 @@ static netdev_tx_t stmmac_xmit(struct sk +@@ -2367,13 +3167,13 @@ static netdev_tx_t stmmac_xmit(struct sk dma_wmb(); } @@ -4962,7 +4940,7 @@ return NETDEV_TX_OK; -@@ -2389,9 +3189,9 @@ static void stmmac_rx_vlan(struct net_de +@@ -2401,9 +3201,9 @@ static void stmmac_rx_vlan(struct net_de } @@ -4974,7 +4952,7 @@ return 0; return 1; -@@ -2400,30 +3200,33 @@ static inline int stmmac_rx_threshold_co +@@ -2412,30 +3212,33 @@ static inline int stmmac_rx_threshold_co /** * stmmac_rx_refill - refill used skb preallocated buffers * @priv: driver private structure @@ -5015,7 +4993,7 @@ if (unlikely(net_ratelimit())) dev_err(priv->device, "fail to alloc skb entry %d\n", -@@ -2431,28 +3234,28 @@ static inline void stmmac_rx_refill(stru +@@ -2443,28 +3246,28 @@ static inline void stmmac_rx_refill(stru break; } @@ -5052,7 +5030,7 @@ netif_dbg(priv, rx_status, priv->dev, "refill entry #%d\n", entry); -@@ -2468,31 +3271,33 @@ static inline void stmmac_rx_refill(stru +@@ -2480,31 +3283,33 @@ static inline void stmmac_rx_refill(stru entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); } @@ -5093,7 +5071,7 @@ priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true); } -@@ -2502,9 +3307,9 @@ static int stmmac_rx(struct stmmac_priv +@@ -2514,9 +3319,9 @@ static int stmmac_rx(struct stmmac_priv struct dma_desc *np; if (priv->extend_desc) @@ -5105,7 +5083,7 @@ /* read the status of the incoming frame */ status = priv->hw->desc->rx_status(&priv->dev->stats, -@@ -2515,20 +3320,20 @@ static int stmmac_rx(struct stmmac_priv +@@ -2527,20 +3332,20 @@ static int stmmac_rx(struct stmmac_priv count++; @@ -5131,7 +5109,7 @@ entry); if 
(unlikely(status == discard_frame)) { priv->dev->stats.rx_errors++; -@@ -2538,9 +3343,9 @@ static int stmmac_rx(struct stmmac_priv +@@ -2550,9 +3355,9 @@ static int stmmac_rx(struct stmmac_priv * them in stmmac_rx_refill() function so that * device can reuse it. */ @@ -5143,7 +5121,7 @@ priv->dma_buf_sz, DMA_FROM_DEVICE); } -@@ -2588,7 +3393,7 @@ static int stmmac_rx(struct stmmac_priv +@@ -2600,7 +3405,7 @@ static int stmmac_rx(struct stmmac_priv */ if (unlikely(!priv->plat->has_gmac4 && ((frame_len < priv->rx_copybreak) || @@ -5152,7 +5130,7 @@ skb = netdev_alloc_skb_ip_align(priv->dev, frame_len); if (unlikely(!skb)) { -@@ -2600,21 +3405,21 @@ static int stmmac_rx(struct stmmac_priv +@@ -2612,21 +3417,21 @@ static int stmmac_rx(struct stmmac_priv } dma_sync_single_for_cpu(priv->device, @@ -5178,7 +5156,7 @@ if (unlikely(!skb)) { netdev_err(priv->dev, "%s: Inconsistent Rx chain\n", -@@ -2623,12 +3428,12 @@ static int stmmac_rx(struct stmmac_priv +@@ -2635,12 +3440,12 @@ static int stmmac_rx(struct stmmac_priv break; } prefetch(skb->data - NET_IP_ALIGN); @@ -5194,7 +5172,7 @@ priv->dma_buf_sz, DMA_FROM_DEVICE); } -@@ -2650,7 +3455,7 @@ static int stmmac_rx(struct stmmac_priv +@@ -2662,7 +3467,7 @@ static int stmmac_rx(struct stmmac_priv else skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -5203,7 +5181,7 @@ priv->dev->stats.rx_packets++; priv->dev->stats.rx_bytes += frame_len; -@@ -2658,7 +3463,7 @@ static int stmmac_rx(struct stmmac_priv +@@ -2670,7 +3475,7 @@ static int stmmac_rx(struct stmmac_priv entry = next_entry; } @@ -5212,7 +5190,7 @@ priv->xstats.rx_pkt_n += count; -@@ -2675,16 +3480,24 @@ static int stmmac_rx(struct stmmac_priv +@@ -2687,16 +3492,24 @@ static int stmmac_rx(struct stmmac_priv */ static int stmmac_poll(struct napi_struct *napi, int budget) { @@ -5241,7 +5219,7 @@ } return work_done; } -@@ -2700,9 +3513,12 @@ static int stmmac_poll(struct napi_struc +@@ -2712,9 +3525,12 @@ static int stmmac_poll(struct napi_struc static void stmmac_tx_timeout(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); @@ -5255,7 +5233,7 @@ } /** -@@ -2825,6 +3641,12 @@ static irqreturn_t stmmac_interrupt(int +@@ -2837,6 +3653,12 @@ static irqreturn_t stmmac_interrupt(int { struct net_device *dev = (struct net_device *)dev_id; struct stmmac_priv *priv = netdev_priv(dev); @@ -5268,7 +5246,7 @@ if (priv->irq_wake) pm_wakeup_event(priv->device, 0); -@@ -2838,16 +3660,30 @@ static irqreturn_t stmmac_interrupt(int +@@ -2850,16 +3672,30 @@ static irqreturn_t stmmac_interrupt(int if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { int status = priv->hw->mac->host_irq_status(priv->hw, &priv->xstats); @@ -5303,7 +5281,7 @@ } /* PCS link status */ -@@ -2932,7 +3768,7 @@ static void sysfs_display_ring(void *hea +@@ -2944,7 +3780,7 @@ static void sysfs_display_ring(void *hea ep++; } else { seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", @@ -5312,7 +5290,7 @@ le32_to_cpu(p->des0), le32_to_cpu(p->des1), le32_to_cpu(p->des2), le32_to_cpu(p->des3)); p++; -@@ -2945,17 +3781,40 @@ static int stmmac_sysfs_ring_read(struct +@@ -2957,17 +3793,40 @@ static int stmmac_sysfs_ring_read(struct { struct net_device *dev = seq->private; struct stmmac_priv *priv = netdev_priv(dev); @@ -5363,7 +5341,7 @@ } return 0; -@@ -3238,11 +4097,14 @@ int stmmac_dvr_probe(struct device *devi +@@ -3250,11 +4109,14 @@ int stmmac_dvr_probe(struct device *devi struct plat_stmmacenet_data *plat_dat, struct stmmac_resources *res) { @@ -5380,7 +5358,7 @@ if (!ndev) return -ENOMEM; -@@ -3284,6 +4146,10 @@ int 
stmmac_dvr_probe(struct device *devi +@@ -3296,6 +4158,10 @@ int stmmac_dvr_probe(struct device *devi if (ret) goto error_hw_init; @@ -5391,7 +5369,7 @@ ndev->netdev_ops = &stmmac_netdev_ops; ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | -@@ -3316,7 +4182,12 @@ int stmmac_dvr_probe(struct device *devi +@@ -3328,7 +4194,12 @@ int stmmac_dvr_probe(struct device *devi "Enable RX Mitigation via HW Watchdog Timer\n"); } @@ -5405,7 +5383,7 @@ spin_lock_init(&priv->lock); -@@ -3361,7 +4232,11 @@ error_netdev_register: +@@ -3373,7 +4244,11 @@ error_netdev_register: priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); error_mdio_register: @@ -5418,7 +5396,7 @@ error_hw_init: free_netdev(ndev); -@@ -3382,10 +4257,9 @@ int stmmac_dvr_remove(struct device *dev +@@ -3394,10 +4269,9 @@ int stmmac_dvr_remove(struct device *dev netdev_info(priv->dev, "%s: removing driver", __func__); @@ -5431,7 +5409,7 @@ netif_carrier_off(ndev); unregister_netdev(ndev); if (priv->plat->stmmac_rst) -@@ -3424,20 +4298,19 @@ int stmmac_suspend(struct device *dev) +@@ -3436,20 +4310,19 @@ int stmmac_suspend(struct device *dev) spin_lock_irqsave(&priv->lock, flags); netif_device_detach(ndev); @@ -5456,7 +5434,7 @@ pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ clk_disable(priv->plat->pclk); -@@ -3453,6 +4326,31 @@ int stmmac_suspend(struct device *dev) +@@ -3465,6 +4338,31 @@ int stmmac_suspend(struct device *dev) EXPORT_SYMBOL_GPL(stmmac_suspend); /** @@ -5488,7 +5466,7 @@ * stmmac_resume - resume callback * @dev: device pointer * Description: when resume this function is invoked to setup the DMA and CORE -@@ -3492,10 +4390,8 @@ int stmmac_resume(struct device *dev) +@@ -3504,10 +4402,8 @@ int stmmac_resume(struct device *dev) spin_lock_irqsave(&priv->lock, flags); @@ -5501,7 +5479,7 @@ /* reset private mss value to force mss context settings at * next tso xmit (only used for gmac4). */ -@@ -3507,9 +4403,9 @@ int stmmac_resume(struct device *dev) +@@ -3519,9 +4415,9 @@ int stmmac_resume(struct device *dev) stmmac_init_tx_coalesce(priv); stmmac_set_rx_mode(ndev); |
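One idiom that recurs throughout the hunks above is the `cur`/`dirty` index pair stepped with `STMMAC_GET_ENTRY()` in `stmmac_rx_refill()` and `stmmac_tx_clean()`. The model below is a hedged, self-contained sketch of that accounting: `STMMAC_GET_ENTRY` is assumed to be the usual power-of-two wraparound increment (`(x + 1) & (size - 1)`), and the helper `ring_dirty_count()` is invented here for illustration — the driver computes the equivalent quantity inline.

```c
/* Model of the cur/dirty ring-index accounting used by the refill and
 * clean loops in the patch. STMMAC_GET_ENTRY is assumed to be the usual
 * power-of-two wrap; ring_dirty_count() is an invented helper.
 */
#include <assert.h>
#include <stdio.h>

#define DMA_RX_SIZE 512u /* must be a power of two for this wrap to work */
#define STMMAC_GET_ENTRY(x, size) (((x) + 1) & ((size) - 1))

struct ring {
	unsigned int cur;   /* next entry the hardware will consume */
	unsigned int dirty; /* next entry software must refill */
};

/* Entries waiting to be refilled; correct for masked indices < size. */
static unsigned int ring_dirty_count(const struct ring *r)
{
	return (r->cur - r->dirty) & (DMA_RX_SIZE - 1);
}

int main(void)
{
	struct ring r = { 0, 0 };

	/* Hardware consumes five buffers... */
	for (int i = 0; i < 5; i++)
		r.cur = STMMAC_GET_ENTRY(r.cur, DMA_RX_SIZE);
	assert(ring_dirty_count(&r) == 5);

	/* ...and the refill loop catches up, as stmmac_rx_refill() does. */
	while (r.dirty != r.cur)
		r.dirty = STMMAC_GET_ENTRY(r.dirty, DMA_RX_SIZE);
	assert(ring_dirty_count(&r) == 0);

	printf("ring model ok\n");
	return 0;
}
```

Note that with masked indices this simple count cannot distinguish a completely full ring from an empty one; the real driver sidesteps that by never letting the refill index pass the consume index, which is why the per-queue state carries both values rather than a single occupancy counter.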