Diffstat (limited to 'target/linux/mvebu/patches-5.10/008-net-mvneta-make-tx-buffer-array-agnostic.patch')
-rw-r--r--  target/linux/mvebu/patches-5.10/008-net-mvneta-make-tx-buffer-array-agnostic.patch  210
1 file changed, 210 insertions, 0 deletions
diff --git a/target/linux/mvebu/patches-5.10/008-net-mvneta-make-tx-buffer-array-agnostic.patch b/target/linux/mvebu/patches-5.10/008-net-mvneta-make-tx-buffer-array-agnostic.patch
new file mode 100644
index 0000000000..e79aa816b8
--- /dev/null
+++ b/target/linux/mvebu/patches-5.10/008-net-mvneta-make-tx-buffer-array-agnostic.patch
@@ -0,0 +1,210 @@
+From 9e58c8b410650b5a6eb5b8fad8474bd8425a4023 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Sat, 19 Oct 2019 10:13:26 +0200
+Subject: [PATCH 6/7] net: mvneta: make tx buffer array agnostic
+
+Allow the tx buffer array to contain both skb and xdp buffers in order
+to enable xdp frame recycling when adding XDP_TX verdict support.
+
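+As a rough sketch (editorial illustration, not code from this series),
+a completion path can then free a buffer based on its type tag instead
+of assuming an skb; mvneta_txq_free_buf() below is a hypothetical
+helper name, and the XDP cases only become reachable once the XDP_TX
+patch lands:
+
+	/* Hypothetical helper: release one completed tx buffer by tag.
+	 * Relies on dev_kfree_skb_any() from <linux/skbuff.h> and
+	 * xdp_return_frame() from <net/xdp.h>. */
+	static void mvneta_txq_free_buf(struct mvneta_tx_buf *buf)
+	{
+		switch (buf->type) {
+		case MVNETA_TYPE_SKB:
+			if (buf->skb)
+				dev_kfree_skb_any(buf->skb);
+			break;
+		case MVNETA_TYPE_XDP_TX:
+		case MVNETA_TYPE_XDP_NDO:
+			if (buf->xdpf)
+				xdp_return_frame(buf->xdpf);
+			break;
+		}
+	}
+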
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 66 +++++++++++++++++----------
+ 1 file changed, 43 insertions(+), 23 deletions(-)
+
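+An aside on the ring discipline this change relies on (a simplified
+editorial sketch, not lines from the diff below; the variable names
+put/get are made up): txq->buf[] runs in lockstep with the DMA
+descriptor ring, so the transmit path tags the entry at
+txq->txq_put_index before advancing it, while the completion path
+consumes the entry at txq->txq_get_index:
+
+	/* producer side (mvneta_tx and friends): tag, then advance */
+	struct mvneta_tx_buf *put = &txq->buf[txq->txq_put_index];
+
+	put->type = MVNETA_TYPE_SKB;
+	put->skb = skb;
+	mvneta_txq_inc_put(txq);
+
+	/* consumer side (mvneta_txq_bufs_free): read, then advance */
+	struct mvneta_tx_buf *get = &txq->buf[txq->txq_get_index];
+
+	mvneta_txq_inc_get(txq);
+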
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -565,6 +565,20 @@ struct mvneta_rx_desc {
+ };
+ #endif
+
++enum mvneta_tx_buf_type {
++	MVNETA_TYPE_SKB,
++	MVNETA_TYPE_XDP_TX,
++	MVNETA_TYPE_XDP_NDO,
++};
++
++struct mvneta_tx_buf {
++	enum mvneta_tx_buf_type type;
++	union {
++		struct xdp_frame *xdpf;
++		struct sk_buff *skb;
++	};
++};
++
+ struct mvneta_tx_queue {
+ 	/* Number of this TX queue, in the range 0-7 */
+ 	u8 id;
+@@ -580,8 +594,8 @@ struct mvneta_tx_queue {
+ 	int tx_stop_threshold;
+ 	int tx_wake_threshold;
+
+-	/* Array of transmitted skb */
+-	struct sk_buff **tx_skb;
++	/* Array of transmitted buffers */
++	struct mvneta_tx_buf *buf;
+
+ 	/* Index of last TX DMA descriptor that was inserted */
+ 	int txq_put_index;
+@@ -1793,14 +1807,9 @@ static void mvneta_txq_bufs_free(struct
+ 	int i;
+
+ 	for (i = 0; i < num; i++) {
++		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
+ 		struct mvneta_tx_desc *tx_desc = txq->descs +
+ 						 txq->txq_get_index;
+-		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
+-
+-		if (skb) {
+-			bytes_compl += skb->len;
+-			pkts_compl++;
+-		}
+
+ 		mvneta_txq_inc_get(txq);
+
+@@ -1808,9 +1817,12 @@ static void mvneta_txq_bufs_free(struct
+ 			dma_unmap_single(pp->dev->dev.parent,
+ 					 tx_desc->buf_phys_addr,
+ 					 tx_desc->data_size, DMA_TO_DEVICE);
+-		if (!skb)
++		if (!buf->skb)
+ 			continue;
+-		dev_kfree_skb_any(skb);
++
++		bytes_compl += buf->skb->len;
++		pkts_compl++;
++		dev_kfree_skb_any(buf->skb);
+ 	}
+
+ 	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
+@@ -2335,16 +2347,19 @@ static inline void
+ mvneta_tso_put_hdr(struct sk_buff *skb,
+ 		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
+ {
+-	struct mvneta_tx_desc *tx_desc;
+ 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
++	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
++	struct mvneta_tx_desc *tx_desc;
+
+-	txq->tx_skb[txq->txq_put_index] = NULL;
+ 	tx_desc = mvneta_txq_next_desc_get(txq);
+ 	tx_desc->data_size = hdr_len;
+ 	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
+ 	tx_desc->command |= MVNETA_TXD_F_DESC;
+ 	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
+ 				 txq->txq_put_index * TSO_HEADER_SIZE;
++	buf->type = MVNETA_TYPE_SKB;
++	buf->skb = NULL;
++
+ 	mvneta_txq_inc_put(txq);
+ }
+
+@@ -2353,6 +2368,7 @@ mvneta_tso_put_data(struct net_device *d
+ 		    struct sk_buff *skb, char *data, int size,
+ 		    bool last_tcp, bool is_last)
+ {
++	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ 	struct mvneta_tx_desc *tx_desc;
+
+ 	tx_desc = mvneta_txq_next_desc_get(txq);
+@@ -2366,7 +2382,8 @@ mvneta_tso_put_data(struct net_device *d
+ 	}
+
+ 	tx_desc->command = 0;
+-	txq->tx_skb[txq->txq_put_index] = NULL;
++	buf->type = MVNETA_TYPE_SKB;
++	buf->skb = NULL;
+
+ 	if (last_tcp) {
+ 		/* last descriptor in the TCP packet */
+@@ -2374,7 +2391,7 @@ mvneta_tso_put_data(struct net_device *d
+
+ 		/* last descriptor in SKB */
+ 		if (is_last)
+-			txq->tx_skb[txq->txq_put_index] = skb;
++			buf->skb = skb;
+ 	}
+ 	mvneta_txq_inc_put(txq);
+ 	return 0;
+@@ -2459,6 +2476,7 @@ static int mvneta_tx_frag_process(struct
+ 	int i, nr_frags = skb_shinfo(skb)->nr_frags;
+
+ 	for (i = 0; i < nr_frags; i++) {
++		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ 		void *addr = skb_frag_address(frag);
+
+@@ -2478,12 +2496,13 @@ static int mvneta_tx_frag_process(struct
+ 		if (i == nr_frags - 1) {
+ 			/* Last descriptor */
+ 			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
+-			txq->tx_skb[txq->txq_put_index] = skb;
++			buf->skb = skb;
+ 		} else {
+ 			/* Descriptor in the middle: Not First, Not Last */
+ 			tx_desc->command = 0;
+-			txq->tx_skb[txq->txq_put_index] = NULL;
++			buf->skb = NULL;
+ 		}
++		buf->type = MVNETA_TYPE_SKB;
+ 		mvneta_txq_inc_put(txq);
+ 	}
+
+@@ -2511,6 +2530,7 @@ static netdev_tx_t mvneta_tx(struct sk_b
+ 	struct mvneta_port *pp = netdev_priv(dev);
+ 	u16 txq_id = skb_get_queue_mapping(skb);
+ 	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
++	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ 	struct mvneta_tx_desc *tx_desc;
+ 	int len = skb->len;
+ 	int frags = 0;
+@@ -2543,16 +2563,17 @@ static netdev_tx_t mvneta_tx(struct sk_b
+ 		goto out;
+ 	}
+
++	buf->type = MVNETA_TYPE_SKB;
+ 	if (frags == 1) {
+ 		/* First and Last descriptor */
+ 		tx_cmd |= MVNETA_TXD_FLZ_DESC;
+ 		tx_desc->command = tx_cmd;
+-		txq->tx_skb[txq->txq_put_index] = skb;
++		buf->skb = skb;
+ 		mvneta_txq_inc_put(txq);
+ 	} else {
+ 		/* First but not Last */
+ 		tx_cmd |= MVNETA_TXD_F_DESC;
+-		txq->tx_skb[txq->txq_put_index] = NULL;
++		buf->skb = NULL;
+ 		mvneta_txq_inc_put(txq);
+ 		tx_desc->command = tx_cmd;
+ 		/* Continue with other skb fragments */
+@@ -3138,9 +3159,8 @@ static int mvneta_txq_sw_init(struct mvn
+
+ 	txq->last_desc = txq->size - 1;
+
+-	txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
+-				    GFP_KERNEL);
+-	if (!txq->tx_skb) {
++	txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
++	if (!txq->buf) {
+ 		dma_free_coherent(pp->dev->dev.parent,
+ 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ 				  txq->descs, txq->descs_phys);
+@@ -3152,7 +3172,7 @@ static int mvneta_txq_sw_init(struct mvn
+ 					   txq->size * TSO_HEADER_SIZE,
+ 					   &txq->tso_hdrs_phys, GFP_KERNEL);
+ 	if (!txq->tso_hdrs) {
+-		kfree(txq->tx_skb);
++		kfree(txq->buf);
+ 		dma_free_coherent(pp->dev->dev.parent,
+ 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ 				  txq->descs, txq->descs_phys);
+@@ -3205,7 +3225,7 @@ static void mvneta_txq_sw_deinit(struct
+ {
+ 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+-	kfree(txq->tx_skb);
++	kfree(txq->buf);
+
+ 	if (txq->tso_hdrs)
+ 		dma_free_coherent(pp->dev->dev.parent,