author    Tomasz Maciej Nowak <tmn505@gmail.com>    2020-11-23 14:59:45 +0100
committer Adrian Schmutzler <freifunk@adrianschmutzler.de>    2020-11-25 13:57:50 +0100
commit    92b3efec54b36797614650c830a57c4f8786b5c9 (patch)
tree      4ed06c90bde035a38ecd6d9a79004717ec3ee5ce /target/linux/mvebu/patches-5.4/005-net-mvneta-rely-on-build_skb-in-mvneta_rx_swbm-poll-.patch
parent    bffb30603cca586ee2a5713748ed1f6e76d6754b (diff)
mvebu: sort patches
Sort patches according to target/linux/generic/PATCHES. Additionally:
- replace hashes in backported patches with the ones from the main Linux tree
- add descriptions to some patches

Signed-off-by: Tomasz Maciej Nowak <tmn505@gmail.com>
[remove 004-add_sata_disk_activity_trigger.patch separately]
Signed-off-by: Adrian Schmutzler <freifunk@adrianschmutzler.de>
Diffstat (limited to 'target/linux/mvebu/patches-5.4/005-net-mvneta-rely-on-build_skb-in-mvneta_rx_swbm-poll-.patch')
-rw-r--r--  target/linux/mvebu/patches-5.4/005-net-mvneta-rely-on-build_skb-in-mvneta_rx_swbm-poll-.patch  303
1 file changed, 303 insertions, 0 deletions
diff --git a/target/linux/mvebu/patches-5.4/005-net-mvneta-rely-on-build_skb-in-mvneta_rx_swbm-poll-.patch b/target/linux/mvebu/patches-5.4/005-net-mvneta-rely-on-build_skb-in-mvneta_rx_swbm-poll-.patch
new file mode 100644
index 0000000000..9038ca8ca8
--- /dev/null
+++ b/target/linux/mvebu/patches-5.4/005-net-mvneta-rely-on-build_skb-in-mvneta_rx_swbm-poll-.patch
@@ -0,0 +1,303 @@
+From 8dc9a0888f4c8e27b25e48ff1b4bc2b3a845cc2d Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Sat, 19 Oct 2019 10:13:23 +0200
+Subject: [PATCH 3/7] net: mvneta: rely on build_skb in mvneta_rx_swbm poll
+ routine
+
+Refactor the mvneta_rx_swbm code, introducing the mvneta_swbm_rx_frame and
+mvneta_swbm_add_rx_fragment routines. Rely on build_skb in order to
+allocate the skb, since the previous patch introduced buffer recycling
+using the page_pool API.
+This patch also fixes an issue in the original driver where DMA buffers
+are accessed before the DMA sync.
+The mvneta driver can run on non-cache-coherent devices, so it is
+necessary to sync DMA buffers before sending them to the device
+in order to avoid memory corruption. Running perf analysis, we can
+see a performance cost associated with this DMA sync (it is,
+in any case, already present in the original driver code). Follow-up
+patches will add more logic to reduce the DMA sync as much as possible.
+
+Signed-off-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
+Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 185 +++++++++++++-------------
+ 1 file changed, 95 insertions(+), 90 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -325,6 +325,11 @@
+ ETH_HLEN + ETH_FCS_LEN, \
+ cache_line_size())
+
++#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
++ NET_SKB_PAD))
++#define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
++#define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD)
++
+ #define IS_TSO_HEADER(txq, addr) \
+ ((addr >= txq->tso_hdrs_phys) && \
+ (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
+@@ -650,7 +655,6 @@ static int txq_number = 8;
+ static int rxq_def;
+
+ static int rx_copybreak __read_mostly = 256;
+-static int rx_header_size __read_mostly = 128;
+
+ /* HW BM need that each port be identify by a unique ID */
+ static int global_port_id;
+@@ -1842,7 +1846,7 @@ static int mvneta_rx_refill(struct mvnet
+ phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
+ dma_dir = page_pool_get_dma_dir(rxq->page_pool);
+ dma_sync_single_for_device(pp->dev->dev.parent, phys_addr,
+- PAGE_SIZE, dma_dir);
++ MVNETA_MAX_RX_BUF_SIZE, dma_dir);
+ mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
+
+ return 0;
+@@ -1960,30 +1964,102 @@ int mvneta_rx_refill_queue(struct mvneta
+ return i;
+ }
+
++static int
++mvneta_swbm_rx_frame(struct mvneta_port *pp,
++ struct mvneta_rx_desc *rx_desc,
++ struct mvneta_rx_queue *rxq,
++ struct page *page)
++{
++ unsigned char *data = page_address(page);
++ int data_len = -MVNETA_MH_SIZE, len;
++ struct net_device *dev = pp->dev;
++ enum dma_data_direction dma_dir;
++
++ if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
++ len = MVNETA_MAX_RX_BUF_SIZE;
++ data_len += len;
++ } else {
++ len = rx_desc->data_size;
++ data_len += len - ETH_FCS_LEN;
++ }
++
++ dma_dir = page_pool_get_dma_dir(rxq->page_pool);
++ dma_sync_single_for_cpu(dev->dev.parent,
++ rx_desc->buf_phys_addr,
++ len, dma_dir);
++
++ rxq->skb = build_skb(data, PAGE_SIZE);
++ if (unlikely(!rxq->skb)) {
++ netdev_err(dev,
++ "Can't allocate skb on queue %d\n",
++ rxq->id);
++ dev->stats.rx_dropped++;
++ rxq->skb_alloc_err++;
++ return -ENOMEM;
++ }
++ page_pool_release_page(rxq->page_pool, page);
++
++ skb_reserve(rxq->skb, MVNETA_MH_SIZE + NET_SKB_PAD);
++ skb_put(rxq->skb, data_len);
++ mvneta_rx_csum(pp, rx_desc->status, rxq->skb);
++
++ rxq->left_size = rx_desc->data_size - len;
++ rx_desc->buf_phys_addr = 0;
++
++ return 0;
++}
++
++static void
++mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
++ struct mvneta_rx_desc *rx_desc,
++ struct mvneta_rx_queue *rxq,
++ struct page *page)
++{
++ struct net_device *dev = pp->dev;
++ enum dma_data_direction dma_dir;
++ int data_len, len;
++
++ if (rxq->left_size > MVNETA_MAX_RX_BUF_SIZE) {
++ len = MVNETA_MAX_RX_BUF_SIZE;
++ data_len = len;
++ } else {
++ len = rxq->left_size;
++ data_len = len - ETH_FCS_LEN;
++ }
++ dma_dir = page_pool_get_dma_dir(rxq->page_pool);
++ dma_sync_single_for_cpu(dev->dev.parent,
++ rx_desc->buf_phys_addr,
++ len, dma_dir);
++ if (data_len > 0) {
++ /* refill descriptor with new buffer later */
++ skb_add_rx_frag(rxq->skb,
++ skb_shinfo(rxq->skb)->nr_frags,
++ page, NET_SKB_PAD, data_len,
++ PAGE_SIZE);
++ }
++ page_pool_release_page(rxq->page_pool, page);
++ rx_desc->buf_phys_addr = 0;
++ rxq->left_size -= len;
++}
++
+ /* Main rx processing when using software buffer management */
+ static int mvneta_rx_swbm(struct napi_struct *napi,
+ struct mvneta_port *pp, int budget,
+ struct mvneta_rx_queue *rxq)
+ {
++ int rcvd_pkts = 0, rcvd_bytes = 0, rx_proc = 0;
+ struct net_device *dev = pp->dev;
+- int rx_todo, rx_proc;
+- int refill = 0;
+- u32 rcvd_pkts = 0;
+- u32 rcvd_bytes = 0;
++ int rx_todo, refill;
+
+ /* Get number of received packets */
+ rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
+- rx_proc = 0;
+
+ /* Fairness NAPI loop */
+- while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) {
++ while (rx_proc < budget && rx_proc < rx_todo) {
+ struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
++ u32 rx_status, index;
+ unsigned char *data;
+ struct page *page;
+- dma_addr_t phys_addr;
+- u32 rx_status, index;
+- int rx_bytes, skb_size, copy_size;
+- int frag_num, frag_size, frag_offset;
+
+ index = rx_desc - rxq->descs;
+ page = (struct page *)rxq->buf_virt_addr[index];
+@@ -1991,100 +2067,30 @@ static int mvneta_rx_swbm(struct napi_st
+ /* Prefetch header */
+ prefetch(data);
+
+- phys_addr = rx_desc->buf_phys_addr;
+ rx_status = rx_desc->status;
+ rx_proc++;
+ rxq->refill_num++;
+
+ if (rx_status & MVNETA_RXD_FIRST_DESC) {
++ int err;
++
+ /* Check errors only for FIRST descriptor */
+ if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
+ mvneta_rx_error(pp, rx_desc);
+ /* leave the descriptor untouched */
+ continue;
+ }
+- rx_bytes = rx_desc->data_size -
+- (ETH_FCS_LEN + MVNETA_MH_SIZE);
+-
+- /* Allocate small skb for each new packet */
+- skb_size = max(rx_copybreak, rx_header_size);
+- rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
+- if (unlikely(!rxq->skb)) {
+- struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+-
+- netdev_err(dev,
+- "Can't allocate skb on queue %d\n",
+- rxq->id);
+-
+- rxq->skb_alloc_err++;
+
+- u64_stats_update_begin(&stats->syncp);
+- stats->rx_dropped++;
+- u64_stats_update_end(&stats->syncp);
++ err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, page);
++ if (err)
+ continue;
+- }
+- copy_size = min(skb_size, rx_bytes);
+-
+- /* Copy data from buffer to SKB, skip Marvell header */
+- memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
+- copy_size);
+- skb_put(rxq->skb, copy_size);
+- rxq->left_size = rx_bytes - copy_size;
+-
+- mvneta_rx_csum(pp, rx_status, rxq->skb);
+- if (rxq->left_size == 0) {
+- int size = copy_size + MVNETA_MH_SIZE;
+-
+- dma_sync_single_range_for_cpu(dev->dev.parent,
+- phys_addr, 0,
+- size,
+- DMA_FROM_DEVICE);
+-
+- /* leave the descriptor and buffer untouched */
+- } else {
+- /* refill descriptor with new buffer later */
+- rx_desc->buf_phys_addr = 0;
+-
+- frag_num = 0;
+- frag_offset = copy_size + MVNETA_MH_SIZE;
+- frag_size = min(rxq->left_size,
+- (int)(PAGE_SIZE - frag_offset));
+- skb_add_rx_frag(rxq->skb, frag_num, page,
+- frag_offset, frag_size,
+- PAGE_SIZE);
+- page_pool_release_page(rxq->page_pool, page);
+- rxq->left_size -= frag_size;
+- }
+ } else {
+- /* Middle or Last descriptor */
+ if (unlikely(!rxq->skb)) {
+ pr_debug("no skb for rx_status 0x%x\n",
+ rx_status);
+ continue;
+ }
+- if (!rxq->left_size) {
+- /* last descriptor has only FCS */
+- /* and can be discarded */
+- dma_sync_single_range_for_cpu(dev->dev.parent,
+- phys_addr, 0,
+- ETH_FCS_LEN,
+- DMA_FROM_DEVICE);
+- /* leave the descriptor and buffer untouched */
+- } else {
+- /* refill descriptor with new buffer later */
+- rx_desc->buf_phys_addr = 0;
+-
+- frag_num = skb_shinfo(rxq->skb)->nr_frags;
+- frag_offset = 0;
+- frag_size = min(rxq->left_size,
+- (int)(PAGE_SIZE - frag_offset));
+- skb_add_rx_frag(rxq->skb, frag_num, page,
+- frag_offset, frag_size,
+- PAGE_SIZE);
+-
+- page_pool_release_page(rxq->page_pool, page);
+- rxq->left_size -= frag_size;
+- }
++ mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, page);
+ } /* Middle or Last descriptor */
+
+ if (!(rx_status & MVNETA_RXD_LAST_DESC))
+@@ -2109,7 +2115,6 @@ static int mvneta_rx_swbm(struct napi_st
+
+ /* clean uncomplete skb pointer in queue */
+ rxq->skb = NULL;
+- rxq->left_size = 0;
+ }
+
+ if (rcvd_pkts)
+@@ -2970,7 +2975,7 @@ static void mvneta_rxq_hw_init(struct mv
+ /* Set Offset */
+ mvneta_rxq_offset_set(pp, rxq, 0);
+ mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
+- PAGE_SIZE :
++ MVNETA_MAX_RX_BUF_SIZE :
+ MVNETA_RX_BUF_SIZE(pp->pkt_size));
+ mvneta_rxq_bm_disable(pp, rxq);
+ mvneta_rxq_fill(pp, rxq, rxq->size);
+@@ -4708,7 +4713,7 @@ static int mvneta_probe(struct platform_
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ pp->id = global_port_id++;
+- pp->rx_offset_correction = 0; /* not relevant for SW BM */
++ pp->rx_offset_correction = NET_SKB_PAD;
+
+ /* Obtain access to BM resources if enabled and already initialized */
+ bm_node = of_parse_phandle(dn, "buffer-manager", 0);
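
The core of the patch above is replacing the alloc-and-copy receive path with build_skb() over page_pool pages, with the DMA sync performed before the CPU reads the buffer. Below is a minimal, self-contained sketch of that pattern for a single-buffer frame. The names toy_rx_desc and toy_rx_one and the descriptor layout are illustrative assumptions, not mvneta code; only the page_pool and skb calls mirror the ones the patch itself uses.

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/page_pool.h>

/* Illustrative only: this is not the mvneta_rx_desc layout. */
struct toy_rx_desc {
	dma_addr_t buf_phys_addr;
	u32 data_size;
};

/*
 * Build an skb around a page_pool page in the order the patch
 * establishes: sync first, then touch the data.
 */
static struct sk_buff *toy_rx_one(struct device *dma_dev,
				  struct page_pool *pool,
				  struct page *page,
				  const struct toy_rx_desc *desc,
				  unsigned int headroom)
{
	struct sk_buff *skb;

	/* Sync the bytes the NIC wrote *before* the CPU reads them;
	 * accessing the buffer first is the bug the commit fixes. */
	dma_sync_single_for_cpu(dma_dev, desc->buf_phys_addr,
				desc->data_size,
				page_pool_get_dma_dir(pool));

	/* Reuse the page as skb data instead of allocating and copying. */
	skb = build_skb(page_address(page), PAGE_SIZE);
	if (unlikely(!skb))
		return NULL;

	/* The skb owns the page now; drop the pool's DMA mapping. */
	page_pool_release_page(pool, page);

	skb_reserve(skb, headroom);	/* HW wrote at this offset in the page */
	skb_put(skb, desc->data_size);	/* the real driver also trims FCS + MH */
	return skb;
}

This is also why the last hunk sets pp->rx_offset_correction to NET_SKB_PAD: with build_skb() the headroom must live inside the DMA buffer itself, so the hardware is told to start writing NET_SKB_PAD bytes into the page, and MVNETA_MAX_RX_BUF_SIZE shrinks to PAGE_SIZE minus room for that headroom and the skb_shared_info that build_skb() places at the end of the buffer.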