Diffstat (limited to 'target/linux/generic/backport-5.15/729-16-v6.3-net-ethernet-mtk_wed-get-rid-of-queue-lock-for-rx-qu.patch')
-rw-r--r--  target/linux/generic/backport-5.15/729-16-v6.3-net-ethernet-mtk_wed-get-rid-of-queue-lock-for-rx-qu.patch  52
1 file changed, 52 insertions(+), 0 deletions(-)
diff --git a/target/linux/generic/backport-5.15/729-16-v6.3-net-ethernet-mtk_wed-get-rid-of-queue-lock-for-rx-qu.patch b/target/linux/generic/backport-5.15/729-16-v6.3-net-ethernet-mtk_wed-get-rid-of-queue-lock-for-rx-qu.patch
new file mode 100644
index 0000000000..fa6f56dbe7
--- /dev/null
+++ b/target/linux/generic/backport-5.15/729-16-v6.3-net-ethernet-mtk_wed-get-rid-of-queue-lock-for-rx-qu.patch
@@ -0,0 +1,52 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Tue, 10 Jan 2023 10:31:26 +0100
+Subject: [PATCH] net: ethernet: mtk_wed: get rid of queue lock for rx queue
+
+The queue spinlock is currently held in the mtk_wed_wo_queue_rx_clean and
+mtk_wed_wo_queue_refill routines for the MTK Wireless Ethernet Dispatcher
+MCU rx queue. mtk_wed_wo_queue_refill() runs during initialization and
+in the rx tasklet, while mtk_wed_wo_queue_rx_clean() runs in
+mtk_wed_wo_hw_deinit() during the hw de-init phase, after the rx tasklet
+has been disabled. Since the two routines can't run concurrently, get rid
+of the spinlock for the MCU rx queue.
+
+Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://lore.kernel.org/r/36ec3b729542ea60898471d890796f745479ba32.1673342990.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+@@ -138,7 +138,6 @@ mtk_wed_wo_queue_refill(struct mtk_wed_w
+ enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ int n_buf = 0;
+
+- spin_lock_bh(&q->lock);
+ while (q->queued < q->n_desc) {
+ struct mtk_wed_wo_queue_entry *entry;
+ dma_addr_t addr;
+@@ -172,7 +171,6 @@ mtk_wed_wo_queue_refill(struct mtk_wed_w
+ q->queued++;
+ n_buf++;
+ }
+- spin_unlock_bh(&q->lock);
+
+ return n_buf;
+ }
+@@ -316,7 +314,6 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed
+ {
+ struct page *page;
+
+- spin_lock_bh(&q->lock);
+ for (;;) {
+ void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
+
+@@ -325,7 +322,6 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed
+
+ skb_free_frag(buf);
+ }
+- spin_unlock_bh(&q->lock);
+
+ if (!q->cache.va)
+ return;
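
The commit message above rests on an ordering argument: refill only runs while the rx path is active (init and rx tasklet), and rx_clean only runs during de-init after the rx tasklet has been disabled, so the two can never overlap and the queue lock is redundant. Below is a minimal, purely illustrative userspace C sketch of that argument; it is not part of the patch or the driver, and all names in it (model_queue, queue_refill, queue_rx_clean, rx_worker, rx_active) are invented here to model the tasklet-enabled/disabled ordering.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Toy queue standing in for the WED MCU rx queue. */
struct model_queue {
	int queued;
	int n_desc;
};

static struct model_queue q = { .queued = 0, .n_desc = 128 };
static atomic_bool rx_active;	/* stands in for "rx tasklet enabled" */

/* Stand-in for mtk_wed_wo_queue_refill(): called from init and from the
 * rx path only, i.e. while rx_active is set, so no lock is taken. */
static int queue_refill(struct model_queue *mq)
{
	int n_buf = 0;

	while (mq->queued < mq->n_desc) {
		mq->queued++;	/* the real driver allocates and maps a buffer here */
		n_buf++;
	}
	return n_buf;
}

/* Stand-in for mtk_wed_wo_queue_rx_clean(): called from de-init only,
 * after the rx worker has been stopped, so no lock is taken either. */
static void queue_rx_clean(struct model_queue *mq)
{
	while (mq->queued > 0)
		mq->queued--;	/* the real driver unmaps and frees the buffer here */
}

static void *rx_worker(void *arg)
{
	struct model_queue *mq = arg;

	while (atomic_load(&rx_active))
		queue_refill(mq);	/* rx path keeps the queue topped up */
	return NULL;
}

int main(void)
{
	pthread_t rx;

	atomic_store(&rx_active, true);
	queue_refill(&q);			/* init-time refill */
	pthread_create(&rx, NULL, rx_worker, &q);

	/* hw de-init: stop the rx path first, *then* clean the queue.
	 * This ordering is what makes the queue lock unnecessary. */
	atomic_store(&rx_active, false);
	pthread_join(rx, NULL);			/* like waiting for the disabled tasklet to finish */
	queue_rx_clean(&q);

	printf("queued after clean: %d\n", q.queued);
	return 0;
}

Build with "cc -pthread" if you want to try it; the point is only that the consumer of the queue is fully stopped before the cleanup path touches it, which is the same guarantee the driver relies on when dropping spin_lock_bh()/spin_unlock_bh() in the hunks above.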