path: root/target/linux/lantiq/patches-4.9/0170-MIPS-lantiq-lock-DMA-register-accesses-for-SMP.patch
From 58078a30038b578c26c532545448fe3746648390 Mon Sep 17 00:00:00 2001
From: Hauke Mehrtens <hauke@hauke-m.de>
Date: Thu, 29 Dec 2016 21:02:57 +0100
Subject: [PATCH] MIPS: lantiq: lock DMA register accesses for SMP

The DMA controller channel and port configuration is changed by
selecting the port or channel in one register and then updating the
configuration in other registers. This has to be done as an atomic
operation. Previously only the local interrupts were deactivated, which
works for single-CPU systems. If the system supports SMP, stronger
locking is needed, so use spinlocks instead.
On more recent SoCs (at least xrx200 and later) there are two memory
regions for changing the configuration; there we could use one area for
each CPU and would not have to synchronize between the CPUs any more.

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
---
 arch/mips/lantiq/xway/dma.c | 38 ++++++++++++++++++++------------------
 1 file changed, 20 insertions(+), 18 deletions(-)

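The pattern being serialized is an indirect register access: a channel is
first selected through LTQ_DMA_CS, and the register writes that follow apply
to that channel, so the select-and-write sequence must run atomically and, on
SMP, under one lock shared by all CPUs. This is also why ltq_dma_open() and
ltq_dma_close() below now write the LTQ_DMA_IRNEN mask directly instead of
calling ltq_dma_enable_irq()/ltq_dma_disable_irq(): those helpers now take the
same non-recursive spinlock, and calling them with the lock already held would
deadlock. The sketch below only illustrates the protected pattern and is not
part of the patch; the helper name ltq_dma_select_and_configure() is
hypothetical.

/*
 * Illustrative sketch only (not part of the patch): the indirect access
 * pattern that ltq_dma_lock protects. The channel is selected in
 * LTQ_DMA_CS and the following register write applies to that channel,
 * so the whole sequence has to run under one lock on SMP systems.
 */
static void ltq_dma_select_and_configure(struct ltq_dma_channel *ch,
					 u32 clear, u32 set, u32 reg)
{
	unsigned long flags;

	/* spin_lock_irqsave() still disables local interrupts, so the
	 * sequence stays atomic on uniprocessor systems exactly as
	 * local_irq_save() did before. */
	spin_lock_irqsave(&ltq_dma_lock, flags);
	ltq_dma_w32(ch->nr, LTQ_DMA_CS);	/* select the channel */
	ltq_dma_w32_mask(clear, set, reg);	/* update its configuration */
	spin_unlock_irqrestore(&ltq_dma_lock, flags);
}
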
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 
@@ -59,16 +60,17 @@
 						ltq_dma_membase + (z))
 
 static void __iomem *ltq_dma_membase;
+static DEFINE_SPINLOCK(ltq_dma_lock);
 
 void
 ltq_dma_enable_irq(struct ltq_dma_channel *ch)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);
 
@@ -77,10 +79,10 @@ ltq_dma_disable_irq(struct ltq_dma_chann
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);
 
@@ -89,10 +91,10 @@ ltq_dma_ack_irq(struct ltq_dma_channel *
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);
 
@@ -101,11 +103,11 @@ ltq_dma_open(struct ltq_dma_channel *ch)
 {
 	unsigned long flag;
 
-	local_irq_save(flag);
+	spin_lock_irqsave(&ltq_dma_lock, flag);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
-	ltq_dma_enable_irq(ch);
-	local_irq_restore(flag);
+	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+	spin_unlock_irqrestore(&ltq_dma_lock, flag);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_open);
 
@@ -114,11 +116,11 @@ ltq_dma_close(struct ltq_dma_channel *ch
 {
 	unsigned long flag;
 
-	local_irq_save(flag);
+	spin_lock_irqsave(&ltq_dma_lock, flag);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
-	ltq_dma_disable_irq(ch);
-	local_irq_restore(flag);
+	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
+	spin_unlock_irqrestore(&ltq_dma_lock, flag);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_close);
 
@@ -133,7 +135,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch
 				&ch->phys, GFP_ATOMIC);
 	memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);
 	ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN);
@@ -142,7 +144,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch
 	ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
 	while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
 		;
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 
 void
@@ -152,11 +154,11 @@ ltq_dma_alloc_tx(struct ltq_dma_channel
 
 	ltq_dma_alloc(ch);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
 	ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);
 
@@ -167,11 +169,11 @@ ltq_dma_alloc_rx(struct ltq_dma_channel
 
 	ltq_dma_alloc(ch);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
 	ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);
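
For completeness, a hypothetical driver-side use of the helpers exported above
might look as follows. This is only a sketch based on the function signatures
visible in this patch; the channel number, the my_tx_* names and the omitted
descriptor/IRQ setup are assumptions, not taken from the patch.

/*
 * Hypothetical driver-side usage of the exported ltq_dma_* helpers
 * (sketch only; includes, descriptor handling and error paths omitted,
 * and the channel number 1 is an arbitrary assumption).
 */
static struct ltq_dma_channel my_tx_ch = { .nr = 1 };

static irqreturn_t my_tx_dma_irq(int irq, void *dev_id)
{
	ltq_dma_ack_irq(&my_tx_ch);	/* acknowledge the channel interrupt */
	/* ... reclaim finished TX descriptors here ... */
	return IRQ_HANDLED;
}

static int my_tx_dma_init(void)
{
	ltq_dma_alloc_tx(&my_tx_ch);	/* allocate/reset descriptors, set up as TX */
	ltq_dma_open(&my_tx_ch);	/* switch the channel on and unmask its IRQ */
	return 0;
}

static void my_tx_dma_exit(void)
{
	ltq_dma_close(&my_tx_ch);	/* mask the IRQ and switch the channel off */
}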