path: root/target/linux/bcm27xx/patches-5.15/950-0025-arm-partially-revert-702b94bff3c50542a6e4ab9a4f4cef0.patch
From 08a1fec2fe9884288efd30ca814ae51fe84f842b Mon Sep 17 00:00:00 2001
From: Dan Pasanen <dan.pasanen@gmail.com>
Date: Thu, 21 Sep 2017 09:55:42 -0500
Subject: [PATCH] arm: partially revert
 702b94bff3c50542a6e4ab9a4f4cef093262fe65

* Re-expose some dma APIs for use in VCSM
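
* For reference, a minimal sketch of the usage pattern these exports
  enable is included below. It is illustrative only: the helper names
  are hypothetical and not taken from the VCSM driver, but the dmac_*
  prototypes match the declarations re-added to cacheflush.h in this
  patch.

    #include <linux/types.h>      /* size_t */
    #include <asm/cacheflush.h>   /* dmac_clean_range(), dmac_inv_range() */

    /* Clean dirty CPU cache lines so a device sees the buffer contents. */
    static void example_sync_for_device(void *buf, size_t len)
    {
            dmac_clean_range(buf, (char *)buf + len);
    }

    /* Discard stale CPU cache lines before reading data a device wrote. */
    static void example_sync_for_cpu(void *buf, size_t len)
    {
            dmac_inv_range(buf, (char *)buf + len);
    }

  When MULTI_CACHE is defined, these names resolve through the
  cpu_cache_fns table; otherwise they resolve through the __glue()
  macros in glue-cache.h, as in the hunks below.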
---
 arch/arm/include/asm/cacheflush.h | 21 +++++++++++++++++++++
 arch/arm/include/asm/glue-cache.h |  2 ++
 arch/arm/mm/proc-macros.S         |  2 ++
 arch/arm/mm/proc-syms.c           |  3 +++
 4 files changed, 28 insertions(+)

--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -91,6 +91,21 @@
  *	DMA Cache Coherency
  *	===================
  *
+ *	dma_inv_range(start, end)
+ *
+ *		Invalidate (discard) the specified virtual address range.
+ *		May not write back any entries.  If 'start' or 'end'
+ *		are not cache line aligned, those lines must be written
+ *		back.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
+ *	dma_clean_range(start, end)
+ *
+ *		Clean (write back) the specified virtual address range.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
  *	dma_flush_range(start, end)
  *
  *		Clean and invalidate the specified virtual address range.
@@ -112,6 +127,8 @@ struct cpu_cache_fns {
 	void (*dma_map_area)(const void *, size_t, int);
 	void (*dma_unmap_area)(const void *, size_t, int);
 
+	void (*dma_inv_range)(const void *, const void *);
+	void (*dma_clean_range)(const void *, const void *);
 	void (*dma_flush_range)(const void *, const void *);
 } __no_randomize_layout;
 
@@ -137,6 +154,8 @@ extern struct cpu_cache_fns cpu_cache;
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
+#define dmac_inv_range			cpu_cache.dma_inv_range
+#define dmac_clean_range		cpu_cache.dma_clean_range
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
@@ -156,6 +175,8 @@ extern void __cpuc_flush_dcache_area(voi
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
+extern void dmac_inv_range(const void *, const void *);
+extern void dmac_clean_range(const void *, const void *);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -155,6 +155,8 @@ static inline void nop_dma_unmap_area(co
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
 #define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
+#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
+#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
 #endif
 
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -334,6 +334,8 @@ ENTRY(\name\()_cache_fns)
 	.long	\name\()_flush_kern_dcache_area
 	.long	\name\()_dma_map_area
 	.long	\name\()_dma_unmap_area
+	.long	\name\()_dma_inv_range
+	.long	\name\()_dma_clean_range
 	.long	\name\()_dma_flush_range
 	.size	\name\()_cache_fns, . - \name\()_cache_fns
 .endm
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -27,6 +27,9 @@ EXPORT_SYMBOL(__cpuc_flush_user_all);
 EXPORT_SYMBOL(__cpuc_flush_user_range);
 EXPORT_SYMBOL(__cpuc_coherent_kern_range);
 EXPORT_SYMBOL(__cpuc_flush_dcache_area);
+EXPORT_SYMBOL(dmac_inv_range);
+EXPORT_SYMBOL(dmac_clean_range);
+EXPORT_SYMBOL(dmac_flush_range);
 #else
 EXPORT_SYMBOL(cpu_cache);
 #endif