path: root/target/linux/brcm2708/patches-4.19/950-0001-arm-partially-revert-702b94bff3c50542a6e4ab9a4f4cef0.patch
From 623cca7c1ef73b6b164d487d3bb3b2bddc60abab Mon Sep 17 00:00:00 2001
From: Dan Pasanen <dan.pasanen@gmail.com>
Date: Thu, 21 Sep 2017 09:55:42 -0500
Subject: [PATCH 001/725] arm: partially revert
 702b94bff3c50542a6e4ab9a4f4cef093262fe65

* Re-expose some DMA cache maintenance APIs for use in VCSM
---
 arch/arm/include/asm/cacheflush.h | 21 +++++++++++++++++++++
 arch/arm/include/asm/glue-cache.h |  2 ++
 arch/arm/mm/proc-macros.S         |  2 ++
 arch/arm/mm/proc-syms.c           |  3 +++
 4 files changed, 28 insertions(+)

--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -94,6 +94,21 @@
  *	DMA Cache Coherency
  *	===================
  *
+ *	dma_inv_range(start, end)
+ *
+ *		Invalidate (discard) the specified virtual address range.
+ *		May not write back any entries.  If 'start' or 'end'
+ *		are not cache line aligned, those lines must be written
+ *		back.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
+ *	dma_clean_range(start, end)
+ *
+ *		Clean (write back) the specified virtual address range.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
  *	dma_flush_range(start, end)
  *
  *		Clean and invalidate the specified virtual address range.
@@ -115,6 +130,8 @@ struct cpu_cache_fns {
 	void (*dma_map_area)(const void *, size_t, int);
 	void (*dma_unmap_area)(const void *, size_t, int);
 
+	void (*dma_inv_range)(const void *, const void *);
+	void (*dma_clean_range)(const void *, const void *);
 	void (*dma_flush_range)(const void *, const void *);
 } __no_randomize_layout;
 
@@ -140,6 +157,8 @@ extern struct cpu_cache_fns cpu_cache;
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
+#define dmac_inv_range			cpu_cache.dma_inv_range
+#define dmac_clean_range		cpu_cache.dma_clean_range
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
@@ -159,6 +178,8 @@ extern void __cpuc_flush_dcache_area(voi
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
+extern void dmac_inv_range(const void *, const void *);
+extern void dmac_clean_range(const void *, const void *);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -158,6 +158,8 @@ static inline void nop_dma_unmap_area(co
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
 #define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
+#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
+#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
 #endif
 
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -335,6 +335,8 @@ ENTRY(\name\()_cache_fns)
 	.long	\name\()_flush_kern_dcache_area
 	.long	\name\()_dma_map_area
 	.long	\name\()_dma_unmap_area
+	.long	\name\()_dma_inv_range
+	.long	\name\()_dma_clean_range
 	.long	\name\()_dma_flush_range
 	.size	\name\()_cache_fns, . - \name\()_cache_fns
 .endm
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -30,6 +30,9 @@ EXPORT_SYMBOL(__cpuc_flush_user_all);
 EXPORT_SYMBOL(__cpuc_flush_user_range);
 EXPORT_SYMBOL(__cpuc_coherent_kern_range);
 EXPORT_SYMBOL(__cpuc_flush_dcache_area);
+EXPORT_SYMBOL(dmac_inv_range);
+EXPORT_SYMBOL(dmac_clean_range);
+EXPORT_SYMBOL(dmac_flush_range);
 #else
 EXPORT_SYMBOL(cpu_cache);
 #endif
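
For context, a minimal sketch of how an out-of-tree consumer such as the VideoCore shared-memory (VCSM) driver might call the symbols re-exported by this patch. Only dmac_inv_range(), dmac_clean_range() and dmac_flush_range() come from the patch above; the module name, buffer and sizes below are illustrative assumptions, not code from the actual vc_sm driver.

    #include <linux/module.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <asm/cacheflush.h>

    static void *shared_buf;                 /* hypothetical buffer shared with the VPU */
    static const size_t buf_size = 4096;

    static int __init dmac_demo_init(void)
    {
            char *buf;

            shared_buf = kmalloc(buf_size, GFP_KERNEL);
            if (!shared_buf)
                    return -ENOMEM;
            buf = shared_buf;

            /* CPU wrote data the other bus master must see: clean (write back). */
            memset(buf, 0xAB, buf_size);
            dmac_clean_range(buf, buf + buf_size);

            /* The other master is about to write: invalidate stale CPU lines. */
            dmac_inv_range(buf, buf + buf_size);

            /* Clean and invalidate in one pass. */
            dmac_flush_range(buf, buf + buf_size);

            return 0;
    }

    static void __exit dmac_demo_exit(void)
    {
            kfree(shared_buf);
    }

    module_init(dmac_demo_init);
    module_exit(dmac_demo_exit);
    MODULE_LICENSE("GPL");

Note that the ranges are passed as (start, end) virtual addresses, matching the prototypes added in cacheflush.h, and that on multi-cache kernels the symbols are only available because proc-syms.c exports them as in the last hunk.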