Diffstat (limited to 'target/linux/brcm47xx/patches-4.1/160-kmap_coherent.patch')
-rw-r--r--  target/linux/brcm47xx/patches-4.1/160-kmap_coherent.patch  70
1 file changed, 70 insertions, 0 deletions
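This commit adds a new patch to the brcm47xx 4.1 patch queue. The patch introduces a cpu_use_kmap_coherent CPU feature macro: the generic arch/mips/include/asm/cpu-features.h defaults it to 1, the BCM47xx cpu-feature-overrides.h forces it to 0, and the kmap_coherent() call sites in arch/mips/mm/c-r4k.c and arch/mips/mm/init.c are guarded with the macro so that BCM47xx always falls back to kmap_atomic(). A standalone sketch of the resulting pattern follows the diff below.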
diff --git a/target/linux/brcm47xx/patches-4.1/160-kmap_coherent.patch b/target/linux/brcm47xx/patches-4.1/160-kmap_coherent.patch
new file mode 100644
index 0000000000..a78bfe85ff
--- /dev/null
+++ b/target/linux/brcm47xx/patches-4.1/160-kmap_coherent.patch
@@ -0,0 +1,70 @@
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -158,6 +158,9 @@
+ #ifndef cpu_has_local_ebase
+ #define cpu_has_local_ebase 1
+ #endif
++#ifndef cpu_use_kmap_coherent
++#define cpu_use_kmap_coherent 1
++#endif
+
+ /*
+ * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors
+--- a/arch/mips/include/asm/mach-bcm47xx/cpu-feature-overrides.h
++++ b/arch/mips/include/asm/mach-bcm47xx/cpu-feature-overrides.h
+@@ -79,4 +79,6 @@
+ #define cpu_scache_line_size() 0
+ #define cpu_has_vz 0
+
++#define cpu_use_kmap_coherent 0
++
+ #endif /* __ASM_MACH_BCM47XX_CPU_FEATURE_OVERRIDES_H */
+--- a/arch/mips/mm/c-r4k.c
++++ b/arch/mips/mm/c-r4k.c
+@@ -592,7 +592,7 @@ static inline void local_r4k_flush_cache
+ */
+ map_coherent = (cpu_has_dc_aliases &&
+ page_mapped(page) && !Page_dcache_dirty(page));
+- if (map_coherent)
++ if (map_coherent && cpu_use_kmap_coherent)
+ vaddr = kmap_coherent(page, addr);
+ else
+ vaddr = kmap_atomic(page);
+@@ -617,7 +617,7 @@ static inline void local_r4k_flush_cache
+ }
+
+ if (vaddr) {
+- if (map_coherent)
++ if (map_coherent && cpu_use_kmap_coherent)
+ kunmap_coherent();
+ else
+ kunmap_atomic(vaddr);
+--- a/arch/mips/mm/init.c
++++ b/arch/mips/mm/init.c
+@@ -160,7 +160,7 @@ void copy_user_highpage(struct page *to,
+ void *vfrom, *vto;
+
+ vto = kmap_atomic(to);
+- if (cpu_has_dc_aliases &&
++ if (cpu_has_dc_aliases && cpu_use_kmap_coherent &&
+ page_mapped(from) && !Page_dcache_dirty(from)) {
+ vfrom = kmap_coherent(from, vaddr);
+ copy_page(vto, vfrom);
+@@ -182,7 +182,7 @@ void copy_to_user_page(struct vm_area_st
+ struct page *page, unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
+ {
+- if (cpu_has_dc_aliases &&
++ if (cpu_has_dc_aliases && cpu_use_kmap_coherent &&
+ page_mapped(page) && !Page_dcache_dirty(page)) {
+ void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+ memcpy(vto, src, len);
+@@ -200,7 +200,7 @@ void copy_from_user_page(struct vm_area_
+ struct page *page, unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
+ {
+- if (cpu_has_dc_aliases &&
++ if (cpu_has_dc_aliases && cpu_use_kmap_coherent &&
+ page_mapped(page) && !Page_dcache_dirty(page)) {
+ void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+ memcpy(dst, vfrom, len);
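As a rough illustration of the pattern the patch applies (a machine override header can pin the feature macro off, a generic default of 1 applies only when no override exists, and each call site consults the macro before using kmap_coherent()), here is a minimal userspace C sketch. It is not kernel code: the puts() bodies and the map_page()/map_coherent names are hypothetical stand-ins for kmap_coherent()/kmap_atomic() and the cpu_has_dc_aliases/page_mapped()/Page_dcache_dirty() checks.

/*
 * Standalone sketch (plain userspace C, not kernel code) of the pattern
 * applied by the patch above.  The override is seen first, mirroring how
 * cpu-features.h pulls in cpu-feature-overrides.h before its defaults.
 */
#include <stdio.h>

/* What mach-bcm47xx/cpu-feature-overrides.h does: force the feature off. */
#define cpu_use_kmap_coherent 0

/* What the generic cpu-features.h does: default to 1 if not overridden. */
#ifndef cpu_use_kmap_coherent
#define cpu_use_kmap_coherent 1
#endif

static void map_page(int map_coherent)
{
	/* Mirrors the guarded call sites in c-r4k.c and mm/init.c. */
	if (map_coherent && cpu_use_kmap_coherent)
		puts("kmap_coherent() path");
	else
		puts("kmap_atomic() fallback");
}

int main(void)
{
	/* Pretend cpu_has_dc_aliases && page_mapped() && !Page_dcache_dirty() held. */
	map_page(1);
	return 0;
}

With the override in place the conditional always takes the fallback branch, which is what the patch achieves for BCM47xx while leaving the behaviour of other MIPS platforms unchanged.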