Diffstat (limited to 'target/linux/bcm27xx/patches-5.4/950-0631-arm64-mm-reserve-CMA-and-crashkernel-in-ZONE_DMA32.patch')
-rw-r--r--  target/linux/bcm27xx/patches-5.4/950-0631-arm64-mm-reserve-CMA-and-crashkernel-in-ZONE_DMA32.patch  |  44
1 file changed, 44 insertions(+), 0 deletions(-)
diff --git a/target/linux/bcm27xx/patches-5.4/950-0631-arm64-mm-reserve-CMA-and-crashkernel-in-ZONE_DMA32.patch b/target/linux/bcm27xx/patches-5.4/950-0631-arm64-mm-reserve-CMA-and-crashkernel-in-ZONE_DMA32.patch
new file mode 100644
index 0000000000..137a2fa4a0
--- /dev/null
+++ b/target/linux/bcm27xx/patches-5.4/950-0631-arm64-mm-reserve-CMA-and-crashkernel-in-ZONE_DMA32.patch
@@ -0,0 +1,44 @@
+From d4cf092a0e923361f521e1bc7d1fbfb1907958b3 Mon Sep 17 00:00:00 2001
+From: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+Date: Thu, 7 Nov 2019 10:56:11 +0100
+Subject: [PATCH] arm64: mm: reserve CMA and crashkernel in ZONE_DMA32
+
+commit bff3b04460a80f425442fe8e5c6ee8c3ebef611f upstream.
+
+With the introduction of ZONE_DMA in arm64 we moved the default CMA and
+crashkernel reservation into that area. This caused a regression on big
+machines that need big CMA and crashkernel reservations. Note that
+ZONE_DMA is only 1GB big.
+
+Restore the previous behavior as the wide majority of devices are OK
+with reserving these in ZONE_DMA32. The ones that need them in ZONE_DMA
+will configure it explicitly.
+
+Fixes: 1a8e1cef7603 ("arm64: use both ZONE_DMA and ZONE_DMA32")
+Reported-by: Qian Cai <cai@lca.pw>
+Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+---
+ arch/arm64/mm/init.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -91,7 +91,7 @@ static void __init reserve_crashkernel(v
+
+ if (crash_base == 0) {
+ /* Current arm64 boot protocol requires 2MB alignment */
+- crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
++ crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
+ crash_size, SZ_2M);
+ if (crash_base == 0) {
+ pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
+@@ -459,7 +459,7 @@ void __init arm64_memblock_init(void)
+
+ high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
+
+- dma_contiguous_reserve(arm64_dma_phys_limit ? : arm64_dma32_phys_limit);
++ dma_contiguous_reserve(arm64_dma32_phys_limit);
+ }
+
+ void __init bootmem_init(void)
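
For context, here is a minimal standalone sketch of the limit selection the patch above changes. This is not the kernel source itself: the two helper functions and the constant values are assumptions for illustration only, mirroring the expression removed and added in the arm64_memblock_init() hunk. Before the fix, the default CMA area was bounded by ZONE_DMA (arm64_dma_phys_limit, roughly 1 GiB) whenever that zone existed; after it, both the default CMA area and the crashkernel search window are bounded by ZONE_DMA32 (arm64_dma32_phys_limit, 4 GiB).

#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

/* Illustrative values only: on arm64, ZONE_DMA tops out at 1 GiB and
 * ZONE_DMA32 at 4 GiB. In the kernel these limits come from the
 * firmware-provided memory map, not constants. */
static const phys_addr_t arm64_dma_phys_limit   = 1ULL << 30;
static const phys_addr_t arm64_dma32_phys_limit = 1ULL << 32;

/* Before the patch: prefer ZONE_DMA when it exists (non-zero limit),
 * matching the removed "arm64_dma_phys_limit ? : arm64_dma32_phys_limit". */
static phys_addr_t cma_limit_before(void)
{
	return arm64_dma_phys_limit ? arm64_dma_phys_limit
				    : arm64_dma32_phys_limit;
}

/* After the patch: always bound the default CMA area by ZONE_DMA32. */
static phys_addr_t cma_limit_after(void)
{
	return arm64_dma32_phys_limit;
}

int main(void)
{
	printf("CMA limit before: 0x%llx\n",
	       (unsigned long long)cma_limit_before());
	printf("CMA limit after:  0x%llx\n",
	       (unsigned long long)cma_limit_after());
	return 0;
}

With the illustrative values above, both helpers return the same 1 GiB limit before the patch only when ZONE_DMA is configured; devices that still need their reservations below 1 GiB are expected to request that placement explicitly, as the commit message notes.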