Diffstat (limited to 'target/linux/bcm27xx/patches-5.4/950-0435-arm64-rename-variables-used-to-calculate-ZONE_DMA32-.patch')
-rw-r--r-- target/linux/bcm27xx/patches-5.4/950-0435-arm64-rename-variables-used-to-calculate-ZONE_DMA32-.patch | 117
1 file changed, 117 insertions, 0 deletions
diff --git a/target/linux/bcm27xx/patches-5.4/950-0435-arm64-rename-variables-used-to-calculate-ZONE_DMA32-.patch b/target/linux/bcm27xx/patches-5.4/950-0435-arm64-rename-variables-used-to-calculate-ZONE_DMA32-.patch
new file mode 100644
index 0000000000..df184f53ee
--- /dev/null
+++ b/target/linux/bcm27xx/patches-5.4/950-0435-arm64-rename-variables-used-to-calculate-ZONE_DMA32-.patch
@@ -0,0 +1,117 @@
+From 4d2bd7f66bac81b042afc2a6e742bd776a5a3938 Mon Sep 17 00:00:00 2001
+From: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+Date: Wed, 11 Sep 2019 20:25:44 +0200
+Subject: [PATCH] arm64: rename variables used to calculate
+ ZONE_DMA32's size
+
+commit a573cdd7973dedd87e62196c400332896bb236c8 upstream.
+
+Let the name indicate that they are used to calculate ZONE_DMA32's size
+as opposed to ZONE_DMA.
+
+Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+---
+ arch/arm64/mm/init.c | 30 +++++++++++++++---------------
+ 1 file changed, 15 insertions(+), 15 deletions(-)
+
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -50,7 +50,7 @@
+ s64 memstart_addr __ro_after_init = -1;
+ EXPORT_SYMBOL(memstart_addr);
+
+-phys_addr_t arm64_dma_phys_limit __ro_after_init;
++phys_addr_t arm64_dma32_phys_limit __ro_after_init;
+
+ #ifdef CONFIG_KEXEC_CORE
+ /*
+@@ -168,7 +168,7 @@ static void __init reserve_elfcorehdr(vo
+ * currently assumes that for memory starting above 4G, 32-bit devices will
+ * use a DMA offset.
+ */
+-static phys_addr_t __init max_zone_dma_phys(void)
++static phys_addr_t __init max_zone_dma32_phys(void)
+ {
+ phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
+ return min(offset + (1ULL << 32), memblock_end_of_DRAM());
+@@ -181,7 +181,7 @@ static void __init zone_sizes_init(unsig
+ unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
+
+ #ifdef CONFIG_ZONE_DMA32
+- max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma_phys_limit);
++ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
+ #endif
+ max_zone_pfns[ZONE_NORMAL] = max;
+
+@@ -194,16 +194,16 @@ static void __init zone_sizes_init(unsig
+ {
+ struct memblock_region *reg;
+ unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
+- unsigned long max_dma = min;
++ unsigned long max_dma32 = min;
+
+ memset(zone_size, 0, sizeof(zone_size));
+
+ /* 4GB maximum for 32-bit only capable devices */
+ #ifdef CONFIG_ZONE_DMA32
+- max_dma = PFN_DOWN(arm64_dma_phys_limit);
+- zone_size[ZONE_DMA32] = max_dma - min;
++ max_dma32 = PFN_DOWN(arm64_dma32_phys_limit);
++ zone_size[ZONE_DMA32] = max_dma32 - min;
+ #endif
+- zone_size[ZONE_NORMAL] = max - max_dma;
++ zone_size[ZONE_NORMAL] = max - max_dma32;
+
+ memcpy(zhole_size, zone_size, sizeof(zhole_size));
+
+@@ -215,14 +215,14 @@ static void __init zone_sizes_init(unsig
+ continue;
+
+ #ifdef CONFIG_ZONE_DMA32
+- if (start < max_dma) {
+- unsigned long dma_end = min(end, max_dma);
++ if (start < max_dma32) {
++ unsigned long dma_end = min(end, max_dma32);
+ zhole_size[ZONE_DMA32] -= dma_end - start;
+ }
+ #endif
+- if (end > max_dma) {
++ if (end > max_dma32) {
+ unsigned long normal_end = min(end, max);
+- unsigned long normal_start = max(start, max_dma);
++ unsigned long normal_start = max(start, max_dma32);
+ zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
+ }
+ }
+@@ -410,9 +410,9 @@ void __init arm64_memblock_init(void)
+
+ /* 4GB maximum for 32-bit only capable devices */
+ if (IS_ENABLED(CONFIG_ZONE_DMA32))
+- arm64_dma_phys_limit = max_zone_dma_phys();
++ arm64_dma32_phys_limit = max_zone_dma32_phys();
+ else
+- arm64_dma_phys_limit = PHYS_MASK + 1;
++ arm64_dma32_phys_limit = PHYS_MASK + 1;
+
+ reserve_crashkernel();
+
+@@ -420,7 +420,7 @@ void __init arm64_memblock_init(void)
+
+ high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
+
+- dma_contiguous_reserve(arm64_dma_phys_limit);
++ dma_contiguous_reserve(arm64_dma32_phys_limit);
+ }
+
+ void __init bootmem_init(void)
+@@ -524,7 +524,7 @@ static void __init free_unused_memmap(vo
+ void __init mem_init(void)
+ {
+ if (swiotlb_force == SWIOTLB_FORCE ||
+- max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
++ max_pfn > (arm64_dma32_phys_limit >> PAGE_SHIFT))
+ swiotlb_init(1);
+ else
+ swiotlb_force = SWIOTLB_NO_FORCE;