Diffstat (limited to 'target/linux/layerscape/patches-4.4/9070-Revert-arm64-use-fixmap-region-for-permanent-FDT-map.patch')
-rw-r--r-- | target/linux/layerscape/patches-4.4/9070-Revert-arm64-use-fixmap-region-for-permanent-FDT-map.patch | 304
1 files changed, 304 insertions, 0 deletions
diff --git a/target/linux/layerscape/patches-4.4/9070-Revert-arm64-use-fixmap-region-for-permanent-FDT-map.patch b/target/linux/layerscape/patches-4.4/9070-Revert-arm64-use-fixmap-region-for-permanent-FDT-map.patch
new file mode 100644
index 0000000000..c247db474a
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/9070-Revert-arm64-use-fixmap-region-for-permanent-FDT-map.patch
@@ -0,0 +1,304 @@
+From 6f7a129e59721f6d97a0f06f7078d06f19ade69e Mon Sep 17 00:00:00 2001
+From: Yutang Jiang <yutang.jiang@nxp.com>
+Date: Thu, 21 Jul 2016 19:37:42 +0800
+Subject: [PATCH 70/70] Revert "arm64: use fixmap region for permanent FDT
+ mapping"
+
+Signed-off-by: Yutang Jiang <yutang.jiang@nxp.com>
+---
+ Documentation/arm64/booting.txt |   10 +++----
+ arch/arm64/include/asm/boot.h   |   14 ----------
+ arch/arm64/include/asm/fixmap.h |   15 -----------
+ arch/arm64/include/asm/mmu.h    |    1 -
+ arch/arm64/kernel/head.S        |   39 ++++++++++++++++++++++++++-
+ arch/arm64/kernel/setup.c       |   29 +++++++++++++-------
+ arch/arm64/mm/init.c            |    1 +
+ arch/arm64/mm/mmu.c             |   57 ---------------------------------------
+ 8 files changed, 62 insertions(+), 104 deletions(-)
+ delete mode 100644 arch/arm64/include/asm/boot.h
+
+--- a/Documentation/arm64/booting.txt
++++ b/Documentation/arm64/booting.txt
+@@ -45,13 +45,11 @@ sees fit.)
+ 
+ Requirement: MANDATORY
+ 
+-The device tree blob (dtb) must be placed on an 8-byte boundary and must
+-not exceed 2 megabytes in size. Since the dtb will be mapped cacheable
+-using blocks of up to 2 megabytes in size, it must not be placed within
+-any 2M region which must be mapped with any specific attributes.
++The device tree blob (dtb) must be placed on an 8-byte boundary within
++the first 512 megabytes from the start of the kernel image and must not
++cross a 2-megabyte boundary. This is to allow the kernel to map the
++blob using a single section mapping in the initial page tables.
+ 
+-NOTE: versions prior to v4.2 also require that the DTB be placed within
+-the 512 MB region starting at text_offset bytes below the kernel Image.
+ 
+ 3. Decompress the kernel image
+ ------------------------------
+--- a/arch/arm64/include/asm/boot.h
++++ /dev/null
+@@ -1,14 +0,0 @@
+-
+-#ifndef __ASM_BOOT_H
+-#define __ASM_BOOT_H
+-
+-#include <asm/sizes.h>
+-
+-/*
+- * arm64 requires the DTB to be 8 byte aligned and
+- * not exceed 2MB in size.
+- */
+-#define MIN_FDT_ALIGN	8
+-#define MAX_FDT_SIZE	SZ_2M
+-
+-#endif
+--- a/arch/arm64/include/asm/fixmap.h
++++ b/arch/arm64/include/asm/fixmap.h
+@@ -18,7 +18,6 @@
+ #ifndef __ASSEMBLY__
+ #include <linux/kernel.h>
+ #include <linux/sizes.h>
+-#include <asm/boot.h>
+ #include <asm/page.h>
+ 
+ /*
+@@ -34,20 +33,6 @@
+  */
+ enum fixed_addresses {
+ 	FIX_HOLE,
+-
+-	/*
+-	 * Reserve a virtual window for the FDT that is 2 MB larger than the
+-	 * maximum supported size, and put it at the top of the fixmap region.
+-	 * The additional space ensures that any FDT that does not exceed
+-	 * MAX_FDT_SIZE can be mapped regardless of whether it crosses any
+-	 * 2 MB alignment boundaries.
+-	 *
+-	 * Keep this at the top so it remains 2 MB aligned.
+-	 */
+-#define FIX_FDT_SIZE	(MAX_FDT_SIZE + SZ_2M)
+-	FIX_FDT_END,
+-	FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
+-
+ 	FIX_EARLYCON_MEM_BASE,
+ 	FIX_TEXT_POKE0,
+ 	__end_of_permanent_fixed_addresses,
+--- a/arch/arm64/include/asm/mmu.h
++++ b/arch/arm64/include/asm/mmu.h
+@@ -34,6 +34,5 @@ extern void init_mem_pgprot(void);
+ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+ 			       unsigned long virt, phys_addr_t size,
+ 			       pgprot_t prot);
+-extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
+ 
+ #endif
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -212,6 +212,8 @@ ENTRY(stext)
+ 	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
+ 	adrp	x24, __PHYS_OFFSET
+ 	bl	set_cpu_boot_mode_flag
++
++	bl	__vet_fdt
+ 	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
+ 	/*
+ 	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
+@@ -243,6 +245,24 @@ preserve_boot_args:
+ ENDPROC(preserve_boot_args)
+ 
+ /*
++ * Determine validity of the x21 FDT pointer.
++ * The dtb must be 8-byte aligned and live in the first 512M of memory.
++ */
++__vet_fdt:
++	tst	x21, #0x7
++	b.ne	1f
++	cmp	x21, x24
++	b.lt	1f
++	mov	x0, #(1 << 29)
++	add	x0, x0, x24
++	cmp	x21, x0
++	b.ge	1f
++	ret
++1:
++	mov	x21, #0
++	ret
++ENDPROC(__vet_fdt)
++/*
+  * Macro to create a table entry to the next page.
+  *
+  * tbl:	page table address
+@@ -306,7 +326,8 @@ ENDPROC(preserve_boot_args)
+  * required to get the kernel running. The following sections are required:
+  *   - identity mapping to enable the MMU (low address, TTBR0)
+  *   - first few MB of the kernel linear mapping to jump to once the MMU has
+- *     been enabled
++ *     been enabled, including the FDT blob (TTBR1)
++ *   - pgd entry for fixed mappings (TTBR1)
+  */
+ __create_page_tables:
+ 	adrp	x25, idmap_pg_dir
+@@ -396,6 +417,22 @@ __create_page_tables:
+ 	create_block_map x0, x7, x3, x5, x6
+ 
+ 	/*
++	 * Map the FDT blob (maximum 2MB; must be within 512MB of
++	 * PHYS_OFFSET).
++	 */
++	mov	x3, x21				// FDT phys address
++	and	x3, x3, #~((1 << 21) - 1)	// 2MB aligned
++	mov	x6, #PAGE_OFFSET
++	sub	x5, x3, x24			// subtract PHYS_OFFSET
++	tst	x5, #~((1 << 29) - 1)		// within 512MB?
++	csel	x21, xzr, x21, ne		// zero the FDT pointer
++	b.ne	1f
++	add	x5, x5, x6			// __va(FDT blob)
++	add	x6, x5, #1 << 21		// 2MB for the FDT blob
++	sub	x6, x6, #1			// inclusive range
++	create_block_map x0, x7, x3, x5, x6
++1:
++	/*
+ 	 * Since the page tables have been populated with non-cacheable
+ 	 * accesses (MMU disabled), invalidate the idmap and swapper page
+ 	 * tables again to remove any speculatively loaded cache lines.
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -87,6 +87,18 @@ static struct resource mem_res[] = {
+ #define kernel_code mem_res[0]
+ #define kernel_data mem_res[1]
+ 
++void __init early_print(const char *str, ...)
++{
++	char buf[256];
++	va_list ap;
++
++	va_start(ap, str);
++	vsnprintf(buf, sizeof(buf), str, ap);
++	va_end(ap);
++
++	printk("%s", buf);
++}
++
+ /*
+  * The recorded values of x0 .. x3 upon kernel entry.
+  */
+@@ -180,14 +192,12 @@ static void __init smp_build_mpidr_hash(
+ 
+ static void __init setup_machine_fdt(phys_addr_t dt_phys)
+ {
+-	void *dt_virt = fixmap_remap_fdt(dt_phys);
+-
+-	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
+-		pr_crit("\n"
+-			"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
+-			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
+-			"\nPlease check your bootloader.",
+-			&dt_phys, dt_virt);
++	if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
++		early_print("\n"
++			"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
++			"The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
++			"\nPlease check your bootloader.\n",
++			dt_phys, phys_to_virt(dt_phys));
+ 
+ 		while (true)
+ 			cpu_relax();
+@@ -294,6 +304,7 @@ void __init setup_arch(char **cmdline_p)
+ 	pr_info("Boot CPU: AArch64 Processor [%08x]\n", read_cpuid_id());
+ 
+ 	sprintf(init_utsname()->machine, ELF_PLATFORM);
++	setup_machine_fdt(__fdt_pointer);
+ 	init_mm.start_code = (unsigned long) _text;
+ 	init_mm.end_code   = (unsigned long) _etext;
+ 	init_mm.end_data   = (unsigned long) _edata;
+@@ -304,8 +315,6 @@ void __init setup_arch(char **cmdline_p)
+ 	early_fixmap_init();
+ 	early_ioremap_init();
+ 
+-	setup_machine_fdt(__fdt_pointer);
+-
+ 	parse_early_param();
+ 
+ 	/*
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -171,6 +171,7 @@ void __init arm64_memblock_init(void)
+ 		memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
+ #endif
+ 
++	early_init_fdt_reserve_self();
+ 	early_init_fdt_scan_reserved_mem();
+ 
+ 	/* 4GB maximum for 32-bit only capable devices */
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -21,7 +21,6 @@
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/init.h>
+-#include <linux/libfdt.h>
+ #include <linux/mman.h>
+ #include <linux/nodemask.h>
+ #include <linux/memblock.h>
+@@ -641,59 +640,3 @@ void __set_fixmap(enum fixed_addresses i
+ 		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
+ 	}
+ }
+-
+-void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
+-{
+-	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
+-	pgprot_t prot = PAGE_KERNEL_RO;
+-	int size, offset;
+-	void *dt_virt;
+-
+-	/*
+-	 * Check whether the physical FDT address is set and meets the minimum
+-	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
+-	 * at least 8 bytes so that we can always access the magic and size
+-	 * fields of the FDT header after mapping the first chunk, double check
+-	 * here if that is indeed the case.
+-	 */
+-	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
+-	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
+-		return NULL;
+-
+-	/*
+-	 * Make sure that the FDT region can be mapped without the need to
+-	 * allocate additional translation table pages, so that it is safe
+-	 * to call create_mapping() this early.
+-	 *
+-	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
+-	 * be in the same PMD as the rest of the fixmap.
+-	 * On 4k pages, we'll use section mappings for the FDT so we only
+-	 * have to be in the same PUD.
+-	 */
+-	BUILD_BUG_ON(dt_virt_base % SZ_2M);
+-
+-	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
+-		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);
+-
+-	offset = dt_phys % SWAPPER_BLOCK_SIZE;
+-	dt_virt = (void *)dt_virt_base + offset;
+-
+-	/* map the first chunk so we can read the size from the header */
+-	create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+-		       SWAPPER_BLOCK_SIZE, prot);
+-
+-	if (fdt_magic(dt_virt) != FDT_MAGIC)
+-		return NULL;
+-
+-	size = fdt_totalsize(dt_virt);
+-	if (size > MAX_FDT_SIZE)
+-		return NULL;
+-
+-	if (offset + size > SWAPPER_BLOCK_SIZE)
+-		create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+-			       round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);
+-
+-	memblock_reserve(dt_phys, size);
+-
+-	return dt_virt;
+-}