From 849369d6c66d3054688672f97d31fceb8e8230fb Mon Sep 17 00:00:00 2001 From: root Date: Fri, 25 Dec 2015 04:40:36 +0000 Subject: initial_commit --- arch/arm/boot/.gitignore | 5 + arch/arm/boot/Makefile | 119 ++++ arch/arm/boot/bootp/Makefile | 27 + arch/arm/boot/bootp/bootp.lds | 30 + arch/arm/boot/bootp/init.S | 88 +++ arch/arm/boot/bootp/initrd.S | 6 + arch/arm/boot/bootp/kernel.S | 6 + arch/arm/boot/compressed/.gitignore | 7 + arch/arm/boot/compressed/Makefile | 152 ++++ arch/arm/boot/compressed/big-endian.S | 13 + arch/arm/boot/compressed/decompress.c | 50 ++ arch/arm/boot/compressed/head-sa1100.S | 47 ++ arch/arm/boot/compressed/head-shark.S | 139 ++++ arch/arm/boot/compressed/head-sharpsl.S | 150 ++++ arch/arm/boot/compressed/head-shmobile.S | 83 +++ arch/arm/boot/compressed/head-vt8500.S | 46 ++ arch/arm/boot/compressed/head-xscale.S | 41 ++ arch/arm/boot/compressed/head.S | 1121 ++++++++++++++++++++++++++++++ arch/arm/boot/compressed/ll_char_wr.S | 134 ++++ arch/arm/boot/compressed/misc.c | 194 ++++++ arch/arm/boot/compressed/mmcif-sh7372.c | 88 +++ arch/arm/boot/compressed/ofw-shark.c | 260 +++++++ arch/arm/boot/compressed/piggy.gzip.S | 6 + arch/arm/boot/compressed/piggy.lzma.S | 6 + arch/arm/boot/compressed/piggy.lzo.S | 6 + arch/arm/boot/compressed/vmlinux.lds.in | 68 ++ arch/arm/boot/install.sh | 52 ++ 27 files changed, 2944 insertions(+) create mode 100644 arch/arm/boot/.gitignore create mode 100644 arch/arm/boot/Makefile create mode 100644 arch/arm/boot/bootp/Makefile create mode 100644 arch/arm/boot/bootp/bootp.lds create mode 100644 arch/arm/boot/bootp/init.S create mode 100644 arch/arm/boot/bootp/initrd.S create mode 100644 arch/arm/boot/bootp/kernel.S create mode 100644 arch/arm/boot/compressed/.gitignore create mode 100644 arch/arm/boot/compressed/Makefile create mode 100644 arch/arm/boot/compressed/big-endian.S create mode 100644 arch/arm/boot/compressed/decompress.c create mode 100644 arch/arm/boot/compressed/head-sa1100.S create mode 100644 arch/arm/boot/compressed/head-shark.S create mode 100644 arch/arm/boot/compressed/head-sharpsl.S create mode 100644 arch/arm/boot/compressed/head-shmobile.S create mode 100644 arch/arm/boot/compressed/head-vt8500.S create mode 100644 arch/arm/boot/compressed/head-xscale.S create mode 100644 arch/arm/boot/compressed/head.S create mode 100644 arch/arm/boot/compressed/ll_char_wr.S create mode 100644 arch/arm/boot/compressed/misc.c create mode 100644 arch/arm/boot/compressed/mmcif-sh7372.c create mode 100644 arch/arm/boot/compressed/ofw-shark.c create mode 100644 arch/arm/boot/compressed/piggy.gzip.S create mode 100644 arch/arm/boot/compressed/piggy.lzma.S create mode 100644 arch/arm/boot/compressed/piggy.lzo.S create mode 100644 arch/arm/boot/compressed/vmlinux.lds.in create mode 100644 arch/arm/boot/install.sh (limited to 'arch/arm/boot') diff --git a/arch/arm/boot/.gitignore b/arch/arm/boot/.gitignore new file mode 100644 index 00000000..ce1c5ff7 --- /dev/null +++ b/arch/arm/boot/.gitignore @@ -0,0 +1,5 @@ +Image +zImage +xipImage +bootpImage +uImage diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile new file mode 100644 index 00000000..04d687eb --- /dev/null +++ b/arch/arm/boot/Makefile @@ -0,0 +1,119 @@ +# +# arch/arm/boot/Makefile +# +# This file is included by the global makefile so that you can add your own +# architecture-specific flags and dependencies. +# +# This file is subject to the terms and conditions of the GNU General Public +# License. 
See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 1995-2002 Russell King +# + +MKIMAGE := $(srctree)/scripts/mkuboot.sh + +ifneq ($(MACHINE),) +include $(srctree)/$(MACHINE)/Makefile.boot +endif + +# Note: the following conditions must always be true: +# ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET) +# PARAMS_PHYS must be within 4MB of ZRELADDR +# INITRD_PHYS must be in RAM +ZRELADDR := $(zreladdr-y) +PARAMS_PHYS := $(params_phys-y) +INITRD_PHYS := $(initrd_phys-y) + +export ZRELADDR INITRD_PHYS PARAMS_PHYS + +targets := Image zImage xipImage bootpImage uImage + +ifeq ($(CONFIG_XIP_KERNEL),y) + +$(obj)/xipImage: vmlinux FORCE + $(call if_changed,objcopy) + @echo ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))' + +$(obj)/Image $(obj)/zImage: FORCE + @echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)' + @echo 'Only the xipImage target is available in this case' + @false + +else + +$(obj)/xipImage: FORCE + @echo 'Kernel not configured for XIP (CONFIG_XIP_KERNEL!=y)' + @false + +$(obj)/Image: vmlinux FORCE + $(call if_changed,objcopy) + @echo ' Kernel: $@ is ready' + +$(obj)/compressed/vmlinux: $(obj)/Image FORCE + $(Q)$(MAKE) $(build)=$(obj)/compressed $@ + +$(obj)/zImage: $(obj)/compressed/vmlinux FORCE + $(call if_changed,objcopy) + @echo ' Kernel: $@ is ready' + +endif + +#quiet_cmd_uimage = UIMAGE $@ +# cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A arm -O linux -T kernel \ +# -C none -a $(LOADADDR) -e $(STARTADDR) \ +# -n 'Linux-$(KERNELRELEASE)' -d $< $@ + +quiet_cmd_uimage = UIMAGE $@ + cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A arm -O linux -T kernel \ + -C none -a $(LOADADDR) -e $(STARTADDR) \ + -d $< $@ + +ifeq ($(CONFIG_ZBOOT_ROM),y) +$(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT) +else +$(obj)/uImage: LOADADDR=$(ZRELADDR) +endif + +$(obj)/uImage: STARTADDR=$(LOADADDR) + +$(obj)/uImage: $(obj)/zImage FORCE + $(call if_changed,uimage) + @echo ' Image $@ is ready' + +$(obj)/bootp/bootp: $(obj)/zImage initrd FORCE + $(Q)$(MAKE) $(build)=$(obj)/bootp $@ + @: + +$(obj)/bootpImage: $(obj)/bootp/bootp FORCE + $(call if_changed,objcopy) + @echo ' Kernel: $@ is ready' + +PHONY += initrd FORCE +initrd: + @test "$(INITRD_PHYS)" != "" || \ + (echo This machine does not support INITRD; exit -1) + @test "$(INITRD)" != "" || \ + (echo You must specify INITRD; exit -1) + +install: $(obj)/Image + $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ + $(obj)/Image System.map "$(INSTALL_PATH)" + +zinstall: $(obj)/zImage + $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ + $(obj)/zImage System.map "$(INSTALL_PATH)" + +uinstall: $(obj)/uImage + $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ + $(obj)/uImage System.map "$(INSTALL_PATH)" + +zi: + $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ + $(obj)/zImage System.map "$(INSTALL_PATH)" + +i: + $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ + $(obj)/Image System.map "$(INSTALL_PATH)" + +subdir- := bootp compressed diff --git a/arch/arm/boot/bootp/Makefile b/arch/arm/boot/bootp/Makefile new file mode 100644 index 00000000..c394e305 --- /dev/null +++ b/arch/arm/boot/bootp/Makefile @@ -0,0 +1,27 @@ +# +# linux/arch/arm/boot/bootp/Makefile +# +# This file is included by the global makefile so that you can add your own +# architecture-specific flags and dependencies. 
+# + +LDFLAGS_bootp :=-p --no-undefined -X \ + --defsym initrd_phys=$(INITRD_PHYS) \ + --defsym params_phys=$(PARAMS_PHYS) -T +AFLAGS_initrd.o :=-DINITRD=\"$(INITRD)\" + +targets := bootp init.o kernel.o initrd.o + +# Note that bootp.lds picks up kernel.o and initrd.o +$(obj)/bootp: $(src)/bootp.lds $(addprefix $(obj)/,init.o kernel.o initrd.o) FORCE + $(call if_changed,ld) + @: + +# kernel.o and initrd.o includes a binary image using +# .incbin, a dependency which is not tracked automatically + +$(obj)/kernel.o: arch/arm/boot/zImage FORCE + +$(obj)/initrd.o: $(INITRD) FORCE + +PHONY += $(INITRD) FORCE diff --git a/arch/arm/boot/bootp/bootp.lds b/arch/arm/boot/bootp/bootp.lds new file mode 100644 index 00000000..fc54394f --- /dev/null +++ b/arch/arm/boot/bootp/bootp.lds @@ -0,0 +1,30 @@ +/* + * linux/arch/arm/boot/bootp/bootp.lds + * + * Copyright (C) 2000-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + . = 0; + .text : { + _stext = .; + *(.start) + *(.text) + initrd_size = initrd_end - initrd_start; + _etext = .; + } + + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } +} diff --git a/arch/arm/boot/bootp/init.S b/arch/arm/boot/bootp/init.S new file mode 100644 index 00000000..78b50807 --- /dev/null +++ b/arch/arm/boot/bootp/init.S @@ -0,0 +1,88 @@ +/* + * linux/arch/arm/boot/bootp/init.S + * + * Copyright (C) 2000-2003 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * "Header" file for splitting kernel + initrd. Note that we pass + * r0 through to r3 straight through. + * + * This demonstrates how to append code to the start of the kernel + * zImage, and boot the kernel without copying it around. This + * example would be simpler; if we didn't have an object of unknown + * size immediately following the kernel, we could build this into + * a binary blob, and concatenate the zImage using the cat command. + */ + .section .start,#alloc,#execinstr + .type _start, #function + .globl _start + +_start: add lr, pc, #-0x8 @ lr = current load addr + adr r13, data + ldmia r13!, {r4-r6} @ r5 = dest, r6 = length + add r4, r4, lr @ r4 = initrd_start + load addr + bl move @ move the initrd + +/* + * Setup the initrd parameters to pass to the kernel. This can only be + * passed in via the tagged list. + */ + ldmia r13, {r5-r9} @ get size and addr of initrd + @ r5 = ATAG_CORE + @ r6 = ATAG_INITRD2 + @ r7 = initrd start + @ r8 = initrd end + @ r9 = param_struct address + + ldr r10, [r9, #4] @ get first tag + teq r10, r5 @ is it ATAG_CORE? +/* + * If we didn't find a valid tag list, create a dummy ATAG_CORE entry. + */ + movne r10, #0 @ terminator + movne r4, #2 @ Size of this entry (2 words) + stmneia r9, {r4, r5, r10} @ Size, ATAG_CORE, terminator + +/* + * find the end of the tag list, and then add an INITRD tag on the end. + * If there is already an INITRD tag, then we ignore it; the last INITRD + * tag takes precedence. + */ +taglist: ldr r10, [r9, #0] @ tag length + teq r10, #0 @ last tag (zero length)? 
+ addne r9, r9, r10, lsl #2 + bne taglist + + mov r5, #4 @ Size of initrd tag (4 words) + stmia r9, {r5, r6, r7, r8, r10} + b kernel_start @ call kernel + +/* + * Move the block of memory length r6 from address r4 to address r5 + */ +move: ldmia r4!, {r7 - r10} @ move 32-bytes at a time + stmia r5!, {r7 - r10} + ldmia r4!, {r7 - r10} + stmia r5!, {r7 - r10} + subs r6, r6, #8 * 4 + bcs move + mov pc, lr + + .size _start, . - _start + + .align + + .type data,#object +data: .word initrd_start @ source initrd address + .word initrd_phys @ destination initrd address + .word initrd_size @ initrd size + + .word 0x54410001 @ r5 = ATAG_CORE + .word 0x54420005 @ r6 = ATAG_INITRD2 + .word initrd_phys @ r7 + .word initrd_size @ r8 + .word params_phys @ r9 + .size data, . - data diff --git a/arch/arm/boot/bootp/initrd.S b/arch/arm/boot/bootp/initrd.S new file mode 100644 index 00000000..d81ea183 --- /dev/null +++ b/arch/arm/boot/bootp/initrd.S @@ -0,0 +1,6 @@ + .type initrd_start,#object + .globl initrd_start +initrd_start: + .incbin INITRD + .globl initrd_end +initrd_end: diff --git a/arch/arm/boot/bootp/kernel.S b/arch/arm/boot/bootp/kernel.S new file mode 100644 index 00000000..b87a25c7 --- /dev/null +++ b/arch/arm/boot/bootp/kernel.S @@ -0,0 +1,6 @@ + .globl kernel_start +kernel_start: + .incbin "arch/arm/boot/zImage" + .globl kernel_end +kernel_end: + .align 2 diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore new file mode 100644 index 00000000..c6028967 --- /dev/null +++ b/arch/arm/boot/compressed/.gitignore @@ -0,0 +1,7 @@ +font.c +lib1funcs.S +piggy.gzip +piggy.lzo +piggy.lzma +vmlinux +vmlinux.lds diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile new file mode 100644 index 00000000..23aad072 --- /dev/null +++ b/arch/arm/boot/compressed/Makefile @@ -0,0 +1,152 @@ +# +# linux/arch/arm/boot/compressed/Makefile +# +# create a compressed vmlinuz image from the original vmlinux +# + +OBJS = + +# Ensure that mmcif loader code appears early in the image +# to minimise that number of bocks that have to be read in +# order to load it. +ifeq ($(CONFIG_ZBOOT_ROM_MMCIF),y) +ifeq ($(CONFIG_ARCH_SH7372),y) +OBJS += mmcif-sh7372.o +endif +endif + +AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET) +HEAD = head.o +OBJS += misc.o decompress.o +FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c + +# +# Architecture dependencies +# +ifeq ($(CONFIG_ARCH_ACORN),y) +OBJS += ll_char_wr.o font.o +endif + +ifeq ($(CONFIG_ARCH_SHARK),y) +OBJS += head-shark.o ofw-shark.o +endif + +ifeq ($(CONFIG_ARCH_P720T),y) +# Borrow this code from SA1100 +OBJS += head-sa1100.o +endif + +ifeq ($(CONFIG_ARCH_SA1100),y) +OBJS += head-sa1100.o +endif + +ifeq ($(CONFIG_ARCH_VT8500),y) +OBJS += head-vt8500.o +endif + +ifeq ($(CONFIG_CPU_XSCALE),y) +OBJS += head-xscale.o +endif + +ifeq ($(CONFIG_PXA_SHARPSL_DETECT_MACH_ID),y) +OBJS += head-sharpsl.o +endif + +ifeq ($(CONFIG_CPU_ENDIAN_BE32),y) +ifeq ($(CONFIG_CPU_CP15),y) +OBJS += big-endian.o +else +# The endian should be set by h/w design. +endif +endif + +ifeq ($(CONFIG_ARCH_SHMOBILE),y) +OBJS += head-shmobile.o +endif + +# +# We now have a PIC decompressor implementation. Decompressors running +# from RAM should not define ZTEXTADDR. Decompressors running directly +# from ROM or Flash must define ZTEXTADDR (preferably via the config) +# FIXME: Previous assignment to ztextaddr-y is lost here. 
See SHARK +ifeq ($(CONFIG_ZBOOT_ROM),y) +ZTEXTADDR := $(CONFIG_ZBOOT_ROM_TEXT) +ZBSSADDR := $(CONFIG_ZBOOT_ROM_BSS) +else +ZTEXTADDR := 0 +ZBSSADDR := ALIGN(8) +endif + +SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/ + +suffix_$(CONFIG_KERNEL_GZIP) = gzip +suffix_$(CONFIG_KERNEL_LZO) = lzo +suffix_$(CONFIG_KERNEL_LZMA) = lzma + +targets := vmlinux vmlinux.lds \ + piggy.$(suffix_y) piggy.$(suffix_y).o \ + font.o font.c head.o misc.o $(OBJS) + +# Make sure files are removed during clean +extra-y += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S + +ifeq ($(CONFIG_FUNCTION_TRACER),y) +ORIG_CFLAGS := $(KBUILD_CFLAGS) +KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) +endif + +ccflags-y := -fpic -fno-builtin +asflags-y := -Wa,-march=all + +# Supply ZRELADDR to the decompressor via a linker symbol. +ifneq ($(CONFIG_AUTO_ZRELADDR),y) +LDFLAGS_vmlinux += --defsym zreladdr=$(ZRELADDR) +endif +ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) +LDFLAGS_vmlinux += --be8 +endif +# ? +LDFLAGS_vmlinux += -p +# Report unresolved symbol references +LDFLAGS_vmlinux += --no-undefined +# Delete all temporary local symbols +LDFLAGS_vmlinux += -X +# Next argument is a linker script +LDFLAGS_vmlinux += -T + +# For __aeabi_uidivmod +lib1funcs = $(obj)/lib1funcs.o + +$(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S FORCE + $(call cmd,shipped) + +# We need to prevent any GOTOFF relocs being used with references +# to symbols in the .bss section since we cannot relocate them +# independently from the rest at run time. This can be achieved by +# ensuring that no private .bss symbols exist, as global symbols +# always have a GOT entry which is what we need. +# The .data section is already discarded by the linker script so no need +# to bother about it here. +check_for_bad_syms = \ +bad_syms=$$($(CROSS_COMPILE)nm $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \ +[ -z "$$bad_syms" ] || \ + ( echo "following symbols must have non local/private scope:" >&2; \ + echo "$$bad_syms" >&2; rm -f $@; false ) + +$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \ + $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) FORCE + $(call if_changed,ld) + @$(check_for_bad_syms) + +$(obj)/piggy.$(suffix_y): $(obj)/../Image FORCE + $(call if_changed,$(suffix_y)) + +$(obj)/piggy.$(suffix_y).o: $(obj)/piggy.$(suffix_y) FORCE + +CFLAGS_font.o := -Dstatic= + +$(obj)/font.c: $(FONTC) + $(call cmd,shipped) + +$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG) + @sed "$(SEDFLAGS)" < $< > $@ diff --git a/arch/arm/boot/compressed/big-endian.S b/arch/arm/boot/compressed/big-endian.S new file mode 100644 index 00000000..25ab26f1 --- /dev/null +++ b/arch/arm/boot/compressed/big-endian.S @@ -0,0 +1,13 @@ +/* + * linux/arch/arm/boot/compressed/big-endian.S + * + * Switch CPU into big endian mode. 
+ * Author: Nicolas Pitre + */ + + .section ".start", #alloc, #execinstr + + mrc p15, 0, r0, c1, c0, 0 @ read control reg + orr r0, r0, #(1 << 7) @ enable big endian mode + mcr p15, 0, r0, c1, c0, 0 @ write control reg + diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c new file mode 100644 index 00000000..07be5a2f --- /dev/null +++ b/arch/arm/boot/compressed/decompress.c @@ -0,0 +1,50 @@ +#define _LINUX_STRING_H_ + +#include /* for inline */ +#include /* for size_t */ +#include /* for NULL */ +#include +#include + +extern unsigned long free_mem_ptr; +extern unsigned long free_mem_end_ptr; +extern void error(char *); + +#define STATIC static +#define STATIC_RW_DATA /* non-static please */ + +#define ARCH_HAS_DECOMP_WDOG + +/* Diagnostic functions */ +#ifdef DEBUG +# define Assert(cond,msg) {if(!(cond)) error(msg);} +# define Trace(x) fprintf x +# define Tracev(x) {if (verbose) fprintf x ;} +# define Tracevv(x) {if (verbose>1) fprintf x ;} +# define Tracec(c,x) {if (verbose && (c)) fprintf x ;} +# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} +#else +# define Assert(cond,msg) +# define Trace(x) +# define Tracev(x) +# define Tracevv(x) +# define Tracec(c,x) +# define Tracecv(c,x) +#endif + +#ifdef CONFIG_KERNEL_GZIP +#include "../../../../lib/decompress_inflate.c" +#endif + +#ifdef CONFIG_KERNEL_LZO +#include "../../../../lib/decompress_unlzo.c" +#endif + +#ifdef CONFIG_KERNEL_LZMA +#include "../../../../lib/decompress_unlzma.c" +#endif + +int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)) +{ + return decompress(input, len, NULL, NULL, output, NULL, error); +} diff --git a/arch/arm/boot/compressed/head-sa1100.S b/arch/arm/boot/compressed/head-sa1100.S new file mode 100644 index 00000000..6179d94d --- /dev/null +++ b/arch/arm/boot/compressed/head-sa1100.S @@ -0,0 +1,47 @@ +/* + * linux/arch/arm/boot/compressed/head-sa1100.S + * + * Copyright (C) 1999 Nicolas Pitre + * + * SA1100 specific tweaks. This is merged into head.S by the linker. + * + */ + +#include +#include + + .section ".start", "ax" + +__SA1100_start: + + @ Preserve r8/r7 i.e. kernel entry values +#ifdef CONFIG_SA1100_COLLIE + mov r7, #MACH_TYPE_COLLIE +#endif +#ifdef CONFIG_SA1100_SIMPAD + @ UNTIL we've something like an open bootldr + mov r7, #MACH_TYPE_SIMPAD @should be 87 +#endif + mrc p15, 0, r0, c1, c0, 0 @ read control reg + ands r0, r0, #0x0d + beq 99f + + @ Data cache might be active. + @ Be sure to flush kernel binary out of the cache, + @ whatever state it is, before it is turned off. + @ This is done by fetching through currently executed + @ memory to be sure we hit the same cache. + bic r2, pc, #0x1f + add r3, r2, #0x4000 @ 16 kb is quite enough... +1: ldr r0, [r2], #32 + teq r2, r3 + bne 1b + mcr p15, 0, r0, c7, c10, 4 @ drain WB + mcr p15, 0, r0, c7, c7, 0 @ flush I & D caches + + @ disabling MMU and caches + mrc p15, 0, r0, c1, c0, 0 @ read control reg + bic r0, r0, #0x0d @ clear WB, DC, MMU + bic r0, r0, #0x1000 @ clear Icache + mcr p15, 0, r0, c1, c0, 0 +99: diff --git a/arch/arm/boot/compressed/head-shark.S b/arch/arm/boot/compressed/head-shark.S new file mode 100644 index 00000000..089c560e --- /dev/null +++ b/arch/arm/boot/compressed/head-shark.S @@ -0,0 +1,139 @@ +/* The head-file for the Shark + * by Alexander Schulz + * + * Does the following: + * - get the memory layout from firmware. This can only be done as long as the mmu + * is still on. + * - switch the mmu off, so we have physical addresses + * - copy the kernel to 0x08508000. 
This is done to have a fixed address where the + * C-parts (misc.c) are executed. This address must be known at compile-time, + * but the load-address of the kernel depends on how much memory is installed. + * - Jump to this location. + * - Set r8 with 0, r7 with the architecture ID for head.S + */ + +#include + +#include + + .section ".start", "ax" + + b __beginning + +__ofw_data: .long 0 @ the number of memory blocks + .space 128 @ (startaddr,size) ... + .space 128 @ bootargs + .align + +__beginning: mov r4, r0 @ save the entry to the firmware + + mov r0, #0xC0 @ disable irq and fiq + mov r1, r0 + mrs r3, cpsr + bic r2, r3, r0 + eor r2, r2, r1 + msr cpsr_c, r2 + + mov r0, r4 @ get the Memory layout from firmware + adr r1, __ofw_data + add r2, r1, #4 + mov lr, pc + b ofw_init + mov r1, #0 + + adr r2, __mmu_off @ calculate physical address + sub r2, r2, #0xf0000000 @ openprom maps us at f000 virt, 0e50 phys + adr r0, __ofw_data + ldr r0, [r0, #4] + add r2, r2, r0 + add r2, r2, #0x00500000 + + mrc p15, 0, r3, c1, c0 + bic r3, r3, #0xC @ Write Buffer and DCache + bic r3, r3, #0x1000 @ ICache + mcr p15, 0, r3, c1, c0 @ disabled + + mov r0, #0 + mcr p15, 0, r0, c7, c7 @ flush I,D caches on v4 + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 + mcr p15, 0, r0, c8, c7 @ flush I,D TLBs on v4 + + bic r3, r3, #0x1 @ MMU + mcr p15, 0, r3, c1, c0 @ disabled + + mov pc, r2 + +__copy_target: .long 0x08507FFC +__copy_end: .long 0x08607FFC + + .word _start + .word __bss_start + + .align +__temp_stack: .space 128 + +__mmu_off: + adr r0, __ofw_data @ read the 1. entry of the memory map + ldr r0, [r0, #4] + orr r0, r0, #0x00600000 + sub r0, r0, #4 + + ldr r1, __copy_end + ldr r3, __copy_target + +/* r0 = 0x0e600000 (current end of kernelcode) + * r3 = 0x08508000 (where it should begin) + * r1 = 0x08608000 (end of copying area, 1MB) + * The kernel is compressed, so 1 MB should be enough. + * copy the kernel to the beginning of physical memory + * We start from the highest address, so we can copy + * from 0x08500000 to 0x08508000 if we have only 8MB + */ + +/* As we get more 2.6-kernels it gets more and more + * uncomfortable to be bound to kernel images of 1MB only. + * So we add a loop here, to be able to copy some more. + * Alexander Schulz 2005-07-17 + */ + + mov r4, #3 @ How many megabytes to copy + + +__MoveCode: sub r4, r4, #1 + +__Copy: ldr r2, [r0], #-4 + str r2, [r1], #-4 + teq r1, r3 + bne __Copy + + /* The firmware maps us in blocks of 1 MB, the next block is + _below_ the last one. So our decrementing source pointer + ist right here, but the destination pointer must be increased + by 2 MB */ + add r1, r1, #0x00200000 + add r3, r3, #0x00100000 + + teq r4, #0 + bne __MoveCode + + + /* and jump to it */ + adr r2, __go_on @ where we want to jump + adr r0, __ofw_data @ read the 1. 
entry of the memory map + ldr r0, [r0, #4] + sub r2, r2, r0 @ we are mapped add 0e50 now, sub that (-0e00) + sub r2, r2, #0x00500000 @ -0050 + ldr r0, __copy_target @ and add 0850 8000 instead + add r0, r0, #4 + add r2, r2, r0 + mov pc, r2 @ and jump there + +__go_on: + adr sp, __temp_stack + add sp, sp, #128 + adr r0, __ofw_data + mov lr, pc + b create_params + + mov r8, #0 + mov r7, #15 diff --git a/arch/arm/boot/compressed/head-sharpsl.S b/arch/arm/boot/compressed/head-sharpsl.S new file mode 100644 index 00000000..eb0084ea --- /dev/null +++ b/arch/arm/boot/compressed/head-sharpsl.S @@ -0,0 +1,150 @@ +/* + * linux/arch/arm/boot/compressed/head-sharpsl.S + * + * Copyright (C) 2004-2005 Richard Purdie + * + * Sharp's bootloader doesn't pass any kind of machine ID + * so we have to figure out the machine for ourselves... + * + * Support for Poodle, Corgi (SL-C700), Shepherd (SL-C750) + * Husky (SL-C760), Tosa (SL-C6000), Spitz (SL-C3000), + * Akita (SL-C1000) and Borzoi (SL-C3100). + * + */ + +#include +#include + +#ifndef CONFIG_PXA_SHARPSL +#error What am I doing here... +#endif + + .section ".start", "ax" + +__SharpSL_start: + +/* Check for TC6393 - if found we have a Tosa */ + ldr r7, .TOSAID + mov r1, #0x10000000 @ Base address of TC6393 chip + mov r6, #0x03 + ldrh r3, [r1, #8] @ Load TC6393XB Revison: This is 0x0003 + cmp r6, r3 + beq .SHARPEND @ Success -> tosa + +/* Check for pxa270 - if found, branch */ + mrc p15, 0, r4, c0, c0 @ Get Processor ID + and r4, r4, #0xffffff00 + ldr r3, .PXA270ID + cmp r4, r3 + beq .PXA270 + +/* Check for w100 - if not found we have a Poodle */ + ldr r1, .W100ADDR @ Base address of w100 chip + regs offset + + mov r6, #0x31 @ Load Magic Init value + str r6, [r1, #0x280] @ to SCRATCH_UMSK + mov r5, #0x3000 +.W100LOOP: + subs r5, r5, #1 + bne .W100LOOP + mov r6, #0x30 @ Load 2nd Magic Init value + str r6, [r1, #0x280] @ to SCRATCH_UMSK + + ldr r6, [r1, #0] @ Load Chip ID + ldr r3, .W100ID + ldr r7, .POODLEID + cmp r6, r3 + bne .SHARPEND @ We have no w100 - Poodle + +/* Check for pxa250 - if found we have a Corgi */ + ldr r7, .CORGIID + ldr r3, .PXA255ID + cmp r4, r3 + blo .SHARPEND @ We have a PXA250 - Corgi + +/* Check for 64MiB flash - if found we have a Shepherd */ + bl get_flash_ids + ldr r7, .SHEPHERDID + cmp r3, #0x76 @ 64MiB flash + beq .SHARPEND @ We have Shepherd + +/* Must be a Husky */ + ldr r7, .HUSKYID @ Must be Husky + b .SHARPEND + +.PXA270: +/* Check for 16MiB flash - if found we have Spitz */ + bl get_flash_ids + ldr r7, .SPITZID + cmp r3, #0x73 @ 16MiB flash + beq .SHARPEND @ We have Spitz + +/* Check for a second SCOOP chip - if found we have Borzoi */ + ldr r1, .SCOOP2ADDR + ldr r7, .BORZOIID + mov r6, #0x0140 + strh r6, [r1] + ldrh r6, [r1] + cmp r6, #0x0140 + beq .SHARPEND @ We have Borzoi + +/* Must be Akita */ + ldr r7, .AKITAID + b .SHARPEND @ We have Borzoi + +.PXA255ID: + .word 0x69052d00 @ PXA255 Processor ID +.PXA270ID: + .word 0x69054100 @ PXA270 Processor ID +.W100ID: + .word 0x57411002 @ w100 Chip ID +.W100ADDR: + .word 0x08010000 @ w100 Chip ID Reg Address +.SCOOP2ADDR: + .word 0x08800040 +.POODLEID: + .word MACH_TYPE_POODLE +.CORGIID: + .word MACH_TYPE_CORGI +.SHEPHERDID: + .word MACH_TYPE_SHEPHERD +.HUSKYID: + .word MACH_TYPE_HUSKY +.TOSAID: + .word MACH_TYPE_TOSA +.SPITZID: + .word MACH_TYPE_SPITZ +.AKITAID: + .word MACH_TYPE_AKITA +.BORZOIID: + .word MACH_TYPE_BORZOI + +/* + * Return: r2 - NAND Manufacturer ID + * r3 - NAND Chip ID + * Corrupts: r1 + */ +get_flash_ids: + mov r1, #0x0c000000 @ Base address of NAND chip 
+ ldrb r3, [r1, #24] @ Load FLASHCTL + bic r3, r3, #0x11 @ SET NCE + orr r3, r3, #0x0a @ SET CLR + FLWP + strb r3, [r1, #24] @ Save to FLASHCTL + mov r2, #0x90 @ Command "readid" + strb r2, [r1, #20] @ Save to FLASHIO + bic r3, r3, #2 @ CLR CLE + orr r3, r3, #4 @ SET ALE + strb r3, [r1, #24] @ Save to FLASHCTL + mov r2, #0 @ Address 0x00 + strb r2, [r1, #20] @ Save to FLASHIO + bic r3, r3, #4 @ CLR ALE + strb r3, [r1, #24] @ Save to FLASHCTL +.fids1: + ldrb r3, [r1, #24] @ Load FLASHCTL + tst r3, #32 @ Is chip ready? + beq .fids1 + ldrb r2, [r1, #20] @ NAND Manufacturer ID + ldrb r3, [r1, #20] @ NAND Chip ID + mov pc, lr + +.SHARPEND: diff --git a/arch/arm/boot/compressed/head-shmobile.S b/arch/arm/boot/compressed/head-shmobile.S new file mode 100644 index 00000000..c943d2e7 --- /dev/null +++ b/arch/arm/boot/compressed/head-shmobile.S @@ -0,0 +1,83 @@ +/* + * The head-file for SH-Mobile ARM platforms + * + * Kuninori Morimoto + * Simon Horman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifdef CONFIG_ZBOOT_ROM + + .section ".start", "ax" + + /* load board-specific initialization code */ +#include + +#ifdef CONFIG_ZBOOT_ROM_MMCIF + /* Load image from MMC */ + adr sp, __tmp_stack + 128 + ldr r0, __image_start + ldr r1, __image_end + subs r1, r1, r0 + ldr r0, __load_base + bl mmcif_loader + + /* Jump to loaded code */ + ldr r0, __loaded + ldr r1, __image_start + sub r0, r0, r1 + ldr r1, __load_base + add pc, r0, r1 + +__image_start: + .long _start +__image_end: + .long _got_end +__load_base: + .long CONFIG_MEMORY_START + 0x02000000 @ Load at 32Mb into SDRAM +__loaded: + .long __continue + .align +__tmp_stack: + .space 128 +__continue: +#endif /* CONFIG_ZBOOT_ROM_MMCIF */ + + b 1f +__atags:@ tag #1 + .long 12 @ tag->hdr.size = tag_size(tag_core); + .long 0x54410001 @ tag->hdr.tag = ATAG_CORE; + .long 0 @ tag->u.core.flags = 0; + .long 0 @ tag->u.core.pagesize = 0; + .long 0 @ tag->u.core.rootdev = 0; + @ tag #2 + .long 8 @ tag->hdr.size = tag_size(tag_mem32); + .long 0x54410002 @ tag->hdr.tag = ATAG_MEM; + .long CONFIG_MEMORY_SIZE @ tag->u.mem.size = CONFIG_MEMORY_SIZE; + .long CONFIG_MEMORY_START @ @ tag->u.mem.start = CONFIG_MEMORY_START; + @ tag #3 + .long 0 @ tag->hdr.size = 0 + .long 0 @ tag->hdr.tag = ATAG_NONE; +1: + + /* Set board ID necessary for boot */ + ldr r7, 1f @ Set machine type register + adr r8, __atags @ Set atag register + b 2f + +1 : .long MACH_TYPE +2 : + +#endif /* CONFIG_ZBOOT_ROM */ diff --git a/arch/arm/boot/compressed/head-vt8500.S b/arch/arm/boot/compressed/head-vt8500.S new file mode 100644 index 00000000..1dc1e21a --- /dev/null +++ b/arch/arm/boot/compressed/head-vt8500.S @@ -0,0 +1,46 @@ +/* + * linux/arch/arm/boot/compressed/head-vt8500.S + * + * Copyright (C) 2010 Alexey Charkov + * + * VIA VT8500 specific tweaks. This is merged into head.S by the linker. 
+ * + */ + +#include +#include + + .section ".start", "ax" + +__VT8500_start: + @ Compare the SCC ID register against a list of known values + ldr r1, .SCCID + ldr r3, [r1] + + @ VT8500 override + ldr r4, .VT8500SCC + cmp r3, r4 + ldreq r7, .ID_BV07 + beq .Lendvt8500 + + @ WM8505 override + ldr r4, .WM8505SCC + cmp r3, r4 + ldreq r7, .ID_8505 + beq .Lendvt8500 + + @ Otherwise, leave the bootloader's machine id untouched + +.SCCID: + .word 0xd8120000 +.VT8500SCC: + .word 0x34000102 +.WM8505SCC: + .word 0x34260103 + +.ID_BV07: + .word MACH_TYPE_BV07 +.ID_8505: + .word MACH_TYPE_WM8505_7IN_NETBOOK + +.Lendvt8500: diff --git a/arch/arm/boot/compressed/head-xscale.S b/arch/arm/boot/compressed/head-xscale.S new file mode 100644 index 00000000..aa5ee49c --- /dev/null +++ b/arch/arm/boot/compressed/head-xscale.S @@ -0,0 +1,41 @@ +/* + * linux/arch/arm/boot/compressed/head-xscale.S + * + * XScale specific tweaks. This is merged into head.S by the linker. + * + */ + +#include + + .section ".start", "ax" + +__XScale_start: + + @ Preserve r8/r7 i.e. kernel entry values + + @ Data cache might be active. + @ Be sure to flush kernel binary out of the cache, + @ whatever state it is, before it is turned off. + @ This is done by fetching through currently executed + @ memory to be sure we hit the same cache. + bic r2, pc, #0x1f + add r3, r2, #0x10000 @ 64 kb is quite enough... +1: ldr r0, [r2], #32 + teq r2, r3 + bne 1b + mcr p15, 0, r0, c7, c10, 4 @ drain WB + mcr p15, 0, r0, c7, c7, 0 @ flush I & D caches + + @ disabling MMU and caches + mrc p15, 0, r0, c1, c0, 0 @ read control reg + bic r0, r0, #0x05 @ clear DC, MMU + bic r0, r0, #0x1000 @ clear Icache + mcr p15, 0, r0, c1, c0, 0 + +#ifdef CONFIG_ARCH_IXP2000 + mov r1, #-1 + mov r0, #0xd6000000 + str r1, [r0, #0x14] + str r1, [r0, #0x18] +#endif + diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S new file mode 100644 index 00000000..e58603b0 --- /dev/null +++ b/arch/arm/boot/compressed/head.S @@ -0,0 +1,1121 @@ +/* + * linux/arch/arm/boot/compressed/head.S + * + * Copyright (C) 1996-2002 Russell King + * Copyright (C) 2004 Hyok S. Choi (MPU support) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include + +/* + * Debugging stuff + * + * Note that these macros must not contain any code which is not + * 100% relocatable. Any attempt to do so will result in a crash. + * Please select one of the following when turning on debugging. 
+ */ +#ifdef DEBUG + +#if defined(CONFIG_DEBUG_ICEDCC) + +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7) + .macro loadsp, rb, tmp + .endm + .macro writeb, ch, rb + mcr p14, 0, \ch, c0, c5, 0 + .endm +#elif defined(CONFIG_CPU_XSCALE) + .macro loadsp, rb, tmp + .endm + .macro writeb, ch, rb + mcr p14, 0, \ch, c8, c0, 0 + .endm +#else + .macro loadsp, rb, tmp + .endm + .macro writeb, ch, rb + mcr p14, 0, \ch, c1, c0, 0 + .endm +#endif + +#else + +#include + + .macro writeb, ch, rb + senduart \ch, \rb + .endm + +#if defined(CONFIG_ARCH_SA1100) + .macro loadsp, rb, tmp + mov \rb, #0x80000000 @ physical base address +#ifdef CONFIG_DEBUG_LL_SER3 + add \rb, \rb, #0x00050000 @ Ser3 +#else + add \rb, \rb, #0x00010000 @ Ser1 +#endif + .endm +#elif defined(CONFIG_ARCH_S3C2410) + .macro loadsp, rb, tmp + mov \rb, #0x50000000 + add \rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT + .endm +#else + .macro loadsp, rb, tmp + addruart \rb, \tmp + .endm +#endif +#endif +#endif + + .macro kputc,val + mov r0, \val + bl putc + .endm + + .macro kphex,val,len + mov r0, \val + mov r1, #\len + bl phex + .endm + + .macro debug_reloc_start +#ifdef DEBUG + kputc #'\n' + kphex r6, 8 /* processor id */ + kputc #':' + kphex r7, 8 /* architecture id */ +#ifdef CONFIG_CPU_CP15 + kputc #':' + mrc p15, 0, r0, c1, c0 + kphex r0, 8 /* control reg */ +#endif + kputc #'\n' + kphex r5, 8 /* decompressed kernel start */ + kputc #'-' + kphex r9, 8 /* decompressed kernel end */ + kputc #'>' + kphex r4, 8 /* kernel execution address */ + kputc #'\n' +#endif + .endm + + .macro debug_reloc_end +#ifdef DEBUG + kphex r5, 8 /* end of kernel */ + kputc #'\n' + mov r0, r4 + bl memdump /* dump 256 bytes at start of kernel */ +#endif + .endm + + .section ".start", #alloc, #execinstr +/* + * sort out different calling conventions + */ + .align + .arm @ Always enter in ARM state +start: + .type start,#function + .rept 7 + mov r0, r0 + .endr + ARM( mov r0, r0 ) + ARM( b 1f ) + THUMB( adr r12, BSYM(1f) ) + THUMB( bx r12 ) + + .word 0x016f2818 @ Magic numbers to help the loader + .word start @ absolute load/run zImage address + .word _edata @ zImage end address + THUMB( .thumb ) +1: mov r7, r1 @ save architecture ID + mov r8, r2 @ save atags pointer + +#ifndef __ARM_ARCH_2__ + /* + * Booting from Angel - need to enter SVC mode and disable + * FIQs/IRQs (numeric definitions from angel arm.h source). + * We only do this if we were in user mode on entry. + */ + mrs r2, cpsr @ get current mode + tst r2, #3 @ not user? + bne not_angel + mov r0, #0x17 @ angel_SWIreason_EnterSVC + ARM( swi 0x123456 ) @ angel_SWI_ARM + THUMB( svc 0xab ) @ angel_SWI_THUMB +not_angel: + mrs r2, cpsr @ turn off interrupts to + orr r2, r2, #0xc0 @ prevent angel from running + msr cpsr_c, r2 +#else + teqp pc, #0x0c000003 @ turn off interrupts +#endif + + /* + * Note that some cache flushing and other stuff may + * be needed here - is there an Angel SWI call for this? + */ + + /* + * some architecture specific code can be inserted + * by the linker here, but it should preserve r7, r8, and r9. + */ + + .text + +#ifdef CONFIG_AUTO_ZRELADDR + @ determine final kernel image address + mov r4, pc + and r4, r4, #0xf8000000 + add r4, r4, #TEXT_OFFSET +#else + ldr r4, =zreladdr +#endif + + bl cache_on + +restart: adr r0, LC0 + ldmia r0, {r1, r2, r3, r6, r10, r11, r12} + ldr sp, [r0, #28] + + /* + * We might be running at a different address. We need + * to fix up various pointers. 
+ */ + sub r0, r0, r1 @ calculate the delta offset + add r6, r6, r0 @ _edata + add r10, r10, r0 @ inflated kernel size location + + /* + * The kernel build system appends the size of the + * decompressed kernel at the end of the compressed data + * in little-endian form. + */ + ldrb r9, [r10, #0] + ldrb lr, [r10, #1] + orr r9, r9, lr, lsl #8 + ldrb lr, [r10, #2] + ldrb r10, [r10, #3] + orr r9, r9, lr, lsl #16 + orr r9, r9, r10, lsl #24 + +#ifndef CONFIG_ZBOOT_ROM + /* malloc space is above the relocated stack (64k max) */ + add sp, sp, r0 + add r10, sp, #0x10000 +#else + /* + * With ZBOOT_ROM the bss/stack is non relocatable, + * but someone could still run this code from RAM, + * in which case our reference is _edata. + */ + mov r10, r6 +#endif + +/* + * Check to see if we will overwrite ourselves. + * r4 = final kernel address + * r9 = size of decompressed image + * r10 = end of this image, including bss/stack/malloc space if non XIP + * We basically want: + * r4 - 16k page directory >= r10 -> OK + * r4 + image length <= current position (pc) -> OK + */ + add r10, r10, #16384 + cmp r4, r10 + bhs wont_overwrite + add r10, r4, r9 + ARM( cmp r10, pc ) + THUMB( mov lr, pc ) + THUMB( cmp r10, lr ) + bls wont_overwrite + +/* + * Relocate ourselves past the end of the decompressed kernel. + * r6 = _edata + * r10 = end of the decompressed kernel + * Because we always copy ahead, we need to do it from the end and go + * backward in case the source and destination overlap. + */ + /* + * Bump to the next 256-byte boundary with the size of + * the relocation code added. This avoids overwriting + * ourself when the offset is small. + */ + add r10, r10, #((reloc_code_end - restart + 256) & ~255) + bic r10, r10, #255 + + /* Get start of code we want to copy and align it down. */ + adr r5, restart + bic r5, r5, #31 + + sub r9, r6, r5 @ size to copy + add r9, r9, #31 @ rounded up to a multiple + bic r9, r9, #31 @ ... of 32 bytes + add r6, r9, r5 + add r9, r9, r10 + +1: ldmdb r6!, {r0 - r3, r10 - r12, lr} + cmp r6, r5 + stmdb r9!, {r0 - r3, r10 - r12, lr} + bhi 1b + + /* Preserve offset to relocated code. */ + sub r6, r9, r6 + +#ifndef CONFIG_ZBOOT_ROM + /* cache_clean_flush may use the stack, so relocate it */ + add sp, sp, r6 +#endif + + bl cache_clean_flush + + adr r0, BSYM(restart) + add r0, r0, r6 + mov pc, r0 + +wont_overwrite: +/* + * If delta is zero, we are running at the address we were linked at. + * r0 = delta + * r2 = BSS start + * r3 = BSS end + * r4 = kernel execution address + * r7 = architecture ID + * r8 = atags pointer + * r11 = GOT start + * r12 = GOT end + * sp = stack pointer + */ + teq r0, #0 + beq not_relocated + add r11, r11, r0 + add r12, r12, r0 + +#ifndef CONFIG_ZBOOT_ROM + /* + * If we're running fully PIC === CONFIG_ZBOOT_ROM = n, + * we need to fix up pointers into the BSS region. + * Note that the stack pointer has already been fixed up. + */ + add r2, r2, r0 + add r3, r3, r0 + + /* + * Relocate all entries in the GOT table. + */ +1: ldr r1, [r11, #0] @ relocate entries in the GOT + add r1, r1, r0 @ table. This fixes up the + str r1, [r11], #4 @ C references. + cmp r11, r12 + blo 1b +#else + + /* + * Relocate entries in the GOT table. We only relocate + * the entries that are outside the (relocated) BSS region. + */ +1: ldr r1, [r11, #0] @ relocate entries in the GOT + cmp r1, r2 @ entry < bss_start || + cmphs r3, r1 @ _end < entry + addlo r1, r1, r0 @ table. This fixes up the + str r1, [r11], #4 @ C references. 
+ cmp r11, r12 + blo 1b +#endif + +not_relocated: mov r0, #0 +1: str r0, [r2], #4 @ clear bss + str r0, [r2], #4 + str r0, [r2], #4 + str r0, [r2], #4 + cmp r2, r3 + blo 1b + +/* + * The C runtime environment should now be setup sufficiently. + * Set up some pointers, and start decompressing. + * r4 = kernel execution address + * r7 = architecture ID + * r8 = atags pointer + */ + mov r0, r4 + mov r1, sp @ malloc space above stack + add r2, sp, #0x10000 @ 64k max + mov r3, r7 + bl decompress_kernel + bl cache_clean_flush + bl cache_off + mov r0, #0 @ must be zero + mov r1, r7 @ restore architecture number + mov r2, r8 @ restore atags pointer + mov pc, r4 @ call kernel + + .align 2 + .type LC0, #object +LC0: .word LC0 @ r1 + .word __bss_start @ r2 + .word _end @ r3 + .word _edata @ r6 + .word input_data_end - 4 @ r10 (inflated size location) + .word _got_start @ r11 + .word _got_end @ ip + .word .L_user_stack_end @ sp + .size LC0, . - LC0 + +#ifdef CONFIG_ARCH_RPC + .globl params +params: ldr r0, =0x10000100 @ params_phys for RPC + mov pc, lr + .ltorg + .align +#endif + +/* + * Turn on the cache. We need to setup some page tables so that we + * can have both the I and D caches on. + * + * We place the page tables 16k down from the kernel execution address, + * and we hope that nothing else is using it. If we're using it, we + * will go pop! + * + * On entry, + * r4 = kernel execution address + * r7 = architecture number + * r8 = atags pointer + * On exit, + * r0, r1, r2, r3, r9, r10, r12 corrupted + * This routine must preserve: + * r4, r7, r8 + */ + .align 5 +cache_on: mov r3, #8 @ cache_on function + b call_cache_fn + +/* + * Initialize the highest priority protection region, PR7 + * to cover all 32bit address and cacheable and bufferable. + */ +__armv4_mpu_cache_on: + mov r0, #0x3f @ 4G, the whole + mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting + mcr p15, 0, r0, c6, c7, 1 + + mov r0, #0x80 @ PR7 + mcr p15, 0, r0, c2, c0, 0 @ D-cache on + mcr p15, 0, r0, c2, c0, 1 @ I-cache on + mcr p15, 0, r0, c3, c0, 0 @ write-buffer on + + mov r0, #0xc000 + mcr p15, 0, r0, c5, c0, 1 @ I-access permission + mcr p15, 0, r0, c5, c0, 0 @ D-access permission + + mov r0, #0 + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer + mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache + mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache + mrc p15, 0, r0, c1, c0, 0 @ read control reg + @ ...I .... ..D. WC.M + orr r0, r0, #0x002d @ .... .... ..1. 11.1 + orr r0, r0, #0x1000 @ ...1 .... .... .... + + mcr p15, 0, r0, c1, c0, 0 @ write control reg + + mov r0, #0 + mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache + mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache + mov pc, lr + +__armv3_mpu_cache_on: + mov r0, #0x3f @ 4G, the whole + mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting + + mov r0, #0x80 @ PR7 + mcr p15, 0, r0, c2, c0, 0 @ cache on + mcr p15, 0, r0, c3, c0, 0 @ write-buffer on + + mov r0, #0xc000 + mcr p15, 0, r0, c5, c0, 0 @ access permission + + mov r0, #0 + mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 + /* + * ?? ARMv3 MMU does not allow reading the control register, + * does this really work on ARMv3 MPU? + */ + mrc p15, 0, r0, c1, c0, 0 @ read control reg + @ .... .... .... WC.M + orr r0, r0, #0x000d @ .... .... .... 11.1 + /* ?? this overwrites the value constructed above? */ + mov r0, #0 + mcr p15, 0, r0, c1, c0, 0 @ write control reg + + /* ?? invalidate for the second time? 
*/ + mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 + mov pc, lr + +__setup_mmu: sub r3, r4, #16384 @ Page directory size + bic r3, r3, #0xff @ Align the pointer + bic r3, r3, #0x3f00 +/* + * Initialise the page tables, turning on the cacheable and bufferable + * bits for the RAM area only. + */ + mov r0, r3 + mov r9, r0, lsr #18 + mov r9, r9, lsl #18 @ start of RAM + add r10, r9, #0x10000000 @ a reasonable RAM size + mov r1, #0x12 + orr r1, r1, #3 << 10 + add r2, r3, #16384 +1: cmp r1, r9 @ if virt > start of RAM +#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH + orrhs r1, r1, #0x08 @ set cacheable +#else + orrhs r1, r1, #0x0c @ set cacheable, bufferable +#endif + cmp r1, r10 @ if virt > end of RAM + bichs r1, r1, #0x0c @ clear cacheable, bufferable + str r1, [r0], #4 @ 1:1 mapping + add r1, r1, #1048576 + teq r0, r2 + bne 1b +/* + * If ever we are running from Flash, then we surely want the cache + * to be enabled also for our execution instance... We map 2MB of it + * so there is no map overlap problem for up to 1 MB compressed kernel. + * If the execution is in RAM then we would only be duplicating the above. + */ + mov r1, #0x1e + orr r1, r1, #3 << 10 + mov r2, pc + mov r2, r2, lsr #20 + orr r1, r1, r2, lsl #20 + add r0, r3, r2, lsl #2 + str r1, [r0], #4 + add r1, r1, #1048576 + str r1, [r0] + mov pc, lr +ENDPROC(__setup_mmu) + +__arm926ejs_mmu_cache_on: +#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH + mov r0, #4 @ put dcache in WT mode + mcr p15, 7, r0, c15, c0, 0 +#endif + +__armv4_mmu_cache_on: + mov r12, lr +#ifdef CONFIG_MMU + bl __setup_mmu + mov r0, #0 + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer + mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs + mrc p15, 0, r0, c1, c0, 0 @ read control reg + orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement + orr r0, r0, #0x0030 +#ifdef CONFIG_CPU_ENDIAN_BE8 + orr r0, r0, #1 << 25 @ big-endian page tables +#endif + bl __common_mmu_cache_on + mov r0, #0 + mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs +#endif + mov pc, r12 + +__armv7_mmu_cache_on: + mov r12, lr +#ifdef CONFIG_MMU + mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0 + tst r11, #0xf @ VMSA + blne __setup_mmu + mov r0, #0 + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer + tst r11, #0xf @ VMSA + mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs +#endif + mrc p15, 0, r0, c1, c0, 0 @ read control reg + orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement + orr r0, r0, #0x003c @ write buffer +#ifdef CONFIG_MMU +#ifdef CONFIG_CPU_ENDIAN_BE8 + orr r0, r0, #1 << 25 @ big-endian page tables +#endif + orrne r0, r0, #1 @ MMU enabled + movne r1, #-1 + mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer + mcrne p15, 0, r1, c3, c0, 0 @ load domain access control +#endif + mcr p15, 0, r0, c1, c0, 0 @ load control register + mrc p15, 0, r0, c1, c0, 0 @ and read it back + mov r0, #0 + mcr p15, 0, r0, c7, c5, 4 @ ISB + mov pc, r12 + +__fa526_cache_on: + mov r12, lr + bl __setup_mmu + mov r0, #0 + mcr p15, 0, r0, c7, c7, 0 @ Invalidate whole cache + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer + mcr p15, 0, r0, c8, c7, 0 @ flush UTLB + mrc p15, 0, r0, c1, c0, 0 @ read control reg + orr r0, r0, #0x1000 @ I-cache enable + bl __common_mmu_cache_on + mov r0, #0 + mcr p15, 0, r0, c8, c7, 0 @ flush UTLB + mov pc, r12 + +__arm6_mmu_cache_on: + mov r12, lr + bl __setup_mmu + mov r0, #0 + mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 + mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3 + mov r0, #0x30 + bl __common_mmu_cache_on + mov r0, #0 + mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3 + mov pc, 
r12 + +__common_mmu_cache_on: +#ifndef CONFIG_THUMB2_KERNEL +#ifndef DEBUG + orr r0, r0, #0x000d @ Write buffer, mmu +#endif + mov r1, #-1 + mcr p15, 0, r3, c2, c0, 0 @ load page table pointer + mcr p15, 0, r1, c3, c0, 0 @ load domain access control + b 1f + .align 5 @ cache line aligned +1: mcr p15, 0, r0, c1, c0, 0 @ load control register + mrc p15, 0, r0, c1, c0, 0 @ and read it back to + sub pc, lr, r0, lsr #32 @ properly flush pipeline +#endif + +#define PROC_ENTRY_SIZE (4*5) + +/* + * Here follow the relocatable cache support functions for the + * various processors. This is a generic hook for locating an + * entry and jumping to an instruction at the specified offset + * from the start of the block. Please note this is all position + * independent code. + * + * r1 = corrupted + * r2 = corrupted + * r3 = block offset + * r9 = corrupted + * r12 = corrupted + */ + +call_cache_fn: adr r12, proc_types +#ifdef CONFIG_CPU_CP15 + mrc p15, 0, r9, c0, c0 @ get processor ID +#else + ldr r9, =CONFIG_PROCESSOR_ID +#endif +1: ldr r1, [r12, #0] @ get value + ldr r2, [r12, #4] @ get mask + eor r1, r1, r9 @ (real ^ match) + tst r1, r2 @ & mask + ARM( addeq pc, r12, r3 ) @ call cache function + THUMB( addeq r12, r3 ) + THUMB( moveq pc, r12 ) @ call cache function + add r12, r12, #PROC_ENTRY_SIZE + b 1b + +/* + * Table for cache operations. This is basically: + * - CPU ID match + * - CPU ID mask + * - 'cache on' method instruction + * - 'cache off' method instruction + * - 'cache flush' method instruction + * + * We match an entry using: ((real_id ^ match) & mask) == 0 + * + * Writethrough caches generally only need 'on' and 'off' + * methods. Writeback caches _must_ have the flush method + * defined. + */ + .align 2 + .type proc_types,#object +proc_types: + .word 0x41560600 @ ARM6/610 + .word 0xffffffe0 + W(b) __arm6_mmu_cache_off @ works, but slow + W(b) __arm6_mmu_cache_off + mov pc, lr + THUMB( nop ) +@ b __arm6_mmu_cache_on @ untested +@ b __arm6_mmu_cache_off +@ b __armv3_mmu_cache_flush + +#if !defined(CONFIG_CPU_V7) + /* This collides with some V7 IDs, preventing correct detection */ + .word 0x00000000 @ old ARM ID + .word 0x0000f000 + mov pc, lr + THUMB( nop ) + mov pc, lr + THUMB( nop ) + mov pc, lr + THUMB( nop ) +#endif + + .word 0x41007000 @ ARM7/710 + .word 0xfff8fe00 + W(b) __arm7_mmu_cache_off + W(b) __arm7_mmu_cache_off + mov pc, lr + THUMB( nop ) + + .word 0x41807200 @ ARM720T (writethrough) + .word 0xffffff00 + W(b) __armv4_mmu_cache_on + W(b) __armv4_mmu_cache_off + mov pc, lr + THUMB( nop ) + + .word 0x41007400 @ ARM74x + .word 0xff00ff00 + W(b) __armv3_mpu_cache_on + W(b) __armv3_mpu_cache_off + W(b) __armv3_mpu_cache_flush + + .word 0x41009400 @ ARM94x + .word 0xff00ff00 + W(b) __armv4_mpu_cache_on + W(b) __armv4_mpu_cache_off + W(b) __armv4_mpu_cache_flush + + .word 0x41069260 @ ARM926EJ-S (v5TEJ) + .word 0xff0ffff0 + W(b) __arm926ejs_mmu_cache_on + W(b) __armv4_mmu_cache_off + W(b) __armv5tej_mmu_cache_flush + + .word 0x00007000 @ ARM7 IDs + .word 0x0000f000 + mov pc, lr + THUMB( nop ) + mov pc, lr + THUMB( nop ) + mov pc, lr + THUMB( nop ) + + @ Everything from here on will be the new ID system. 
+ + .word 0x4401a100 @ sa110 / sa1100 + .word 0xffffffe0 + W(b) __armv4_mmu_cache_on + W(b) __armv4_mmu_cache_off + W(b) __armv4_mmu_cache_flush + + .word 0x6901b110 @ sa1110 + .word 0xfffffff0 + W(b) __armv4_mmu_cache_on + W(b) __armv4_mmu_cache_off + W(b) __armv4_mmu_cache_flush + + .word 0x56056900 + .word 0xffffff00 @ PXA9xx + W(b) __armv4_mmu_cache_on + W(b) __armv4_mmu_cache_off + W(b) __armv4_mmu_cache_flush + + .word 0x56158000 @ PXA168 + .word 0xfffff000 + W(b) __armv4_mmu_cache_on + W(b) __armv4_mmu_cache_off + W(b) __armv5tej_mmu_cache_flush + + .word 0x56050000 @ Feroceon + .word 0xff0f0000 + W(b) __armv4_mmu_cache_on + W(b) __armv4_mmu_cache_off + W(b) __armv5tej_mmu_cache_flush + +#ifdef CONFIG_CPU_FEROCEON_OLD_ID + /* this conflicts with the standard ARMv5TE entry */ + .long 0x41009260 @ Old Feroceon + .long 0xff00fff0 + b __armv4_mmu_cache_on + b __armv4_mmu_cache_off + b __armv5tej_mmu_cache_flush +#endif + + .word 0x66015261 @ FA526 + .word 0xff01fff1 + W(b) __fa526_cache_on + W(b) __armv4_mmu_cache_off + W(b) __fa526_cache_flush + + @ These match on the architecture ID + + .word 0x00020000 @ ARMv4T + .word 0x000f0000 + W(b) __armv4_mmu_cache_on + W(b) __armv4_mmu_cache_off + W(b) __armv4_mmu_cache_flush + + .word 0x00050000 @ ARMv5TE + .word 0x000f0000 + W(b) __armv4_mmu_cache_on + W(b) __armv4_mmu_cache_off + W(b) __armv4_mmu_cache_flush + + .word 0x00060000 @ ARMv5TEJ + .word 0x000f0000 + W(b) __armv4_mmu_cache_on + W(b) __armv4_mmu_cache_off + W(b) __armv5tej_mmu_cache_flush + + .word 0x0007b000 @ ARMv6 + .word 0x000ff000 + W(b) __armv4_mmu_cache_on + W(b) __armv4_mmu_cache_off + W(b) __armv6_mmu_cache_flush + + .word 0x000f0000 @ new CPU Id + .word 0x000f0000 + W(b) __armv7_mmu_cache_on + W(b) __armv7_mmu_cache_off + W(b) __armv7_mmu_cache_flush + + .word 0 @ unrecognised type + .word 0 + mov pc, lr + THUMB( nop ) + mov pc, lr + THUMB( nop ) + mov pc, lr + THUMB( nop ) + + .size proc_types, . - proc_types + + /* + * If you get a "non-constant expression in ".if" statement" + * error from the assembler on this line, check that you have + * not accidentally written a "b" instruction where you should + * have written W(b). + */ + .if (. - proc_types) % PROC_ENTRY_SIZE != 0 + .error "The size of one or more proc_types entries is wrong." + .endif + +/* + * Turn off the Cache and MMU. ARMv3 does not support + * reading the control register, but ARMv4 does. 
+ * + * On exit, + * r0, r1, r2, r3, r9, r12 corrupted + * This routine must preserve: + * r4, r7, r8 + */ + .align 5 +cache_off: mov r3, #12 @ cache_off function + b call_cache_fn + +__armv4_mpu_cache_off: + mrc p15, 0, r0, c1, c0 + bic r0, r0, #0x000d + mcr p15, 0, r0, c1, c0 @ turn MPU and cache off + mov r0, #0 + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer + mcr p15, 0, r0, c7, c6, 0 @ flush D-Cache + mcr p15, 0, r0, c7, c5, 0 @ flush I-Cache + mov pc, lr + +__armv3_mpu_cache_off: + mrc p15, 0, r0, c1, c0 + bic r0, r0, #0x000d + mcr p15, 0, r0, c1, c0, 0 @ turn MPU and cache off + mov r0, #0 + mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 + mov pc, lr + +__armv4_mmu_cache_off: +#ifdef CONFIG_MMU + mrc p15, 0, r0, c1, c0 + bic r0, r0, #0x000d + mcr p15, 0, r0, c1, c0 @ turn MMU and cache off + mov r0, #0 + mcr p15, 0, r0, c7, c7 @ invalidate whole cache v4 + mcr p15, 0, r0, c8, c7 @ invalidate whole TLB v4 +#endif + mov pc, lr + +__armv7_mmu_cache_off: + mrc p15, 0, r0, c1, c0 +#ifdef CONFIG_MMU + bic r0, r0, #0x000d +#else + bic r0, r0, #0x000c +#endif + mcr p15, 0, r0, c1, c0 @ turn MMU and cache off + mov r12, lr + bl __armv7_mmu_cache_flush + mov r0, #0 +#ifdef CONFIG_MMU + mcr p15, 0, r0, c8, c7, 0 @ invalidate whole TLB +#endif + mcr p15, 0, r0, c7, c5, 6 @ invalidate BTC + mcr p15, 0, r0, c7, c10, 4 @ DSB + mcr p15, 0, r0, c7, c5, 4 @ ISB + mov pc, r12 + +__arm6_mmu_cache_off: + mov r0, #0x00000030 @ ARM6 control reg. + b __armv3_mmu_cache_off + +__arm7_mmu_cache_off: + mov r0, #0x00000070 @ ARM7 control reg. + b __armv3_mmu_cache_off + +__armv3_mmu_cache_off: + mcr p15, 0, r0, c1, c0, 0 @ turn MMU and cache off + mov r0, #0 + mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 + mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3 + mov pc, lr + +/* + * Clean and flush the cache to maintain consistency. 
+ * + * On exit, + * r1, r2, r3, r9, r10, r11, r12 corrupted + * This routine must preserve: + * r4, r6, r7, r8 + */ + .align 5 +cache_clean_flush: + mov r3, #16 + b call_cache_fn + +__armv4_mpu_cache_flush: + mov r2, #1 + mov r3, #0 + mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache + mov r1, #7 << 5 @ 8 segments +1: orr r3, r1, #63 << 26 @ 64 entries +2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index + subs r3, r3, #1 << 26 + bcs 2b @ entries 63 to 0 + subs r1, r1, #1 << 5 + bcs 1b @ segments 7 to 0 + + teq r2, #0 + mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +__fa526_cache_flush: + mov r1, #0 + mcr p15, 0, r1, c7, c14, 0 @ clean and invalidate D cache + mcr p15, 0, r1, c7, c5, 0 @ flush I cache + mcr p15, 0, r1, c7, c10, 4 @ drain WB + mov pc, lr + +__armv6_mmu_cache_flush: + mov r1, #0 + mcr p15, 0, r1, c7, c14, 0 @ clean+invalidate D + mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB + mcr p15, 0, r1, c7, c15, 0 @ clean+invalidate unified + mcr p15, 0, r1, c7, c10, 4 @ drain WB + mov pc, lr + +__armv7_mmu_cache_flush: + mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1 + tst r10, #0xf << 16 @ hierarchical cache (ARMv7) + mov r10, #0 + beq hierarchical + mcr p15, 0, r10, c7, c14, 0 @ clean+invalidate D + b iflush +hierarchical: + mcr p15, 0, r10, c7, c10, 5 @ DMB + stmfd sp!, {r0-r7, r9-r11} + mrc p15, 1, r0, c0, c0, 1 @ read clidr + ands r3, r0, #0x7000000 @ extract loc from clidr + mov r3, r3, lsr #23 @ left align loc bit field + beq finished @ if loc is 0, then no need to clean + mov r10, #0 @ start clean at cache level 0 +loop1: + add r2, r10, r10, lsr #1 @ work out 3x current cache level + mov r1, r0, lsr r2 @ extract cache type bits from clidr + and r1, r1, #7 @ mask of the bits for current cache only + cmp r1, #2 @ see what cache we have at this level + blt skip @ skip if no cache, or just i-cache + mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr + mcr p15, 0, r10, c7, c5, 4 @ isb to sych the new cssr&csidr + mrc p15, 1, r1, c0, c0, 0 @ read the new csidr + and r2, r1, #7 @ extract the length of the cache lines + add r2, r2, #4 @ add 4 (line length offset) + ldr r4, =0x3ff + ands r4, r4, r1, lsr #3 @ find maximum number on the way size + clz r5, r4 @ find bit position of way size increment + ldr r7, =0x7fff + ands r7, r7, r1, lsr #13 @ extract max number of the index size +loop2: + mov r9, r4 @ create working copy of max way size +loop3: + ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11 + ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11 + THUMB( lsl r6, r9, r5 ) + THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11 + THUMB( lsl r6, r7, r2 ) + THUMB( orr r11, r11, r6 ) @ factor index number into r11 + mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way + subs r9, r9, #1 @ decrement the way + bge loop3 + subs r7, r7, #1 @ decrement the index + bge loop2 +skip: + add r10, r10, #2 @ increment cache number + cmp r3, r10 + bgt loop1 +finished: + ldmfd sp!, {r0-r7, r9-r11} + mov r10, #0 @ swith back to cache level 0 + mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr +iflush: + mcr p15, 0, r10, c7, c10, 4 @ DSB + mcr p15, 0, r10, c7, c5, 0 @ invalidate I+BTB + mcr p15, 0, r10, c7, c10, 4 @ DSB + mcr p15, 0, r10, c7, c5, 4 @ ISB + mov pc, lr + +__armv5tej_mmu_cache_flush: +1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache + bne 1b + mcr p15, 0, r0, c7, c5, 0 @ flush I cache + mcr p15, 0, r0, c7, c10, 4 @ drain WB + mov pc, lr + 
+__armv4_mmu_cache_flush: + mov r2, #64*1024 @ default: 32K dcache size (*2) + mov r11, #32 @ default: 32 byte line size + mrc p15, 0, r3, c0, c0, 1 @ read cache type + teq r3, r9 @ cache ID register present? + beq no_cache_id + mov r1, r3, lsr #18 + and r1, r1, #7 + mov r2, #1024 + mov r2, r2, lsl r1 @ base dcache size *2 + tst r3, #1 << 14 @ test M bit + addne r2, r2, r2, lsr #1 @ +1/2 size if M == 1 + mov r3, r3, lsr #12 + and r3, r3, #3 + mov r11, #8 + mov r11, r11, lsl r3 @ cache line size in bytes +no_cache_id: + mov r1, pc + bic r1, r1, #63 @ align to longest cache line + add r2, r1, r2 +1: + ARM( ldr r3, [r1], r11 ) @ s/w flush D cache + THUMB( ldr r3, [r1] ) @ s/w flush D cache + THUMB( add r1, r1, r11 ) + teq r1, r2 + bne 1b + + mcr p15, 0, r1, c7, c5, 0 @ flush I cache + mcr p15, 0, r1, c7, c6, 0 @ flush D cache + mcr p15, 0, r1, c7, c10, 4 @ drain WB + mov pc, lr + +__armv3_mmu_cache_flush: +__armv3_mpu_cache_flush: + mov r1, #0 + mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3 + mov pc, lr + +/* + * Various debugging routines for printing hex characters and + * memory, which again must be relocatable. + */ +#ifdef DEBUG + .align 2 + .type phexbuf,#object +phexbuf: .space 12 + .size phexbuf, . - phexbuf + +@ phex corrupts {r0, r1, r2, r3} +phex: adr r3, phexbuf + mov r2, #0 + strb r2, [r3, r1] +1: subs r1, r1, #1 + movmi r0, r3 + bmi puts + and r2, r0, #15 + mov r0, r0, lsr #4 + cmp r2, #10 + addge r2, r2, #7 + add r2, r2, #'0' + strb r2, [r3, r1] + b 1b + +@ puts corrupts {r0, r1, r2, r3} +puts: loadsp r3, r1 +1: ldrb r2, [r0], #1 + teq r2, #0 + moveq pc, lr +2: writeb r2, r3 + mov r1, #0x00020000 +3: subs r1, r1, #1 + bne 3b + teq r2, #'\n' + moveq r2, #'\r' + beq 2b + teq r0, #0 + bne 1b + mov pc, lr +@ putc corrupts {r0, r1, r2, r3} +putc: + mov r2, r0 + mov r0, #0 + loadsp r3, r1 + b 2b + +@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr} +memdump: mov r12, r0 + mov r10, lr + mov r11, #0 +2: mov r0, r11, lsl #2 + add r0, r0, r12 + mov r1, #8 + bl phex + mov r0, #':' + bl putc +1: mov r0, #' ' + bl putc + ldr r0, [r12, r11, lsl #2] + mov r1, #8 + bl phex + and r0, r11, #7 + teq r0, #3 + moveq r0, #' ' + bleq putc + and r0, r11, #7 + add r11, r11, #1 + teq r0, #7 + bne 1b + mov r0, #'\n' + bl putc + cmp r11, #64 + blt 2b + mov pc, r10 +#endif + + .ltorg +reloc_code_end: + + .align + .section ".stack", "aw", %nobits +.L_user_stack: .space 4096 +.L_user_stack_end: diff --git a/arch/arm/boot/compressed/ll_char_wr.S b/arch/arm/boot/compressed/ll_char_wr.S new file mode 100644 index 00000000..8517c860 --- /dev/null +++ b/arch/arm/boot/compressed/ll_char_wr.S @@ -0,0 +1,134 @@ +/* + * linux/arch/arm/lib/ll_char_wr.S + * + * Copyright (C) 1995, 1996 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Speedups & 1bpp code (C) 1996 Philip Blundell & Russell King. + * + * 10-04-96 RMK Various cleanups & reduced register usage. 
+ * 08-04-98 RMK Shifts re-ordered + */ + +@ Regs: [] = corruptible +@ {} = used +@ () = do not use + +#include +#include + .text + +LC0: .word LC0 + .word bytes_per_char_h + .word video_size_row + .word acorndata_8x8 + .word con_charconvtable + +/* + * r0 = ptr + * r1 = char + * r2 = white + */ +ENTRY(ll_write_char) + stmfd sp!, {r4 - r7, lr} +@ +@ Smashable regs: {r0 - r3}, [r4 - r7], (r8 - fp), [ip], (sp), [lr], (pc) +@ + /* + * calculate offset into character table + */ + mov r1, r1, lsl #3 + /* + * calculate offset required for each row. + */ + adr ip, LC0 + ldmia ip, {r3, r4, r5, r6, lr} + sub ip, ip, r3 + add r6, r6, ip + add lr, lr, ip + ldr r4, [r4, ip] + ldr r5, [r5, ip] + /* + * Go to resolution-dependent routine... + */ + cmp r4, #4 + blt Lrow1bpp + add r0, r0, r5, lsl #3 @ Move to bottom of character + orr r1, r1, #7 + ldrb r7, [r6, r1] + teq r4, #8 + beq Lrow8bpplp +@ +@ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc) +@ +Lrow4bpplp: + ldr r7, [lr, r7, lsl #2] + mul r7, r2, r7 + sub r1, r1, #1 @ avoid using r7 directly after + str r7, [r0, -r5]! + ldrb r7, [r6, r1] + ldr r7, [lr, r7, lsl #2] + mul r7, r2, r7 + tst r1, #7 @ avoid using r7 directly after + str r7, [r0, -r5]! + subne r1, r1, #1 + ldrneb r7, [r6, r1] + bne Lrow4bpplp + ldmfd sp!, {r4 - r7, pc} + +@ +@ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc) +@ +Lrow8bpplp: + mov ip, r7, lsr #4 + ldr ip, [lr, ip, lsl #2] + mul r4, r2, ip + and ip, r7, #15 @ avoid r4 + ldr ip, [lr, ip, lsl #2] @ avoid r4 + mul ip, r2, ip @ avoid r4 + sub r1, r1, #1 @ avoid ip + sub r0, r0, r5 @ avoid ip + stmia r0, {r4, ip} + ldrb r7, [r6, r1] + mov ip, r7, lsr #4 + ldr ip, [lr, ip, lsl #2] + mul r4, r2, ip + and ip, r7, #15 @ avoid r4 + ldr ip, [lr, ip, lsl #2] @ avoid r4 + mul ip, r2, ip @ avoid r4 + tst r1, #7 @ avoid ip + sub r0, r0, r5 @ avoid ip + stmia r0, {r4, ip} + subne r1, r1, #1 + ldrneb r7, [r6, r1] + bne Lrow8bpplp + ldmfd sp!, {r4 - r7, pc} + +@ +@ Smashable regs: {r0 - r3}, [r4], {r5, r6}, [r7], (r8 - fp), [ip], (sp), [lr], (pc) +@ +Lrow1bpp: + add r6, r6, r1 + ldmia r6, {r4, r7} + strb r4, [r0], r5 + mov r4, r4, lsr #8 + strb r4, [r0], r5 + mov r4, r4, lsr #8 + strb r4, [r0], r5 + mov r4, r4, lsr #8 + strb r4, [r0], r5 + strb r7, [r0], r5 + mov r7, r7, lsr #8 + strb r7, [r0], r5 + mov r7, r7, lsr #8 + strb r7, [r0], r5 + mov r7, r7, lsr #8 + strb r7, [r0], r5 + ldmfd sp!, {r4 - r7, pc} + + .bss +ENTRY(con_charconvtable) + .space 1024 diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c new file mode 100644 index 00000000..832d3723 --- /dev/null +++ b/arch/arm/boot/compressed/misc.c @@ -0,0 +1,194 @@ +/* + * misc.c + * + * This is a collection of several routines from gzip-1.0.3 + * adapted for Linux. + * + * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 + * + * Modified for ARM Linux by Russell King + * + * Nicolas Pitre 1999/04/14 : + * For this code to run directly from Flash, all constant variables must + * be marked with 'const' and all other variables initialized at run-time + * only. This way all non constant variables will end up in the bss segment, + * which should point to addresses in RAM and cleared to 0 on start. + * This allows for a much quicker boot time. 
+ */ + +unsigned int __machine_arch_type; + +#define _LINUX_STRING_H_ + +#include /* for inline */ +#include /* for size_t */ +#include /* for NULL */ +#include +#include + + +static void putstr(const char *ptr); +extern void error(char *x); + +#include + +#ifdef CONFIG_DEBUG_ICEDCC + +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7) + +static void icedcc_putc(int ch) +{ + int status, i = 0x4000000; + + do { + if (--i < 0) + return; + + asm volatile ("mrc p14, 0, %0, c0, c1, 0" : "=r" (status)); + } while (status & (1 << 29)); + + asm("mcr p14, 0, %0, c0, c5, 0" : : "r" (ch)); +} + + +#elif defined(CONFIG_CPU_XSCALE) + +static void icedcc_putc(int ch) +{ + int status, i = 0x4000000; + + do { + if (--i < 0) + return; + + asm volatile ("mrc p14, 0, %0, c14, c0, 0" : "=r" (status)); + } while (status & (1 << 28)); + + asm("mcr p14, 0, %0, c8, c0, 0" : : "r" (ch)); +} + +#else + +static void icedcc_putc(int ch) +{ + int status, i = 0x4000000; + + do { + if (--i < 0) + return; + + asm volatile ("mrc p14, 0, %0, c0, c0, 0" : "=r" (status)); + } while (status & 2); + + asm("mcr p14, 0, %0, c1, c0, 0" : : "r" (ch)); +} + +#endif + +#define putc(ch) icedcc_putc(ch) +#endif + +static void putstr(const char *ptr) +{ + char c; + + while ((c = *ptr++) != '\0') { + if (c == '\n') + putc('\r'); + putc(c); + } + + flush(); +} + + +void *memcpy(void *__dest, __const void *__src, size_t __n) +{ + int i = 0; + unsigned char *d = (unsigned char *)__dest, *s = (unsigned char *)__src; + + for (i = __n >> 3; i > 0; i--) { + *d++ = *s++; + *d++ = *s++; + *d++ = *s++; + *d++ = *s++; + *d++ = *s++; + *d++ = *s++; + *d++ = *s++; + *d++ = *s++; + } + + if (__n & 1 << 2) { + *d++ = *s++; + *d++ = *s++; + *d++ = *s++; + *d++ = *s++; + } + + if (__n & 1 << 1) { + *d++ = *s++; + *d++ = *s++; + } + + if (__n & 1) + *d++ = *s++; + + return __dest; +} + +/* + * gzip declarations + */ +extern char input_data[]; +extern char input_data_end[]; + +unsigned char *output_data; + +unsigned long free_mem_ptr; +unsigned long free_mem_end_ptr; + +#ifndef arch_error +#define arch_error(x) +#endif + +void error(char *x) +{ + arch_error(x); + + putstr("\n\n"); + putstr(x); + putstr("\n\n -- System halted"); + + while(1); /* Halt */ +} + +asmlinkage void __div0(void) +{ + error("Attempting division by 0!"); +} + +extern int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)); + + +void +decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p, + unsigned long free_mem_ptr_end_p, + int arch_id) +{ + int ret; + + output_data = (unsigned char *)output_start; + free_mem_ptr = free_mem_ptr_p; + free_mem_end_ptr = free_mem_ptr_end_p; + __machine_arch_type = arch_id; + + arch_decomp_setup(); + + putstr("Uncompressing Linux..."); + ret = do_decompress(input_data, input_data_end - input_data, + output_data, error); + if (ret) + error("decompressor returned an error"); + else + putstr(" done, booting the kernel.\n"); +} diff --git a/arch/arm/boot/compressed/mmcif-sh7372.c b/arch/arm/boot/compressed/mmcif-sh7372.c new file mode 100644 index 00000000..7453c833 --- /dev/null +++ b/arch/arm/boot/compressed/mmcif-sh7372.c @@ -0,0 +1,88 @@ +/* + * sh7372 MMCIF loader + * + * Copyright (C) 2010 Magnus Damm + * Copyright (C) 2010 Simon Horman + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#include +#include +#include + +#define MMCIF_BASE (void __iomem *)0xe6bd0000 + +#define PORT84CR (void __iomem *)0xe6050054 +#define PORT85CR (void __iomem *)0xe6050055 +#define PORT86CR (void __iomem *)0xe6050056 +#define PORT87CR (void __iomem *)0xe6050057 +#define PORT88CR (void __iomem *)0xe6050058 +#define PORT89CR (void __iomem *)0xe6050059 +#define PORT90CR (void __iomem *)0xe605005a +#define PORT91CR (void __iomem *)0xe605005b +#define PORT92CR (void __iomem *)0xe605005c +#define PORT99CR (void __iomem *)0xe6050063 + +#define SMSTPCR3 (void __iomem *)0xe615013c + +/* SH7372 specific MMCIF loader + * + * loads the zImage from an MMC card starting from block 1. + * + * The image must be start with a vrl4 header and + * the zImage must start at offset 512 of the image. That is, + * at block 2 (=byte 1024) on the media + * + * Use the following line to write the vrl4 formated zImage + * to an MMC card + * # dd if=vrl4.out of=/dev/sdx bs=512 seek=1 + */ +asmlinkage void mmcif_loader(unsigned char *buf, unsigned long len) +{ + mmc_init_progress(); + mmc_update_progress(MMC_PROGRESS_ENTER); + + /* Initialise MMC + * registers: PORT84CR-PORT92CR + * (MMCD0_0-MMCD0_7,MMCCMD0 Control) + * value: 0x04 - select function 4 + */ + __raw_writeb(0x04, PORT84CR); + __raw_writeb(0x04, PORT85CR); + __raw_writeb(0x04, PORT86CR); + __raw_writeb(0x04, PORT87CR); + __raw_writeb(0x04, PORT88CR); + __raw_writeb(0x04, PORT89CR); + __raw_writeb(0x04, PORT90CR); + __raw_writeb(0x04, PORT91CR); + __raw_writeb(0x04, PORT92CR); + + /* Initialise MMC + * registers: PORT99CR (MMCCLK0 Control) + * value: 0x10 | 0x04 - enable output | select function 4 + */ + __raw_writeb(0x14, PORT99CR); + + /* Enable clock to MMC hardware block */ + __raw_writel(__raw_readl(SMSTPCR3) & ~(1 << 12), SMSTPCR3); + + mmc_update_progress(MMC_PROGRESS_INIT); + + /* setup MMCIF hardware */ + sh_mmcif_boot_init(MMCIF_BASE); + + mmc_update_progress(MMC_PROGRESS_LOAD); + + /* load kernel via MMCIF interface */ + sh_mmcif_boot_do_read(MMCIF_BASE, 2, /* Kernel is at block 2 */ + (len + SH_MMCIF_BBS - 1) / SH_MMCIF_BBS, buf); + + + /* Disable clock to MMC hardware block */ + __raw_writel(__raw_readl(SMSTPCR3) & (1 << 12), SMSTPCR3); + + mmc_update_progress(MMC_PROGRESS_DONE); +} diff --git a/arch/arm/boot/compressed/ofw-shark.c b/arch/arm/boot/compressed/ofw-shark.c new file mode 100644 index 00000000..465c54b6 --- /dev/null +++ b/arch/arm/boot/compressed/ofw-shark.c @@ -0,0 +1,260 @@ +/* + * linux/arch/arm/boot/compressed/ofw-shark.c + * + * by Alexander Schulz + * + * This file is used to get some basic information + * about the memory layout of the shark we are running + * on. Memory is usually divided in blocks a 8 MB. + * And bootargs are copied from OpenFirmware. + */ + + +#include +#include +#include +#include + + +asmlinkage void +create_params (unsigned long *buffer) +{ + /* Is there a better address? 
Also change in mach-shark/core.c */
+	struct tag *tag = (struct tag *) 0x08003000;
+	int j,i,m,k,nr_banks,size;
+	unsigned char *c;
+
+	k = 0;
+
+	/* Head of the taglist */
+	tag->hdr.tag = ATAG_CORE;
+	tag->hdr.size = tag_size(tag_core);
+	tag->u.core.flags = 1;
+	tag->u.core.pagesize = PAGE_SIZE;
+	tag->u.core.rootdev = 0;
+
+	/* Build up one tagged block for each memory region */
+	size=0;
+	nr_banks=(unsigned int) buffer[0];
+	for (j=0;j<nr_banks;j++){
+		/* search the lowest address and put it into the next entry   */
+		/* not a fast sort algorithm, but there are at most 8 entries */
+		/* and this is used only once anyway                          */
+		m=0xffffffff;
+		for (i=0;i<(unsigned int) buffer[0];i++){
+			if (buffer[2*i+1]<m) {
+				m=buffer[2*i+1];
+				k=i;
+			}
+		}
+
+		tag = tag_next(tag);
+		tag->hdr.tag = ATAG_MEM;
+		tag->hdr.size = tag_size(tag_mem32);
+		tag->u.mem.size = buffer[2*k+2];
+		tag->u.mem.start = buffer[2*k+1];
+
+		size += buffer[2*k+2];
+
+		buffer[2*k+1]=0xffffffff; /* mark as copied */
+	}
+
+	/* The command line */
+	tag = tag_next(tag);
+	tag->hdr.tag = ATAG_CMDLINE;
+
+	c=(unsigned char *)(&buffer[34]);
+	j=0;
+	while (*c) tag->u.cmdline.cmdline[j++]=*c++;
+
+	tag->u.cmdline.cmdline[j]=0;
+	tag->hdr.size = (j + 7 + sizeof(struct tag_header)) >> 2;
+
+	/* Hardware revision */
+	tag = tag_next(tag);
+	tag->hdr.tag = ATAG_REVISION;
+	tag->hdr.size = tag_size(tag_revision);
+	tag->u.revision.rev = ((unsigned char) buffer[33])-'0';
+
+	/* End of the taglist */
+	tag = tag_next(tag);
+	tag->hdr.tag = 0;
+	tag->hdr.size = 0;
+}
+
+
+typedef int (*ofw_handle_t)(void *);
+
+/* Everything below is called with a wrong MMU setting.
+ * This means: no string constants, no initialization of
+ * arrays, no global variables! This is ugly but I didn't
+ * want to write this in assembler :-)
+ */
+
+int
+of_decode_int(const unsigned char *p)
+{
+	unsigned int i = *p++ << 8;
+	i = (i + *p++) << 8;
+	i = (i + *p++) << 8;
+	return (i + *p);
+}
+
+int
+OF_finddevice(ofw_handle_t openfirmware, char *name)
+{
+	unsigned int args[8];
+	char service[12];
+
+	service[0]='f';
+	service[1]='i';
+	service[2]='n';
+	service[3]='d';
+	service[4]='d';
+	service[5]='e';
+	service[6]='v';
+	service[7]='i';
+	service[8]='c';
+	service[9]='e';
+	service[10]='\0';
+
+	args[0]=(unsigned int)service;
+	args[1]=1;
+	args[2]=1;
+	args[3]=(unsigned int)name;
+
+	if (openfirmware(args) == -1)
+		return -1;
+	return args[4];
+}
+
+int
+OF_getproplen(ofw_handle_t openfirmware, int handle, char *prop)
+{
+	unsigned int args[8];
+	char service[12];
+
+	service[0]='g';
+	service[1]='e';
+	service[2]='t';
+	service[3]='p';
+	service[4]='r';
+	service[5]='o';
+	service[6]='p';
+	service[7]='l';
+	service[8]='e';
+	service[9]='n';
+	service[10]='\0';
+
+	args[0] = (unsigned int)service;
+	args[1] = 2;
+	args[2] = 1;
+	args[3] = (unsigned int)handle;
+	args[4] = (unsigned int)prop;
+
+	if (openfirmware(args) == -1)
+		return -1;
+	return args[5];
+}
+
+int
+OF_getprop(ofw_handle_t openfirmware, int handle, char *prop, void *buf, unsigned int buflen)
+{
+	unsigned int args[8];
+	char service[8];
+
+	service[0]='g';
+	service[1]='e';
+	service[2]='t';
+	service[3]='p';
+	service[4]='r';
+	service[5]='o';
+	service[6]='p';
+	service[7]='\0';
+
+	args[0] = (unsigned int)service;
+	args[1] = 4;
+	args[2] = 1;
+	args[3] = (unsigned int)handle;
+	args[4] = (unsigned int)prop;
+	args[5] = (unsigned int)buf;
+	args[6] = buflen;
+
+	if (openfirmware(args) == -1)
+		return -1;
+	return args[7];
+}
+
+asmlinkage void ofw_init(ofw_handle_t o, int *nomr, int *pointer)
+{
+	int phandle,i,mem_len,buffer[32];
+	char temp[15];
+
+	temp[0]='/';
+	temp[1]='m';
+	temp[2]='e';
+	temp[3]='m';
+	temp[4]='o';
+	temp[5]='r';
+	temp[6]='y';
+	temp[7]='\0';
+
+	phandle=OF_finddevice(o,temp);
+
+	temp[0]='r';
+	temp[1]='e';
+	temp[2]='g';
+	temp[3]='\0';
+
+	mem_len = OF_getproplen(o,phandle, temp);
+
OF_getprop(o,phandle, temp, buffer, mem_len); + *nomr=mem_len >> 3; + + for (i=0; i<=mem_len/4; i++) pointer[i]=of_decode_int((const unsigned char *)&buffer[i]); + + temp[0]='/'; + temp[1]='c'; + temp[2]='h'; + temp[3]='o'; + temp[4]='s'; + temp[5]='e'; + temp[6]='n'; + temp[7]='\0'; + + phandle=OF_finddevice(o,temp); + + temp[0]='b'; + temp[1]='o'; + temp[2]='o'; + temp[3]='t'; + temp[4]='a'; + temp[5]='r'; + temp[6]='g'; + temp[7]='s'; + temp[8]='\0'; + + mem_len = OF_getproplen(o,phandle, temp); + OF_getprop(o,phandle, temp, buffer, mem_len); + if (mem_len > 128) mem_len=128; + for (i=0; i<=mem_len/4; i++) pointer[i+33]=buffer[i]; + pointer[i+33]=0; + + temp[0]='/'; + temp[1]='\0'; + phandle=OF_finddevice(o,temp); + temp[0]='b'; + temp[1]='a'; + temp[2]='n'; + temp[3]='n'; + temp[4]='e'; + temp[5]='r'; + temp[6]='-'; + temp[7]='n'; + temp[8]='a'; + temp[9]='m'; + temp[10]='e'; + temp[11]='\0'; + mem_len = OF_getproplen(o,phandle, temp); + OF_getprop(o,phandle, temp, buffer, mem_len); + * ((unsigned char *) &pointer[32]) = ((unsigned char *) buffer)[mem_len-2]; +} diff --git a/arch/arm/boot/compressed/piggy.gzip.S b/arch/arm/boot/compressed/piggy.gzip.S new file mode 100644 index 00000000..a68adf91 --- /dev/null +++ b/arch/arm/boot/compressed/piggy.gzip.S @@ -0,0 +1,6 @@ + .section .piggydata,#alloc + .globl input_data +input_data: + .incbin "arch/arm/boot/compressed/piggy.gzip" + .globl input_data_end +input_data_end: diff --git a/arch/arm/boot/compressed/piggy.lzma.S b/arch/arm/boot/compressed/piggy.lzma.S new file mode 100644 index 00000000..d7e69cff --- /dev/null +++ b/arch/arm/boot/compressed/piggy.lzma.S @@ -0,0 +1,6 @@ + .section .piggydata,#alloc + .globl input_data +input_data: + .incbin "arch/arm/boot/compressed/piggy.lzma" + .globl input_data_end +input_data_end: diff --git a/arch/arm/boot/compressed/piggy.lzo.S b/arch/arm/boot/compressed/piggy.lzo.S new file mode 100644 index 00000000..a425ad95 --- /dev/null +++ b/arch/arm/boot/compressed/piggy.lzo.S @@ -0,0 +1,6 @@ + .section .piggydata,#alloc + .globl input_data +input_data: + .incbin "arch/arm/boot/compressed/piggy.lzo" + .globl input_data_end +input_data_end: diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in new file mode 100644 index 00000000..ea80abe7 --- /dev/null +++ b/arch/arm/boot/compressed/vmlinux.lds.in @@ -0,0 +1,68 @@ +/* + * linux/arch/arm/boot/compressed/vmlinux.lds.in + * + * Copyright (C) 2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /DISCARD/ : { + *(.ARM.exidx*) + *(.ARM.extab*) + /* + * Discard any r/w data - this produces a link error if we have any, + * which is required for PIC decompression. Local data generates + * GOTOFF relocations, which prevents it being relocated independently + * of the text/got segments. + */ + *(.data) + } + + . = TEXT_START; + _text = .; + + .text : { + _start = .; + *(.start) + *(.text) + *(.text.*) + *(.fixup) + *(.gnu.warning) + *(.rodata) + *(.rodata.*) + *(.glue_7) + *(.glue_7t) + *(.piggydata) + . = ALIGN(4); + } + + _etext = .; + + _got_start = .; + .got : { *(.got) } + _got_end = .; + .got.plt : { *(.got.plt) } + _edata = .; + + . = BSS_START; + __bss_start = .; + .bss : { *(.bss) } + _end = .; + + . 
= ALIGN(8); /* the stack must be 64-bit aligned */ + .stack : { *(.stack) } + + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } +} + diff --git a/arch/arm/boot/install.sh b/arch/arm/boot/install.sh new file mode 100644 index 00000000..06ea7d42 --- /dev/null +++ b/arch/arm/boot/install.sh @@ -0,0 +1,52 @@ +#!/bin/sh +# +# arch/arm/boot/install.sh +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 1995 by Linus Torvalds +# +# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin +# Adapted from code in arch/i386/boot/install.sh by Russell King +# +# "make install" script for arm architecture +# +# Arguments: +# $1 - kernel version +# $2 - kernel image file +# $3 - kernel map file +# $4 - default install path (blank if root directory) +# + +# User may have a custom install script +if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi +if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi + +if [ "$(basename $2)" = "zImage" ]; then +# Compressed install + echo "Installing compressed kernel" + base=vmlinuz +else +# Normal install + echo "Installing normal kernel" + base=vmlinux +fi + +if [ -f $4/$base-$1 ]; then + mv $4/$base-$1 $4/$base-$1.old +fi +cat $2 > $4/$base-$1 + +# Install system map file +if [ -f $4/System.map-$1 ]; then + mv $4/System.map-$1 $4/System.map-$1.old +fi +cp $3 $4/System.map-$1 + +if [ -x /sbin/loadmap ]; then + /sbin/loadmap +else + echo "You have to install it yourself" +fi -- cgit v1.2.3
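The comment in mmcif-sh7372.c above describes the media layout the loader expects: dd with seek=1 writes the vrl4-wrapped image starting at block 1, so the vrl4 header occupies block 1 and the zImage, which sits at offset 512 inside that image, begins at block 2 (byte 1024). The small sketch below spells out the arithmetic behind the sh_mmcif_boot_do_read() call; SH_MMCIF_BBS is the 512-byte boot block size from the MMCIF header (the fallback define here is only for the sketch), and vrl4_header_block / zimage_start_block are illustrative names, not symbols from the patch.

#ifndef SH_MMCIF_BBS
#define SH_MMCIF_BBS 512		/* assumed boot block size */
#endif

enum { vrl4_header_block = 1, zimage_start_block = 2 };

/* Number of MMCIF blocks the loader reads for a zImage of "len" bytes,
 * starting at zimage_start_block, rounding the length up to whole blocks. */
static unsigned long mmcif_blocks_to_read(unsigned long len)
{
	return (len + SH_MMCIF_BBS - 1) / SH_MMCIF_BBS;
}

A note on the Open Firmware calls in ofw-shark.c: OF_finddevice(), OF_getproplen() and OF_getprop() all follow the same client-interface convention, in which one flat array is handed to the firmware entry point holding the service name, the number of input arguments, the number of results, and then the inputs, with the results written back into the slots after the inputs. That is why the helpers return args[4], args[5] and args[7] respectively. A generic wrapper would look roughly like the sketch below; of_call() is a hypothetical name and is not part of the patch, and the real code deliberately open-codes each call and builds its strings character by character because, as its own comment explains, it runs with the MMU mapping the wrong addresses, so string constants and initialised arrays cannot be used.

typedef int (*ofw_handle_t)(void *);	/* same typedef as in ofw-shark.c */

/* Hypothetical generic wrapper around the Open Firmware client interface. */
static int of_call(ofw_handle_t openfirmware, char *service,
		   unsigned int nargs, unsigned int nrets,
		   unsigned int *in, unsigned int *out)
{
	unsigned int args[8];
	unsigned int i;

	args[0] = (unsigned int)service;	/* service name */
	args[1] = nargs;			/* number of input arguments */
	args[2] = nrets;			/* number of results */
	for (i = 0; i < nargs; i++)
		args[3 + i] = in[i];

	if (openfirmware(args) == -1)		/* call the firmware entry point */
		return -1;

	for (i = 0; i < nrets; i++)
		out[i] = args[3 + nargs + i];	/* results follow the inputs */
	return 0;
}

Under this convention OF_finddevice is the one-input, one-result case, so its device handle comes back in args[4], which is exactly what the open-coded version in the patch returns.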