Diffstat (limited to 'target/linux/generic/patches-3.18/220-gc_sections.patch')
-rw-r--r-- | target/linux/generic/patches-3.18/220-gc_sections.patch | 531
1 file changed, 531 insertions, 0 deletions
diff --git a/target/linux/generic/patches-3.18/220-gc_sections.patch b/target/linux/generic/patches-3.18/220-gc_sections.patch
new file mode 100644
index 0000000000..c105c40b80
--- /dev/null
+++ b/target/linux/generic/patches-3.18/220-gc_sections.patch
@@ -0,0 +1,531 @@
+From: Felix Fietkau <nbd@openwrt.org>
+
+use -ffunction-sections, -fdata-sections and --gc-sections --sort-section=name
+
+In combination with kernel symbol export stripping this significantly reduces
+the kernel image size. Used on both ARM and MIPS architectures.
+
+Signed-off-by: Felix Fietkau <nbd@openwrt.org>
+Signed-off-by: Jonas Gorski <jogo@openwrt.org>
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -89,10 +89,14 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
+ #
+ cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
+ cflags-y += -msoft-float
+-LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
++LDFLAGS_vmlinux += -G 0 -static -n -nostdlib --gc-sections --sort-section=name
+ KBUILD_AFLAGS_MODULE += -mlong-calls
+ KBUILD_CFLAGS_MODULE += -mlong-calls
+
++ifndef CONFIG_FUNCTION_TRACER
++KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
++endif
++
+ cflags-y += -ffreestanding
+
+ #
+--- a/arch/mips/kernel/vmlinux.lds.S
++++ b/arch/mips/kernel/vmlinux.lds.S
+@@ -67,7 +67,7 @@ SECTIONS
+ /* Exception table for data bus errors */
+ __dbe_table : {
+ __start___dbe_table = .;
+- *(__dbe_table)
++ KEEP(*(__dbe_table))
+ __stop___dbe_table = .;
+ }
+
+@@ -112,7 +112,7 @@ SECTIONS
+ . = ALIGN(4);
+ .mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
+ __mips_machines_start = .;
+- *(.mips.machines.init)
++ KEEP(*(.mips.machines.init))
+ __mips_machines_end = .;
+ }
+
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -87,7 +87,7 @@
+ #ifdef CONFIG_FTRACE_MCOUNT_RECORD
+ #define MCOUNT_REC() . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start_mcount_loc) = .; \
+- *(__mcount_loc) \
++ KEEP(*(__mcount_loc)) \
+ VMLINUX_SYMBOL(__stop_mcount_loc) = .;
+ #else
+ #define MCOUNT_REC()
+@@ -95,7 +95,7 @@
+
+ #ifdef CONFIG_TRACE_BRANCH_PROFILING
+ #define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
+- *(_ftrace_annotated_branch) \
++ KEEP(*(_ftrace_annotated_branch)) \
+ VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
+ #else
+ #define LIKELY_PROFILE()
+@@ -103,7 +103,7 @@
+
+ #ifdef CONFIG_PROFILE_ALL_BRANCHES
+ #define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
+- *(_ftrace_branch) \
++ KEEP(*(_ftrace_branch)) \
+ VMLINUX_SYMBOL(__stop_branch_profile) = .;
+ #else
+ #define BRANCH_PROFILE()
+@@ -112,7 +112,7 @@
+ #ifdef CONFIG_EVENT_TRACING
+ #define FTRACE_EVENTS() . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start_ftrace_events) = .; \
+- *(_ftrace_events) \
++ KEEP(*(_ftrace_events)) \
+ VMLINUX_SYMBOL(__stop_ftrace_events) = .;
+ #else
+ #define FTRACE_EVENTS()
+@@ -120,7 +120,7 @@
+
+ #ifdef CONFIG_TRACING
+ #define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
+- *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
++ KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
+ VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
+ #define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
+ *(__tracepoint_str) /* Trace_printk fmt' pointer */ \
+@@ -133,7 +133,7 @@
+ #ifdef CONFIG_FTRACE_SYSCALLS
+ #define TRACE_SYSCALLS() . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
+- *(__syscalls_metadata) \
++ KEEP(*(__syscalls_metadata)) \
+ VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
+ #else
+ #define TRACE_SYSCALLS()
+@@ -142,8 +142,8 @@
+ #ifdef CONFIG_CLKSRC_OF
+ #define CLKSRC_OF_TABLES() . = ALIGN(8); \
+ VMLINUX_SYMBOL(__clksrc_of_table) = .; \
+- *(__clksrc_of_table) \
+- *(__clksrc_of_table_end)
++ KEEP(*(__clksrc_of_table)) \
++ KEEP(*(__clksrc_of_table_end))
+ #else
+ #define CLKSRC_OF_TABLES()
+ #endif
+@@ -152,8 +152,8 @@
+ #define IRQCHIP_OF_MATCH_TABLE() \
+ . = ALIGN(8); \
+ VMLINUX_SYMBOL(__irqchip_begin) = .; \
+- *(__irqchip_of_table) \
+- *(__irqchip_of_end)
++ KEEP(*(__irqchip_of_table)) \
++ KEEP(*(__irqchip_of_end))
+ #else
+ #define IRQCHIP_OF_MATCH_TABLE()
+ #endif
+@@ -161,8 +161,8 @@
+ #ifdef CONFIG_COMMON_CLK
+ #define CLK_OF_TABLES() . = ALIGN(8); \
+ VMLINUX_SYMBOL(__clk_of_table) = .; \
+- *(__clk_of_table) \
+- *(__clk_of_table_end)
++ KEEP(*(__clk_of_table)) \
++ KEEP(*(__clk_of_table_end))
+ #else
+ #define CLK_OF_TABLES()
+ #endif
+@@ -170,7 +170,7 @@
+ #define KERNEL_DTB() \
+ STRUCT_ALIGN(); \
+ VMLINUX_SYMBOL(__dtb_start) = .; \
+- *(.dtb.init.rodata) \
++ KEEP(*(.dtb.init.rodata)) \
+ VMLINUX_SYMBOL(__dtb_end) = .;
+
+ /* .data section */
+@@ -186,16 +186,17 @@
+ /* implement dynamic printk debug */ \
+ . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start___jump_table) = .; \
+- *(__jump_table) \
++ KEEP(*(__jump_table)) \
+ VMLINUX_SYMBOL(__stop___jump_table) = .; \
+ . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start___verbose) = .; \
+- *(__verbose) \
++ KEEP(*(__verbose)) \
+ VMLINUX_SYMBOL(__stop___verbose) = .; \
+ LIKELY_PROFILE() \
+ BRANCH_PROFILE() \
+ TRACE_PRINTKS() \
+- TRACEPOINT_STR()
++ TRACEPOINT_STR() \
++ *(.data.[a-zA-Z_]*)
+
+ /*
+ * Data section helpers
+@@ -249,32 +250,32 @@
+ /* PCI quirks */ \
+ .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
+- *(.pci_fixup_early) \
++ KEEP(*(.pci_fixup_early)) \
+ VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
+ VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
+- *(.pci_fixup_header) \
++ KEEP(*(.pci_fixup_header)) \
+ VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
+ VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
+- *(.pci_fixup_final) \
++ KEEP(*(.pci_fixup_final)) \
+ VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
+ VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
+- *(.pci_fixup_enable) \
++ KEEP(*(.pci_fixup_enable)) \
+ VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
+ VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
+- *(.pci_fixup_resume) \
++ KEEP(*(.pci_fixup_resume)) \
+ VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
+ VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
+- *(.pci_fixup_resume_early) \
++ KEEP(*(.pci_fixup_resume_early)) \
+ VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
+ VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
+- *(.pci_fixup_suspend) \
++ KEEP(*(.pci_fixup_suspend)) \
+ VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
+ } \
+ \
+ /* Built-in firmware blobs */ \
+ .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_builtin_fw) = .; \
+- *(.builtin_fw) \
++ KEEP(*(.builtin_fw)) \
+ VMLINUX_SYMBOL(__end_builtin_fw) = .; \
+ } \
+ \
+@@ -283,49 +284,49 @@
+ /* Kernel symbol table: Normal symbols */ \
+ __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab) = .; \
+- *(SORT(___ksymtab+*)) \
++ KEEP(*(SORT(___ksymtab+*))) \
+ VMLINUX_SYMBOL(__stop___ksymtab) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only symbols */ \
+ __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
+- *(SORT(___ksymtab_gpl+*)) \
++ KEEP(*(SORT(___ksymtab_gpl+*))) \
+ VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
+ } \
+ \
+ /* Kernel symbol table: Normal unused symbols */ \
+ __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
+- *(SORT(___ksymtab_unused+*)) \
++ KEEP(*(SORT(___ksymtab_unused+*))) \
+ VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only unused symbols */ \
+ __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
+- *(SORT(___ksymtab_unused_gpl+*)) \
++ KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
+ VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-future-only symbols */ \
+ __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
+- *(SORT(___ksymtab_gpl_future+*)) \
++ KEEP(*(SORT(___ksymtab_gpl_future+*))) \
+ VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
+ } \
+ \
+ /* Kernel symbol table: Normal symbols */ \
+ __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___kcrctab) = .; \
+- *(SORT(___kcrctab+*)) \
++ KEEP(*(SORT(___kcrctab+*))) \
+ VMLINUX_SYMBOL(__stop___kcrctab) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only symbols */ \
+ __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
+- *(SORT(___kcrctab_gpl+*)) \
++ KEEP(*(SORT(___kcrctab_gpl+*))) \
+ VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
+ } \
+ \
+@@ -339,14 +340,14 @@
+ /* Kernel symbol table: GPL-only unused symbols */ \
+ __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
+- *(SORT(___kcrctab_unused_gpl+*)) \
++ KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
+ VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-future-only symbols */ \
+ __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
+- *(SORT(___kcrctab_gpl_future+*)) \
++ KEEP(*(SORT(___kcrctab_gpl_future+*))) \
+ VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
+ } \
+ \
+@@ -365,14 +366,14 @@
+ /* Built-in module parameters. */ \
+ __param : AT(ADDR(__param) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___param) = .; \
+- *(__param) \
++ KEEP(*(__param)) \
+ VMLINUX_SYMBOL(__stop___param) = .; \
+ } \
+ \
+ /* Built-in module versions. */ \
+ __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___modver) = .; \
+- *(__modver) \
++ KEEP(*(__modver)) \
+ VMLINUX_SYMBOL(__stop___modver) = .; \
+ . = ALIGN((align)); \
+ VMLINUX_SYMBOL(__end_rodata) = .; \
+@@ -428,7 +429,7 @@
+ #define ENTRY_TEXT \
+ ALIGN_FUNCTION(); \
+ VMLINUX_SYMBOL(__entry_text_start) = .; \
+- *(.entry.text) \
++ KEEP(*(.entry.text)) \
+ VMLINUX_SYMBOL(__entry_text_end) = .;
+
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+@@ -456,7 +457,7 @@
+ . = ALIGN(align); \
+ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ex_table) = .; \
+- *(__ex_table) \
++ KEEP(*(__ex_table)) \
+ VMLINUX_SYMBOL(__stop___ex_table) = .; \
+ }
+
+@@ -472,8 +473,8 @@
+ #ifdef CONFIG_CONSTRUCTORS
+ #define KERNEL_CTORS() . = ALIGN(8); \
+ VMLINUX_SYMBOL(__ctors_start) = .; \
+- *(.ctors) \
+- *(.init_array) \
++ KEEP(*(.ctors)) \
++ KEEP(*(.init_array)) \
+ VMLINUX_SYMBOL(__ctors_end) = .;
+ #else
+ #define KERNEL_CTORS()
+@@ -517,7 +518,7 @@
+ #define SBSS(sbss_align) \
+ . = ALIGN(sbss_align); \
+ .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
+- *(.sbss) \
++ *(.sbss .sbss.*) \
+ *(.scommon) \
+ }
+
+@@ -535,7 +536,7 @@
+ BSS_FIRST_SECTIONS \
+ *(.bss..page_aligned) \
+ *(.dynbss) \
+- *(.bss) \
++ *(.bss .bss.*) \
+ *(COMMON) \
+ }
+
+@@ -596,7 +597,7 @@
+ . = ALIGN(4); \
+ .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__tracedata_start) = .; \
+- *(.tracedata) \
++ KEEP(*(.tracedata)) \
+ VMLINUX_SYMBOL(__tracedata_end) = .; \
+ }
+ #else
+@@ -613,17 +614,17 @@
+ #define INIT_SETUP(initsetup_align) \
+ . = ALIGN(initsetup_align); \
+ VMLINUX_SYMBOL(__setup_start) = .; \
+- *(.init.setup) \
++ KEEP(*(.init.setup)) \
+ VMLINUX_SYMBOL(__setup_end) = .;
+
+ #define INIT_CALLS_LEVEL(level) \
+ VMLINUX_SYMBOL(__initcall##level##_start) = .; \
+- *(.initcall##level##.init) \
+- *(.initcall##level##s.init) \
++ KEEP(*(.initcall##level##.init)) \
++ KEEP(*(.initcall##level##s.init)) \
+
+ #define INIT_CALLS \
+ VMLINUX_SYMBOL(__initcall_start) = .; \
+- *(.initcallearly.init) \
++ KEEP(*(.initcallearly.init)) \
+ INIT_CALLS_LEVEL(0) \
+ INIT_CALLS_LEVEL(1) \
+ INIT_CALLS_LEVEL(2) \
+@@ -637,21 +638,21 @@
+
+ #define CON_INITCALL \
+ VMLINUX_SYMBOL(__con_initcall_start) = .; \
+- *(.con_initcall.init) \
++ KEEP(*(.con_initcall.init)) \
+ VMLINUX_SYMBOL(__con_initcall_end) = .;
+
+ #define SECURITY_INITCALL \
+ VMLINUX_SYMBOL(__security_initcall_start) = .; \
+- *(.security_initcall.init) \
++ KEEP(*(.security_initcall.init)) \
+ VMLINUX_SYMBOL(__security_initcall_end) = .;
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+ #define INIT_RAM_FS \
+ . = ALIGN(4); \
+ VMLINUX_SYMBOL(__initramfs_start) = .; \
+- *(.init.ramfs) \
++ KEEP(*(.init.ramfs)) \
+ . = ALIGN(8); \
+- *(.init.ramfs.info)
++ KEEP(*(.init.ramfs.info))
+ #else
+ #define INIT_RAM_FS
+ #endif
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -18,11 +18,16 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
+ LDFLAGS_vmlinux += --be8
+ LDFLAGS_MODULE += --be8
+ endif
++LDFLAGS_vmlinux += --gc-sections --sort-section=name
+
+ OBJCOPYFLAGS :=-O binary -R .comment -S
+ GZFLAGS :=-9
+ #KBUILD_CFLAGS +=-pipe
+
++ifndef CONFIG_FUNCTION_TRACER
++KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
++endif
++
+ # Never generate .eh_frame
+ KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
+
+--- a/arch/arm/kernel/vmlinux.lds.S
++++ b/arch/arm/kernel/vmlinux.lds.S
+@@ -12,13 +12,13 @@
+ #define PROC_INFO \
+ . = ALIGN(4); \
+ VMLINUX_SYMBOL(__proc_info_begin) = .; \
+- *(.proc.info.init) \
++ KEEP(*(.proc.info.init)) \
+ VMLINUX_SYMBOL(__proc_info_end) = .;
+
+ #define IDMAP_TEXT \
+ ALIGN_FUNCTION(); \
+ VMLINUX_SYMBOL(__idmap_text_start) = .; \
+- *(.idmap.text) \
++ KEEP(*(.idmap.text)) \
+ VMLINUX_SYMBOL(__idmap_text_end) = .; \
+ . = ALIGN(32); \
+ VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
+@@ -93,7 +93,7 @@ SECTIONS
+ .text : { /* Real text segment */
+ _stext = .; /* Text and read-only data */
+ __exception_text_start = .;
+- *(.exception.text)
++ KEEP(*(.exception.text))
+ __exception_text_end = .;
+ IRQENTRY_TEXT
+ TEXT_TEXT
+@@ -118,7 +118,7 @@ SECTIONS
+ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
+ __start___ex_table = .;
+ #ifdef CONFIG_MMU
+- *(__ex_table)
++ KEEP(*(__ex_table))
+ #endif
+ __stop___ex_table = .;
+ }
+@@ -130,12 +130,12 @@ SECTIONS
+ . = ALIGN(8);
+ .ARM.unwind_idx : {
+ __start_unwind_idx = .;
+- *(.ARM.exidx*)
++ KEEP(*(.ARM.exidx*))
+ __stop_unwind_idx = .;
+ }
+ .ARM.unwind_tab : {
+ __start_unwind_tab = .;
+- *(.ARM.extab*)
++ KEEP(*(.ARM.extab*))
+ __stop_unwind_tab = .;
+ }
+ #endif
+@@ -154,14 +154,14 @@ SECTIONS
+ */
+ __vectors_start = .;
+ .vectors 0 : AT(__vectors_start) {
+- *(.vectors)
++ KEEP(*(.vectors))
+ }
+ . = __vectors_start + SIZEOF(.vectors);
+ __vectors_end = .;
+
+ __stubs_start = .;
+ .stubs 0x1000 : AT(__stubs_start) {
+- *(.stubs)
++ KEEP(*(.stubs))
+ }
+ . = __stubs_start + SIZEOF(.stubs);
+ __stubs_end = .;
+
+@@ -175,24 +175,24 @@ SECTIONS
+ }
+ .init.arch.info : {
+ __arch_info_begin = .;
+- *(.arch.info.init)
++ KEEP(*(.arch.info.init))
+ __arch_info_end = .;
+ }
+ .init.tagtable : {
+ __tagtable_begin = .;
+- *(.taglist.init)
++ KEEP(*(.taglist.init))
+ __tagtable_end = .;
+ }
+ #ifdef CONFIG_SMP_ON_UP
+ .init.smpalt : {
+ __smpalt_begin = .;
+- *(.alt.smp.init)
++ KEEP(*(.alt.smp.init))
+ __smpalt_end = .;
+ }
+ #endif
+ .init.pv_table : {
+ __pv_table_begin = .;
+- *(.pv_table)
++ KEEP(*(.pv_table))
+ __pv_table_end = .;
+ }
+ .init.data : {
+--- a/arch/arm/boot/compressed/Makefile
++++ b/arch/arm/boot/compressed/Makefile
+@@ -120,6 +120,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
+ ORIG_CFLAGS := $(KBUILD_CFLAGS)
+ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
+ endif
++KBUILD_CFLAGS_KERNEL := $(patsubst -f%-sections,,$(KBUILD_CFLAGS_KERNEL))
+
+ ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
+ asflags-y := -DZIMAGE
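
Editorial note, not part of the commit: the KEEP() wrappers above are what make --gc-sections safe. The linker discards every input section it cannot trace back to the entry point, and tables such as the initcall levels, __ksymtab/__kcrctab, PCI fixups and OF match tables are only reached at run time by walking between linker-generated start/stop symbols, so without KEEP() they would look unreferenced and be dropped. A minimal linker-script sketch of the pattern, with a hypothetical .my_table section and symbol names:

    /* Sketch only -- hypothetical names, same shape as the vmlinux.lds.h hunks above. */
    . = ALIGN(8);
    __start_my_table = .;
    KEEP(*(.my_table))      /* entries placed via __attribute__((section(".my_table"))) */
    __stop_my_table = .;

Ordinary functions and data are deliberately left without KEEP(), so that -ffunction-sections/-fdata-sections (each symbol in its own input section) combined with --gc-sections can discard the unreferenced ones; that is where the image-size reduction comes from.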