From e3d8676f5722b7622685581e06e8f53e6138e3ab Mon Sep 17 00:00:00 2001
From: Felix Fietkau
Date: Sat, 15 Jul 2017 23:42:36 +0200
Subject: use -ffunction-sections, -fdata-sections and --gc-sections

In combination with kernel symbol export stripping this significantly
reduces the kernel image size. Used on both ARM and MIPS architectures.

Signed-off-by: Felix Fietkau
Signed-off-by: Jonas Gorski
Signed-off-by: Gabor Juhos
---
 Makefile                          | 10 +++----
 arch/arm/Kconfig                  |  1 +
 arch/arm/boot/compressed/Makefile |  1 +
 arch/arm/kernel/vmlinux.lds.S     | 26 ++++++++--------
 arch/mips/Kconfig                 |  1 +
 arch/mips/kernel/vmlinux.lds.S    |  4 +--
 include/asm-generic/vmlinux.lds.h | 63 ++++++++++++++++++++-------------------
 7 files changed, 55 insertions(+), 51 deletions(-)

--- a/Makefile
+++ b/Makefile
@@ -402,6 +402,11 @@ KBUILD_LDFLAGS_MODULE = -T $(srctree)/sc
 GCC_PLUGINS_CFLAGS :=
 CLANG_FLAGS :=
 
+ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+KBUILD_CFLAGS_KERNEL += $(call cc-option,-ffunction-sections,)
+KBUILD_CFLAGS_KERNEL += $(call cc-option,-fdata-sections,)
+endif
+
 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
 KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
 KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
@@ -652,11 +657,6 @@ KBUILD_CFLAGS += $(call cc-disable-warni
 KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
 KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias)
 
-ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
-KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,)
-KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
-endif
-
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,) $(EXTRA_OPTIMIZATION)
 else
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -81,6 +81,7 @@ config ARM
 	select HAVE_UID16
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN
 	select IRQ_FORCED_THREADING
+	select LD_DEAD_CODE_DATA_ELIMINATION
 	select MODULES_USE_ELF_REL
 	select NO_BOOTMEM
 	select OF_EARLY_FLATTREE if OF
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -102,6 +102,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
+KBUILD_CFLAGS_KERNEL := $(patsubst -f%-sections,,$(KBUILD_CFLAGS_KERNEL))
 
 # -fstack-protector-strong triggers protection checks in this code,
 # but it is being used too early to link to meaningful stack_chk logic.
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -17,7 +17,7 @@
 #define PROC_INFO							\
 	. = ALIGN(4);							\
 	VMLINUX_SYMBOL(__proc_info_begin) = .;				\
-	*(.proc.info.init)						\
+	KEEP(*(.proc.info.init))					\
 	VMLINUX_SYMBOL(__proc_info_end) = .;
 
 #define HYPERVISOR_TEXT							\
@@ -28,11 +28,11 @@
 #define IDMAP_TEXT							\
 	ALIGN_FUNCTION();						\
 	VMLINUX_SYMBOL(__idmap_text_start) = .;				\
-	*(.idmap.text)							\
+	KEEP(*(.idmap.text))						\
 	VMLINUX_SYMBOL(__idmap_text_end) = .;				\
 	. = ALIGN(PAGE_SIZE);						\
 	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;			\
-	*(.hyp.idmap.text)						\
+	KEEP(*(.hyp.idmap.text))					\
 	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -105,7 +105,7 @@ SECTIONS
 		_stext = .;		/* Text and read-only data	*/
 			IDMAP_TEXT
 			__exception_text_start = .;
-			*(.exception.text)
+			KEEP(*(.exception.text))
 			__exception_text_end = .;
 			IRQENTRY_TEXT
 			SOFTIRQENTRY_TEXT
@@ -134,7 +134,7 @@ SECTIONS
 	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
 		__start___ex_table = .;
 #ifdef CONFIG_MMU
-		*(__ex_table)
+		KEEP(*(__ex_table))
 #endif
 		__stop___ex_table = .;
 	}
@@ -146,12 +146,12 @@ SECTIONS
 	. = ALIGN(8);
 	.ARM.unwind_idx : {
 		__start_unwind_idx = .;
-		*(.ARM.exidx*)
+		KEEP(*(.ARM.exidx*))
 		__stop_unwind_idx = .;
 	}
 	.ARM.unwind_tab : {
 		__start_unwind_tab = .;
-		*(.ARM.extab*)
+		KEEP(*(.ARM.extab*))
 		__stop_unwind_tab = .;
 	}
 #endif
@@ -171,14 +171,14 @@ SECTIONS
 	 */
 	__vectors_start = .;
 	.vectors 0xffff0000 : AT(__vectors_start) {
-		*(.vectors)
+		KEEP(*(.vectors))
 	}
 	. = __vectors_start + SIZEOF(.vectors);
 	__vectors_end = .;
 
 	__stubs_start = .;
 	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
-		*(.stubs)
+		KEEP(*(.stubs))
 	}
 	. = __stubs_start + SIZEOF(.stubs);
 	__stubs_end = .;
@@ -194,24 +194,24 @@ SECTIONS
 	}
 	.init.arch.info : {
 		__arch_info_begin = .;
-		*(.arch.info.init)
+		KEEP(*(.arch.info.init))
 		__arch_info_end = .;
 	}
 	.init.tagtable : {
 		__tagtable_begin = .;
-		*(.taglist.init)
+		KEEP(*(.taglist.init))
 		__tagtable_end = .;
 	}
 #ifdef CONFIG_SMP_ON_UP
 	.init.smpalt : {
 		__smpalt_begin = .;
-		*(.alt.smp.init)
+		KEEP(*(.alt.smp.init))
 		__smpalt_end = .;
 	}
 #endif
 	.init.pv_table : {
 		__pv_table_begin = .;
-		*(.pv_table)
+		KEEP(*(.pv_table))
 		__pv_table_end = .;
 	}
 	.init.data : {
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -56,6 +56,7 @@ config MIPS
 	select CLONE_BACKWARDS
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_CC_STACKPROTECTOR
+	select LD_DEAD_CODE_DATA_ELIMINATION
 	select CPU_PM if CPU_IDLE
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_BINFMT_ELF_STATE
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -71,7 +71,7 @@ SECTIONS
 	/* Exception table for data bus errors */
 	__dbe_table : {
 		__start___dbe_table = .;
-		*(__dbe_table)
+		KEEP(*(__dbe_table))
 		__stop___dbe_table = .;
 	}
 
@@ -121,7 +121,7 @@ SECTIONS
 	. = ALIGN(4);
 	.mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
 		__mips_machines_start = .;
-		*(.mips.machines.init)
+		KEEP(*(.mips.machines.init))
 		__mips_machines_end = .;
 	}
 
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -105,7 +105,7 @@
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 #define MCOUNT_REC()	. = ALIGN(8);				\
 			VMLINUX_SYMBOL(__start_mcount_loc) = .; \
-			*(__mcount_loc)				\
+			KEEP(*(__mcount_loc))			\
 			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
 #else
 #define MCOUNT_REC()
@@ -113,7 +113,7 @@
 
 #ifdef CONFIG_TRACE_BRANCH_PROFILING
 #define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
-				*(_ftrace_annotated_branch)			      \
+				KEEP(*(_ftrace_annotated_branch))		      \
 				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
 #else
 #define LIKELY_PROFILE()
@@ -121,7 +121,7 @@
 
 #ifdef CONFIG_PROFILE_ALL_BRANCHES
 #define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .;	      \
-				*(_ftrace_branch)				      \
+				KEEP(*(_ftrace_branch))				      \
 				VMLINUX_SYMBOL(__stop_branch_profile) = .;
 #else
 #define BRANCH_PROFILE()
@@ -130,7 +130,7 @@
 #ifdef CONFIG_KPROBES
 #define KPROBE_BLACKLIST()	. = ALIGN(8);				      \
 				VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
-				*(_kprobe_blacklist)			      \
+				KEEP(*(_kprobe_blacklist))		      \
 				VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
 #else
 #define KPROBE_BLACKLIST()
@@ -139,10 +139,10 @@
 #ifdef CONFIG_EVENT_TRACING
 #define FTRACE_EVENTS()	. = ALIGN(8);					\
 			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
-			*(_ftrace_events)				\
+			KEEP(*(_ftrace_events))				\
 			VMLINUX_SYMBOL(__stop_ftrace_events) = .;	\
 			VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .;	\
-			*(_ftrace_enum_map)				\
+			KEEP(*(_ftrace_enum_map))			\
 			VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
 #else
 #define FTRACE_EVENTS()
@@ -163,7 +163,7 @@
 #ifdef CONFIG_FTRACE_SYSCALLS
 #define TRACE_SYSCALLS() . = ALIGN(8);					\
 			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
-			 *(__syscalls_metadata)				\
+			 KEEP(*(__syscalls_metadata))			\
 			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
 #else
 #define TRACE_SYSCALLS()
@@ -172,7 +172,7 @@
 #ifdef CONFIG_SERIAL_EARLYCON
 #define EARLYCON_TABLE() . = ALIGN(8);				\
 			 VMLINUX_SYMBOL(__earlycon_table) = .;	\
-			 *(__earlycon_table)			\
+			 KEEP(*(__earlycon_table))		\
 			 VMLINUX_SYMBOL(__earlycon_table_end) = .;
 #else
 #define EARLYCON_TABLE()
@@ -185,8 +185,8 @@
 #define _OF_TABLE_1(name)						\
 	. = ALIGN(8);							\
 	VMLINUX_SYMBOL(__##name##_of_table) = .;			\
-	*(__##name##_of_table)						\
-	*(__##name##_of_table_end)
+	KEEP(*(__##name##_of_table))					\
+	KEEP(*(__##name##_of_table_end))
 
 #define CLKSRC_OF_TABLES()	OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
 #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
@@ -209,7 +209,7 @@
 #define KERNEL_DTB()							\
 	STRUCT_ALIGN();							\
 	VMLINUX_SYMBOL(__dtb_start) = .;				\
-	*(.dtb.init.rodata)						\
+	KEEP(*(.dtb.init.rodata))					\
 	VMLINUX_SYMBOL(__dtb_end) = .;
 
 /*
@@ -227,16 +227,17 @@
 	/* implement dynamic printk debug */				\
 	. = ALIGN(8);							\
 	VMLINUX_SYMBOL(__start___jump_table) = .;			\
-	*(__jump_table)							\
+	KEEP(*(__jump_table))						\
 	VMLINUX_SYMBOL(__stop___jump_table) = .;			\
 	. = ALIGN(8);							\
 	VMLINUX_SYMBOL(__start___verbose) = .;				\
-	*(__verbose)							\
+	KEEP(*(__verbose))						\
 	VMLINUX_SYMBOL(__stop___verbose) = .;				\
 	LIKELY_PROFILE()						\
 	BRANCH_PROFILE()						\
 	TRACE_PRINTKS()							\
-	TRACEPOINT_STR()
+	TRACEPOINT_STR()						\
+	*(.data.[a-zA-Z_]*)
 
 /*
  * Data section helpers
@@ -304,35 +305,35 @@
 	/* PCI quirks */						\
 	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
-		*(.pci_fixup_early)					\
+		KEEP(*(.pci_fixup_early))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
-		*(.pci_fixup_header)					\
+		KEEP(*(.pci_fixup_header))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
-		*(.pci_fixup_final)					\
+		KEEP(*(.pci_fixup_final))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
-		*(.pci_fixup_enable)					\
+		KEEP(*(.pci_fixup_enable))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
-		*(.pci_fixup_resume)					\
+		KEEP(*(.pci_fixup_resume))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
-		*(.pci_fixup_resume_early)				\
+		KEEP(*(.pci_fixup_resume_early))			\
 		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
 		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
-		*(.pci_fixup_suspend)					\
+		KEEP(*(.pci_fixup_suspend))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .;	\
-		*(.pci_fixup_suspend_late)				\
+		KEEP(*(.pci_fixup_suspend_late))			\
 		VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .;	\
 	}								\
 									\
 	/* Built-in firmware blobs */					\
 	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
-		*(.builtin_fw)						\
+		KEEP(*(.builtin_fw))					\
 		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
 	}								\
 									\
@@ -410,7 +411,7 @@
 									\
 	/* Kernel symbol table: strings */				\
 	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
-		KEEP(*(__ksymtab_strings))				\
+		*(__ksymtab_strings)					\
 	}								\
 									\
 	/* __*init sections */						\
@@ -423,14 +424,14 @@
 	/* Built-in module parameters. */				\
 	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start___param) = .;			\
-		*(__param)						\
+		KEEP(*(__param))					\
 		VMLINUX_SYMBOL(__stop___param) = .;			\
 	}								\
 									\
 	/* Built-in module versions. */					\
 	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start___modver) = .;			\
-		*(__modver)						\
+		KEEP(*(__modver))					\
 		VMLINUX_SYMBOL(__stop___modver) = .;			\
 		. = ALIGN((align));					\
 		VMLINUX_SYMBOL(__end_rodata) = .;			\
@@ -496,7 +497,7 @@
 #define ENTRY_TEXT							\
 		ALIGN_FUNCTION();					\
 		VMLINUX_SYMBOL(__entry_text_start) = .;			\
-		*(.entry.text)						\
+		KEEP(*(.entry.text))					\
 		VMLINUX_SYMBOL(__entry_text_end) = .;
 
 #if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
@@ -534,7 +535,7 @@
 	. = ALIGN(align);						\
 	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start___ex_table) = .;			\
-		*(__ex_table)						\
+		KEEP(*(__ex_table))					\
 		VMLINUX_SYMBOL(__stop___ex_table) = .;			\
 	}
 
@@ -550,9 +551,9 @@
 #ifdef CONFIG_CONSTRUCTORS
 #define KERNEL_CTORS()	. = ALIGN(8);			   \
 			VMLINUX_SYMBOL(__ctors_start) = .; \
-			*(.ctors)			   \
+			KEEP(*(.ctors))			   \
 			*(SORT(.init_array.*))		   \
-			*(.init_array)			   \
+			KEEP(*(.init_array))		   \
 			VMLINUX_SYMBOL(__ctors_end) = .;
 #else
 #define KERNEL_CTORS()
@@ -609,7 +610,7 @@
 #define SBSS(sbss_align)						\
 	. = ALIGN(sbss_align);						\
 	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
-		*(.sbss)						\
+		*(.sbss .sbss.*)					\
 		*(.scommon)						\
 	}
 
@@ -676,7 +677,7 @@
 	. = ALIGN(8);							\
 	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start___bug_table) = .;		\
-		*(__bug_table)						\
+		KEEP(*(__bug_table))					\
 		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
 	}
 #else
@@ -688,7 +689,7 @@
 	. = ALIGN(4);							\
 	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__tracedata_start) = .;			\
-		*(.tracedata)						\
+		KEEP(*(.tracedata))					\
 		VMLINUX_SYMBOL(__tracedata_end) = .;			\
 	}
 #else
@@ -705,7 +706,7 @@
 #define INIT_SETUP(initsetup_align)					\
 		. = ALIGN(initsetup_align);				\
 		VMLINUX_SYMBOL(__setup_start) = .;			\
-		*(.init.setup)						\
+		KEEP(*(.init.setup))					\
 		VMLINUX_SYMBOL(__setup_end) = .;
 
 #define INIT_CALLS_LEVEL(level)						\
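
Note (illustrative, not part of the patch): the KEEP() annotations added above
cover input sections that nothing references directly. Their contents are only
walked at runtime between linker-script-defined boundary symbols such as
__setup_start/__setup_end or __start___param/__stop___param, so once objects
are built with -ffunction-sections/-fdata-sections and the image is linked
with --gc-sections, the linker sees those sections as unreachable and would
discard them unless the script keeps them explicitly. Conversely, the hunk
above drops KEEP() from __ksymtab_strings so that, together with the symbol
export stripping mentioned in the commit message, strings of unused exports
can be garbage collected as well. Below is a minimal sketch of the pattern for
the body of an output section in a GNU ld script; the "__mytable" section and
its boundary symbols are invented purely for illustration:

	/* Hypothetical registration table, analogous to __param or
	 * .init.setup: C code emits entries with
	 * __attribute__((section("__mytable"))) and iterates from
	 * __start___mytable to __stop___mytable at runtime.  No code
	 * references the entries themselves, so without KEEP() the
	 * combination of -fdata-sections and --gc-sections would drop
	 * every entry and the runtime loop would see an empty table. */
	. = ALIGN(8);
	__start___mytable = .;
	KEEP(*(__mytable))	/* survives --gc-sections */
	__stop___mytable = .;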