author     Adrian Schmutzler <freifunk@adrianschmutzler.de>    2020-10-09 21:53:35 +0200
committer  Adrian Schmutzler <freifunk@adrianschmutzler.de>    2020-10-30 19:29:59 +0100
commit     278512665094888d3c007fdd74e090496d6c811d (patch)
tree       6d4f2cdddef316e07829b89c1c1a790d0db92fc3 /target/linux/generic/pending-4.19/220-optimize_inlining.patch
parent     3824fa26d256d162fc0e02e46714eda7816cae4a (diff)
kernel: remove support for kernel 4.19
We use 5.4 on all targets by default, and 4.19 has never been released
in a stable version. There is no reason to keep it.

Signed-off-by: Adrian Schmutzler <freifunk@adrianschmutzler.de>
Diffstat (limited to 'target/linux/generic/pending-4.19/220-optimize_inlining.patch')
-rw-r--r--  target/linux/generic/pending-4.19/220-optimize_inlining.patch | 225
1 file changed, 0 insertions(+), 225 deletions(-)
diff --git a/target/linux/generic/pending-4.19/220-optimize_inlining.patch b/target/linux/generic/pending-4.19/220-optimize_inlining.patch
deleted file mode 100644
index ae032709d2..0000000000
--- a/target/linux/generic/pending-4.19/220-optimize_inlining.patch
+++ /dev/null
@@ -1,225 +0,0 @@
---- a/arch/arm/kernel/atags.h
-+++ b/arch/arm/kernel/atags.h
-@@ -5,7 +5,7 @@ void convert_to_tag_list(struct tag *tag
- const struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer,
- unsigned int machine_nr);
- #else
--static inline const struct machine_desc *
-+static inline const struct machine_desc * __init __noreturn
- setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
- {
- early_print("no ATAGS support: can't continue\n");
---- a/arch/arm64/include/asm/cpufeature.h
-+++ b/arch/arm64/include/asm/cpufeature.h
-@@ -366,7 +366,7 @@ static inline bool cpu_have_feature(unsi
- }
-
- /* System capability check for constant caps */
--static inline bool __cpus_have_const_cap(int num)
-+static __always_inline bool __cpus_have_const_cap(int num)
- {
- if (num >= ARM64_NCAPS)
- return false;
-@@ -380,7 +380,7 @@ static inline bool cpus_have_cap(unsigne
- return test_bit(num, cpu_hwcaps);
- }
-
--static inline bool cpus_have_const_cap(int num)
-+static __always_inline bool cpus_have_const_cap(int num)
- {
- if (static_branch_likely(&arm64_const_caps_ready))
- return __cpus_have_const_cap(num);
---- a/arch/mips/include/asm/bitops.h
-+++ b/arch/mips/include/asm/bitops.h
-@@ -463,7 +463,7 @@ static inline void __clear_bit_unlock(un
- * Return the bit position (0..63) of the most significant 1 bit in a word
- * Returns -1 if no 1 bit exists
- */
--static inline unsigned long __fls(unsigned long word)
-+static __always_inline unsigned long __fls(unsigned long word)
- {
- int num;
-
-@@ -529,7 +529,7 @@ static inline unsigned long __fls(unsign
- * Returns 0..SZLONG-1
- * Undefined if no bit exists, so code should check against 0 first.
- */
--static inline unsigned long __ffs(unsigned long word)
-+static __always_inline unsigned long __ffs(unsigned long word)
- {
- return __fls(word & -word);
- }
---- a/arch/mips/kernel/cpu-bugs64.c
-+++ b/arch/mips/kernel/cpu-bugs64.c
-@@ -42,8 +42,8 @@ static inline void align_mod(const int a
- : GCC_IMM_ASM() (align), GCC_IMM_ASM() (mod));
- }
-
--static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
-- const int align, const int mod)
-+static __always_inline void mult_sh_align_mod(long *v1, long *v2, long *w,
-+ const int align, const int mod)
- {
- unsigned long flags;
- int m1, m2;
---- a/arch/powerpc/kernel/prom_init.c
-+++ b/arch/powerpc/kernel/prom_init.c
-@@ -498,14 +498,14 @@ static int __init prom_next_node(phandle
- }
- }
-
--static inline int prom_getprop(phandle node, const char *pname,
-- void *value, size_t valuelen)
-+static inline int __init prom_getprop(phandle node, const char *pname,
-+ void *value, size_t valuelen)
- {
- return call_prom("getprop", 4, 1, node, ADDR(pname),
- (u32)(unsigned long) value, (u32) valuelen);
- }
-
--static inline int prom_getproplen(phandle node, const char *pname)
-+static inline int __init prom_getproplen(phandle node, const char *pname)
- {
- return call_prom("getproplen", 2, 1, node, ADDR(pname));
- }
---- a/arch/powerpc/mm/tlb-radix.c
-+++ b/arch/powerpc/mm/tlb-radix.c
-@@ -90,8 +90,8 @@ void radix__tlbiel_all(unsigned int acti
- asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
- }
-
--static inline void __tlbiel_pid(unsigned long pid, int set,
-- unsigned long ric)
-+static __always_inline void __tlbiel_pid(unsigned long pid, int set,
-+ unsigned long ric)
- {
- unsigned long rb,rs,prs,r;
-
-@@ -106,7 +106,7 @@ static inline void __tlbiel_pid(unsigned
- trace_tlbie(0, 1, rb, rs, ric, prs, r);
- }
-
--static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
-+static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric)
- {
- unsigned long rb,rs,prs,r;
-
-@@ -136,7 +136,7 @@ static inline void __tlbiel_lpid(unsigne
- trace_tlbie(lpid, 1, rb, rs, ric, prs, r);
- }
-
--static inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
-+static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
- {
- unsigned long rb,rs,prs,r;
-
-@@ -300,7 +300,7 @@ static inline void fixup_tlbie_lpid(unsi
- /*
- * We use 128 set in radix mode and 256 set in hpt mode.
- */
--static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
-+static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
- {
- int set;
-
-@@ -983,7 +983,7 @@ void radix__tlb_flush(struct mmu_gather
- tlb->need_flush_all = 0;
- }
-
--static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
-+static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
- unsigned long start, unsigned long end,
- int psize, bool also_pwc)
- {
---- a/arch/s390/include/asm/cpacf.h
-+++ b/arch/s390/include/asm/cpacf.h
-@@ -202,7 +202,7 @@ static inline int __cpacf_check_opcode(u
- }
- }
-
--static inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
-+static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
- {
- if (__cpacf_check_opcode(opcode)) {
- __cpacf_query(opcode, mask);
---- a/arch/x86/Kconfig.debug
-+++ b/arch/x86/Kconfig.debug
-@@ -276,20 +276,6 @@ config CPA_DEBUG
- ---help---
- Do change_page_attr() self-tests every 30 seconds.
-
--config OPTIMIZE_INLINING
-- bool "Allow gcc to uninline functions marked 'inline'"
-- ---help---
-- This option determines if the kernel forces gcc to inline the functions
-- developers have marked 'inline'. Doing so takes away freedom from gcc to
-- do what it thinks is best, which is desirable for the gcc 3.x series of
-- compilers. The gcc 4.x series have a rewritten inlining algorithm and
-- enabling this option will generate a smaller kernel there. Hopefully
-- this algorithm is so good that allowing gcc 4.x and above to make the
-- decision will become the default in the future. Until then this option
-- is there to test gcc for this.
--
-- If unsure, say N.
--
- config DEBUG_ENTRY
- bool "Debug low-level entry code"
- depends on DEBUG_KERNEL
---- a/drivers/mtd/nand/raw/vf610_nfc.c
-+++ b/drivers/mtd/nand/raw/vf610_nfc.c
-@@ -373,7 +373,7 @@ static int vf610_nfc_cmd(struct nand_chi
- {
- const struct nand_op_instr *instr;
- struct vf610_nfc *nfc = chip_to_nfc(chip);
-- int op_id = -1, trfr_sz = 0, offset;
-+ int op_id = -1, trfr_sz = 0, offset = 0;
- u32 col = 0, row = 0, cmd1 = 0, cmd2 = 0, code = 0;
- bool force8bit = false;
-
---- a/lib/Kconfig.debug
-+++ b/lib/Kconfig.debug
-@@ -309,6 +309,20 @@ config HEADERS_CHECK
- exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
- your build tree), to make sure they're suitable.
-
-+config OPTIMIZE_INLINING
-+ bool "Allow compiler to uninline functions marked 'inline'"
-+ help
-+ This option determines if the kernel forces gcc to inline the functions
-+ developers have marked 'inline'. Doing so takes away freedom from gcc to
-+ do what it thinks is best, which is desirable for the gcc 3.x series of
-+ compilers. The gcc 4.x series have a rewritten inlining algorithm and
-+ enabling this option will generate a smaller kernel there. Hopefully
-+ this algorithm is so good that allowing gcc 4.x and above to make the
-+ decision will become the default in the future. Until then this option
-+ is there to test gcc for this.
-+
-+ If unsure, say N.
-+
- config DEBUG_SECTION_MISMATCH
- bool "Enable full Section mismatch analysis"
- help
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -306,9 +306,6 @@ config ZONE_DMA32
- config AUDIT_ARCH
- def_bool y if X86_64
-
--config ARCH_SUPPORTS_OPTIMIZED_INLINING
-- def_bool y
--
- config ARCH_SUPPORTS_DEBUG_PAGEALLOC
- def_bool y
-
---- a/include/linux/compiler_types.h
-+++ b/include/linux/compiler_types.h
-@@ -268,8 +268,7 @@ struct ftrace_likely_data {
- * of extern inline functions at link time.
- * A lot of inline functions can cause havoc with function tracing.
- */
--#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
-- !defined(CONFIG_OPTIMIZE_INLINING)
-+#if !defined(CONFIG_OPTIMIZE_INLINING)
- #define inline \
- inline __attribute__((always_inline, unused)) notrace __gnu_inline
- #else
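For context on the removed patch itself: it made CONFIG_OPTIMIZE_INLINING available on all architectures by moving the option from arch/x86/Kconfig.debug to lib/Kconfig.debug and dropping the x86-only ARCH_SUPPORTS_OPTIMIZED_INLINING gate, so that `inline` becomes a hint the compiler may ignore rather than a forced always_inline. That is why the remaining hunks upgrade helpers such as __cpus_have_const_cap() and __tlbiel_pid() to __always_inline: they rely on their arguments staying compile-time constants, which only holds when inlining is guaranteed. A minimal userspace sketch of that distinction follows (hypothetical names; only the GCC/Clang attribute is real, and none of this is code from the patch):

/* sketch.c: `inline` as a hint vs. a forced __always_inline. */
#include <stdbool.h>

#define NCAPS 64
static bool cap_bits[NCAPS];

/* Plain `inline` under CONFIG_OPTIMIZE_INLINING semantics: only a hint,
 * so the compiler may emit this out of line, and `num` need not remain
 * a compile-time constant inside the body. */
static inline bool have_cap_hint(int num)
{
	return num < NCAPS && cap_bits[num];
}

/* Forced inlining: a constant argument stays a compile-time constant in
 * the caller, the property the kernel's cpus_have_const_cap() needs for
 * its static-branch fast path. */
static inline __attribute__((__always_inline__)) bool have_cap_const(int num)
{
	return num < NCAPS && cap_bits[num];
}

int main(void)
{
	cap_bits[3] = true;
	return (have_cap_const(3) && have_cap_hint(3)) ? 0 : 1;
}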