author     Hauke Mehrtens <hauke@hauke-m.de>  2020-02-23 13:20:11 +0100
committer  Koen Vandeputte <koen.vandeputte@ncentric.com>  2020-02-28 17:50:45 +0100
commit     c16517d26de30c90dabce1e456615fd7fbdce07c (patch)
tree       e7371ee12a3c413a064885b634ee4c975ad7f96a /target/linux/generic/pending-5.4
parent     955634b473284847e3c8281a6ac85655329d8b06 (diff)
kernel: copy kernel 4.19 code to 5.4
No changes were made to the patches while copying them. Currently they do not apply on top of kernel 5.4.
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Diffstat (limited to 'target/linux/generic/pending-5.4')
-rw-r--r--  target/linux/generic/pending-5.4/0931-w1-gpio-fix-problem-with-platfom-data-in-w1-gpio.patch | 26
-rw-r--r--  target/linux/generic/pending-5.4/102-MIPS-only-process-negative-stack-offsets-on-stack-tr.patch | 57
-rw-r--r--  target/linux/generic/pending-5.4/103-MIPS-perf-ath79-Fix-perfcount-IRQ-assignment.patch | 110
-rw-r--r--  target/linux/generic/pending-5.4/110-ehci_hcd_ignore_oc.patch | 79
-rw-r--r--  target/linux/generic/pending-5.4/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch | 82
-rw-r--r--  target/linux/generic/pending-5.4/130-add-linux-spidev-compatible-si3210.patch | 18
-rw-r--r--  target/linux/generic/pending-5.4/131-spi-use-gpio_set_value_cansleep-for-setting-chipsele.patch | 20
-rw-r--r--  target/linux/generic/pending-5.4/132-spi-spi-gpio-fix-crash-when-num-chipselects-is-0.patch | 64
-rw-r--r--  target/linux/generic/pending-5.4/140-jffs2-use-.rename2-and-add-RENAME_WHITEOUT-support.patch | 62
-rw-r--r--  target/linux/generic/pending-5.4/141-jffs2-add-RENAME_EXCHANGE-support.patch | 73
-rw-r--r--  target/linux/generic/pending-5.4/150-bridge_allow_receiption_on_disabled_port.patch | 45
-rw-r--r--  target/linux/generic/pending-5.4/180-net-phy-at803x-add-support-for-AT8032.patch | 63
-rw-r--r--  target/linux/generic/pending-5.4/201-extra_optimization.patch | 32
-rw-r--r--  target/linux/generic/pending-5.4/203-kallsyms_uncompressed.patch | 119
-rw-r--r--  target/linux/generic/pending-5.4/205-backtrace_module_info.patch | 45
-rw-r--r--  target/linux/generic/pending-5.4/220-optimize_inlining.patch | 225
-rw-r--r--  target/linux/generic/pending-5.4/240-remove-unsane-filenames-from-deps_initramfs-list.patch | 46
-rw-r--r--  target/linux/generic/pending-5.4/261-enable_wilink_platform_without_drivers.patch | 20
-rw-r--r--  target/linux/generic/pending-5.4/300-mips_expose_boot_raw.patch | 40
-rw-r--r--  target/linux/generic/pending-5.4/302-mips_no_branch_likely.patch | 22
-rw-r--r--  target/linux/generic/pending-5.4/304-mips_disable_fpu.patch | 137
-rw-r--r--  target/linux/generic/pending-5.4/305-mips_module_reloc.patch | 371
-rw-r--r--  target/linux/generic/pending-5.4/306-mips_mem_functions_performance.patch | 106
-rw-r--r--  target/linux/generic/pending-5.4/307-mips_highmem_offset.patch | 19
-rw-r--r--  target/linux/generic/pending-5.4/308-mips32r2_tune.patch | 22
-rw-r--r--  target/linux/generic/pending-5.4/309-MIPS-Add-CPU-option-reporting-to-proc-cpuinfo.patch | 142
-rw-r--r--  target/linux/generic/pending-5.4/310-arm_module_unresolved_weak_sym.patch | 22
-rw-r--r--  target/linux/generic/pending-5.4/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch | 274
-rw-r--r--  target/linux/generic/pending-5.4/332-arc-add-OWRTDTB-section.patch | 84
-rw-r--r--  target/linux/generic/pending-5.4/333-arc-enable-unaligned-access-in-kernel-mode.patch | 24
-rw-r--r--  target/linux/generic/pending-5.4/341-MIPS-mm-remove-no-op-dma_map_ops-where-possible.patch | 82
-rw-r--r--  target/linux/generic/pending-5.4/342-powerpc-Enable-kernel-XZ-compression-option-on-PPC_8.patch | 25
-rw-r--r--  target/linux/generic/pending-5.4/400-mtd-add-rootfs-split-support.patch | 107
-rw-r--r--  target/linux/generic/pending-5.4/401-mtd-add-support-for-different-partition-parser-types.patch | 142
-rw-r--r--  target/linux/generic/pending-5.4/402-mtd-use-typed-mtd-parsers-for-rootfs-and-firmware-split.patch | 44
-rw-r--r--  target/linux/generic/pending-5.4/403-mtd-hook-mtdsplit-to-Kbuild.patch | 32
-rw-r--r--  target/linux/generic/pending-5.4/404-mtd-add-more-helper-functions.patch | 76
-rw-r--r--  target/linux/generic/pending-5.4/411-mtd-partial_eraseblock_write.patch | 132
-rw-r--r--  target/linux/generic/pending-5.4/412-mtd-partial_eraseblock_unlock.patch | 40
-rw-r--r--  target/linux/generic/pending-5.4/419-mtd-redboot-add-of_match_table-with-DT-binding.patch | 31
-rw-r--r--  target/linux/generic/pending-5.4/420-mtd-redboot_space.patch | 41
-rw-r--r--  target/linux/generic/pending-5.4/430-mtd-add-myloader-partition-parser.patch | 47
-rw-r--r--  target/linux/generic/pending-5.4/431-mtd-bcm47xxpart-check-for-bad-blocks-when-calculatin.patch | 68
-rw-r--r--  target/linux/generic/pending-5.4/432-mtd-bcm47xxpart-detect-T_Meter-partition.patch | 37
-rw-r--r--  target/linux/generic/pending-5.4/440-block2mtd_init.patch | 116
-rw-r--r--  target/linux/generic/pending-5.4/441-block2mtd_probe.patch | 47
-rw-r--r--  target/linux/generic/pending-5.4/450-mtd-spi-nor-allow-NOR-driver-to-write-fewer-bytes-th.patch | 36
-rw-r--r--  target/linux/generic/pending-5.4/460-mtd-cfi_cmdset_0002-no-erase_suspend.patch | 25
-rw-r--r--  target/linux/generic/pending-5.4/461-mtd-cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch | 17
-rw-r--r--  target/linux/generic/pending-5.4/465-m25p80-mx-disable-software-protection.patch | 18
-rw-r--r--  target/linux/generic/pending-5.4/466-Revert-mtd-spi-nor-fix-Spansion-regressions-aliased-.patch | 37
-rw-r--r--  target/linux/generic/pending-5.4/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch | 56
-rw-r--r--  target/linux/generic/pending-5.4/475-mtd-spi-nor-Add-Winbond-w25q128jv-support.patch | 34
-rw-r--r--  target/linux/generic/pending-5.4/476-mtd-spi-nor-add-eon-en25q128.patch | 18
-rw-r--r--  target/linux/generic/pending-5.4/477-mtd-add-spi-nor-add-mx25u3235f.patch | 18
-rw-r--r--  target/linux/generic/pending-5.4/479-mtd-spi-nor-add-eon-en25qh64.patch | 10
-rw-r--r--  target/linux/generic/pending-5.4/479-mtd-spi-nor-add-xtx-xt25f128b.patch | 42
-rw-r--r--  target/linux/generic/pending-5.4/480-mtd-set-rootfs-to-be-root-dev.patch | 38
-rw-r--r--  target/linux/generic/pending-5.4/490-ubi-auto-attach-mtd-device-named-ubi-or-data-on-boot.patch | 97
-rw-r--r--  target/linux/generic/pending-5.4/491-ubi-auto-create-ubiblock-device-for-rootfs.patch | 66
-rw-r--r--  target/linux/generic/pending-5.4/492-try-auto-mounting-ubi0-rootfs-in-init-do_mounts.c.patch | 51
-rw-r--r--  target/linux/generic/pending-5.4/493-ubi-set-ROOT_DEV-to-ubiblock-rootfs-if-unset.patch | 34
-rw-r--r--  target/linux/generic/pending-5.4/494-mtd-ubi-add-EOF-marker-support.patch | 60
-rw-r--r--  target/linux/generic/pending-5.4/495-mtd-core-add-get_mtd_device_by_node.patch | 75
-rw-r--r--  target/linux/generic/pending-5.4/496-dt-bindings-add-bindings-for-mtd-concat-devices.patch | 52
-rw-r--r--  target/linux/generic/pending-5.4/497-mtd-mtdconcat-add-dt-driver-for-concat-devices.patch | 216
-rw-r--r--  target/linux/generic/pending-5.4/530-jffs2_make_lzma_available.patch | 5180
-rw-r--r--  target/linux/generic/pending-5.4/532-jffs2_eofdetect.patch | 65
-rw-r--r--  target/linux/generic/pending-5.4/553-ubifs-Add-option-to-create-UBI-FS-version-4-on-empty.patch | 63
-rw-r--r--  target/linux/generic/pending-5.4/600-netfilter_conntrack_flush.patch | 88
-rw-r--r--  target/linux/generic/pending-5.4/610-netfilter_match_bypass_default_checks.patch | 110
-rw-r--r--  target/linux/generic/pending-5.4/611-netfilter_match_bypass_default_table.patch | 106
-rw-r--r--  target/linux/generic/pending-5.4/612-netfilter_match_reduce_memory_access.patch | 22
-rw-r--r--  target/linux/generic/pending-5.4/613-netfilter_optional_tcp_window_check.patch | 53
-rw-r--r--  target/linux/generic/pending-5.4/616-net_optimize_xfrm_calls.patch | 20
-rw-r--r--  target/linux/generic/pending-5.4/620-net_sched-codel-do-not-defer-queue-length-update.patch | 86
-rw-r--r--  target/linux/generic/pending-5.4/630-packet_socket_type.patch | 138
-rw-r--r--  target/linux/generic/pending-5.4/640-netfilter-nf_flow_table-add-hardware-offload-support.patch | 564
-rw-r--r--  target/linux/generic/pending-5.4/641-netfilter-nf_flow_table-support-hw-offload-through-v.patch | 303
-rw-r--r--  target/linux/generic/pending-5.4/642-net-8021q-support-hardware-flow-table-offload.patch | 60
-rw-r--r--  target/linux/generic/pending-5.4/643-net-bridge-support-hardware-flow-table-offload.patch | 61
-rw-r--r--  target/linux/generic/pending-5.4/644-net-pppoe-support-hardware-flow-table-offload.patch | 125
-rw-r--r--  target/linux/generic/pending-5.4/645-netfilter-nf_flow_table-rework-hardware-offload-time.patch | 37
-rw-r--r--  target/linux/generic/pending-5.4/646-netfilter-nf_flow_table-rework-private-driver-data.patch | 25
-rw-r--r--  target/linux/generic/pending-5.4/655-increase_skb_pad.patch | 20
-rw-r--r--  target/linux/generic/pending-5.4/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch | 500
-rw-r--r--  target/linux/generic/pending-5.4/670-ipv6-allow-rejecting-with-source-address-failed-policy.patch | 247
-rw-r--r--  target/linux/generic/pending-5.4/671-net-provide-defines-for-_POLICY_FAILED-until-all-cod.patch | 50
-rw-r--r--  target/linux/generic/pending-5.4/680-NET-skip-GRO-for-foreign-MAC-addresses.patch | 152
-rw-r--r--  target/linux/generic/pending-5.4/681-NET-add-of_get_mac_address_mtd.patch | 133
-rw-r--r--  target/linux/generic/pending-5.4/703-phy-add-detach-callback-to-struct-phy_driver.patch | 38
-rw-r--r--  target/linux/generic/pending-5.4/735-net-phy-at803x-fix-at8033-sgmii-mode.patch | 51
-rw-r--r--  target/linux/generic/pending-5.4/739-net-avoid-tx-fault-with-Nokia-GPON-module.patch | 108
-rw-r--r--  target/linux/generic/pending-5.4/740-net-sfp-remove-incomplete-100BASE-FX-and-100BASE-LX-.patch | 52
-rw-r--r--  target/linux/generic/pending-5.4/741-net-sfp-derive-interface-mode-from-ethtool-link-mode.patch | 89
-rw-r--r--  target/linux/generic/pending-5.4/742-net-sfp-add-more-extended-compliance-codes.patch | 251
-rw-r--r--  target/linux/generic/pending-5.4/743-net-sfp-add-module-start-stop-upstream-notifications.patch | 131
-rw-r--r--  target/linux/generic/pending-5.4/744-net-sfp-move-phy_start-phy_stop-to-phylink.patch | 72
-rw-r--r--  target/linux/generic/pending-5.4/745-net-mdio-i2c-add-support-for-Clause-45-accesses.patch | 74
-rw-r--r--  target/linux/generic/pending-5.4/746-net-phylink-re-split-__phylink_connect_phy.patch | 93
-rw-r--r--  target/linux/generic/pending-5.4/747-net-phylink-support-Clause-45-PHYs-on-SFP-modules.patch | 89
-rw-r--r--  target/linux/generic/pending-5.4/748-net-phylink-split-link_an_mode-configured-and-curren.patch | 254
-rw-r--r--  target/linux/generic/pending-5.4/749-net-phylink-split-phylink_sfp_module_insert.patch | 120
-rw-r--r--  target/linux/generic/pending-5.4/750-net-phylink-delay-MAC-configuration-for-copper-SFP-m.patch | 204
-rw-r--r--  target/linux/generic/pending-5.4/751-net-phylink-make-Broadcom-BCM84881-based-SFPs-work.patch | 58
-rw-r--r--  target/linux/generic/pending-5.4/752-net-phy-add-Broadcom-BCM84881-PHY-driver.patch | 333
-rw-r--r--  target/linux/generic/pending-5.4/753-net-sfp-add-support-for-Clause-45-PHYs.patch | 94
-rw-r--r--  target/linux/generic/pending-5.4/754-net-sfp-fix-unbind.patch | 28
-rw-r--r--  target/linux/generic/pending-5.4/755-net-sfp-fix-hwmon.patch | 44
-rw-r--r--  target/linux/generic/pending-5.4/756-net-sfp-use-a-definition-for-the-fault-recovery-atte.patch | 55
-rw-r--r--  target/linux/generic/pending-5.4/757-net-sfp-rename-sm_retries.patch | 60
-rw-r--r--  target/linux/generic/pending-5.4/758-net-sfp-error-handling-for-phy-probe.patch | 97
-rw-r--r--  target/linux/generic/pending-5.4/759-net-sfp-re-attempt-probing-for-phy.patch | 132
-rw-r--r--  target/linux/generic/pending-5.4/800-bcma-get-SoC-device-struct-copy-its-DMA-params-to-th.patch | 70
-rw-r--r--  target/linux/generic/pending-5.4/810-pci_disable_common_quirks.patch | 62
-rw-r--r--  target/linux/generic/pending-5.4/811-pci_disable_usb_common_quirks.patch | 115
-rw-r--r--  target/linux/generic/pending-5.4/834-ledtrig-libata.patch | 149
-rw-r--r--  target/linux/generic/pending-5.4/920-mangle_bootargs.patch | 71
118 files changed, 15718 insertions, 0 deletions
diff --git a/target/linux/generic/pending-5.4/0931-w1-gpio-fix-problem-with-platfom-data-in-w1-gpio.patch b/target/linux/generic/pending-5.4/0931-w1-gpio-fix-problem-with-platfom-data-in-w1-gpio.patch
new file mode 100644
index 0000000000..be9ceebc3a
--- /dev/null
+++ b/target/linux/generic/pending-5.4/0931-w1-gpio-fix-problem-with-platfom-data-in-w1-gpio.patch
@@ -0,0 +1,26 @@
+From d9c8bc8c1408f3e8529db6e4e04017b4c579c342 Mon Sep 17 00:00:00 2001
+From: Pawel Dembicki <paweldembicki@gmail.com>
+Date: Sun, 18 Feb 2018 17:08:04 +0100
+Subject: [PATCH] w1: gpio: fix problem with platfom data in w1-gpio
+
+In devices, where fdt is used, is impossible to apply platform data
+without proper fdt node.
+
+This patch allow to use platform data in devices with fdt.
+
+Signed-off-by: Pawel Dembicki <paweldembicki@gmail.com>
+---
+ drivers/w1/masters/w1-gpio.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/w1/masters/w1-gpio.c
++++ b/drivers/w1/masters/w1-gpio.c
+@@ -79,7 +79,7 @@ static int w1_gpio_probe(struct platform
+ enum gpiod_flags gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
+ int err;
+
+- if (of_have_populated_dt()) {
++ if (of_have_populated_dt() && !dev_get_platdata(&pdev->dev)) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
diff --git a/target/linux/generic/pending-5.4/102-MIPS-only-process-negative-stack-offsets-on-stack-tr.patch b/target/linux/generic/pending-5.4/102-MIPS-only-process-negative-stack-offsets-on-stack-tr.patch
new file mode 100644
index 0000000000..0b87f493ec
--- /dev/null
+++ b/target/linux/generic/pending-5.4/102-MIPS-only-process-negative-stack-offsets-on-stack-tr.patch
@@ -0,0 +1,57 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Wed, 18 Apr 2018 10:50:05 +0200
+Subject: [PATCH] MIPS: only process negative stack offsets on stack traces
+
+Fixes endless back traces in cases where the compiler emits a stack
+pointer increase in a branch delay slot (probably for some form of
+function return).
+
+[ 3.475442] BUG: MAX_STACK_TRACE_ENTRIES too low!
+[ 3.480070] turning off the locking correctness validator.
+[ 3.485521] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.14.34 #0
+[ 3.491475] Stack : 00000000 00000000 00000000 00000000 80e0fce2 00000034 00000000 00000000
+[ 3.499764] 87c3838c 80696377 8061047c 00000000 00000001 00000001 87c2d850 6534689f
+[ 3.508059] 00000000 00000000 80e10000 00000000 00000000 000000cf 0000000f 00000000
+[ 3.516353] 00000000 806a0000 00076891 00000000 00000000 00000000 ffffffff 00000000
+[ 3.524648] 806c0000 00000004 80e10000 806a0000 00000003 80690000 00000000 80700000
+[ 3.532942] ...
+[ 3.535362] Call Trace:
+[ 3.537818] [<80010a48>] show_stack+0x58/0x100
+[ 3.542207] [<804c2f78>] dump_stack+0xe8/0x170
+[ 3.546613] [<80079f90>] save_trace+0xf0/0x110
+[ 3.551010] [<8007b1ec>] mark_lock+0x33c/0x78c
+[ 3.555413] [<8007bf48>] __lock_acquire+0x2ac/0x1a08
+[ 3.560337] [<8007de60>] lock_acquire+0x64/0x8c
+[ 3.564846] [<804e1570>] _raw_spin_lock_irqsave+0x54/0x78
+[ 3.570186] [<801b618c>] kernfs_notify+0x94/0xac
+[ 3.574770] [<801b7b10>] sysfs_notify+0x74/0xa0
+[ 3.579257] [<801b618c>] kernfs_notify+0x94/0xac
+[ 3.583839] [<801b7b10>] sysfs_notify+0x74/0xa0
+[ 3.588329] [<801b618c>] kernfs_notify+0x94/0xac
+[ 3.592911] [<801b7b10>] sysfs_notify+0x74/0xa0
+[ 3.597401] [<801b618c>] kernfs_notify+0x94/0xac
+[ 3.601983] [<801b7b10>] sysfs_notify+0x74/0xa0
+[ 3.606473] [<801b618c>] kernfs_notify+0x94/0xac
+[ 3.611055] [<801b7b10>] sysfs_notify+0x74/0xa0
+[ 3.615545] [<801b618c>] kernfs_notify+0x94/0xac
+[ 3.620125] [<801b7b10>] sysfs_notify+0x74/0xa0
+[ 3.624619] [<801b618c>] kernfs_notify+0x94/0xac
+[ 3.629197] [<801b7b10>] sysfs_notify+0x74/0xa0
+[ 3.633691] [<801b618c>] kernfs_notify+0x94/0xac
+[ 3.638269] [<801b7b10>] sysfs_notify+0x74/0xa0
+[ 3.642763] [<801b618c>] kernfs_notify+0x94/0xac
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -361,6 +361,8 @@ static inline int is_sp_move_ins(union m
+
+ if (ip->i_format.opcode == addiu_op ||
+ ip->i_format.opcode == daddiu_op) {
++ if (ip->i_format.simmediate > 0)
++ return 0;
+ *frame_size = -ip->i_format.simmediate;
+ return 1;
+ }
diff --git a/target/linux/generic/pending-5.4/103-MIPS-perf-ath79-Fix-perfcount-IRQ-assignment.patch b/target/linux/generic/pending-5.4/103-MIPS-perf-ath79-Fix-perfcount-IRQ-assignment.patch
new file mode 100644
index 0000000000..6b74c7da1d
--- /dev/null
+++ b/target/linux/generic/pending-5.4/103-MIPS-perf-ath79-Fix-perfcount-IRQ-assignment.patch
@@ -0,0 +1,110 @@
+From 852a88f35f4b7e5ebb717fed3c3a3330d5ad4336 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20=C5=A0tetiar?= <ynezz@true.cz>
+Date: Wed, 10 Apr 2019 16:43:27 +0200
+Subject: [PATCH v2] MIPS: perf: ath79: Fix perfcount IRQ assignment
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Currently it's not possible to use perf on ath79 due to genirq flags
+mismatch happening on static virtual IRQ 13 which is used for
+performance counters hardware IRQ 5.
+
+On TP-Link Archer C7v5:
+
+ CPU0
+ 2: 0 MIPS 2 ath9k
+ 4: 318 MIPS 4 19000000.eth
+ 7: 55034 MIPS 7 timer
+ 8: 1236 MISC 3 ttyS0
+ 12: 0 INTC 1 ehci_hcd:usb1
+ 13: 0 gpio-ath79 2 keys
+ 14: 0 gpio-ath79 5 keys
+ 15: 31 AR724X PCI 1 ath10k_pci
+
+ $ perf top
+ genirq: Flags mismatch irq 13. 00014c83 (mips_perf_pmu) vs. 00002003 (keys)
+
+On TP-Link Archer C7v4:
+
+ CPU0
+ 4: 0 MIPS 4 19000000.eth
+ 5: 7135 MIPS 5 1a000000.eth
+ 7: 98379 MIPS 7 timer
+ 8: 30 MISC 3 ttyS0
+ 12: 90028 INTC 0 ath9k
+ 13: 5520 INTC 1 ehci_hcd:usb1
+ 14: 4623 INTC 2 ehci_hcd:usb2
+ 15: 32844 AR724X PCI 1 ath10k_pci
+ 16: 0 gpio-ath79 16 keys
+ 23: 0 gpio-ath79 23 keys
+
+ $ perf top
+ genirq: Flags mismatch irq 13. 00014c80 (mips_perf_pmu) vs. 00000080 (ehci_hcd:usb1)
+
+This problem is happening, because currently statically assigned virtual
+IRQ 13 for performance counters is not claimed during the initialization
+of MIPS PMU during the bootup, so the IRQ subsystem doesn't know, that
+this interrupt isn't available for further use.
+
+So this patch fixes the issue by simply booking hardware IRQ 5 for MIPS PMU.
+
+Tested-by: Kevin 'ldir' Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>
+Signed-off-by: Petr Štetiar <ynezz@true.cz>
+---
+
+Changes since v1:
+
+ I've incorporated two comments which I've received on IRC from blogic and
+ I've also reworded the commit message to match the changes in v2 of this
+ patch.
+
+ * use actual hardware perfcount IRQ 5 instead of the virtual IRQ 13
+ * dropped the CONFIG_PERF_EVENTS ifdef around irq_create_mapping
+
+ arch/mips/ath79/setup.c | 6 ------
+ drivers/irqchip/irq-ath79-misc.c | 11 +++++++++++
+ 2 files changed, 11 insertions(+), 6 deletions(-)
+
+--- a/arch/mips/ath79/setup.c
++++ b/arch/mips/ath79/setup.c
+@@ -211,12 +211,6 @@ const char *get_system_type(void)
+ return ath79_sys_type;
+ }
+
+-int get_c0_perfcount_int(void)
+-{
+- return ATH79_MISC_IRQ(5);
+-}
+-EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+-
+ unsigned int get_c0_compare_int(void)
+ {
+ return CP0_LEGACY_COMPARE_IRQ;
+--- a/drivers/irqchip/irq-ath79-misc.c
++++ b/drivers/irqchip/irq-ath79-misc.c
+@@ -22,6 +22,15 @@
+ #define AR71XX_RESET_REG_MISC_INT_ENABLE 4
+
+ #define ATH79_MISC_IRQ_COUNT 32
++#define ATH79_MISC_PERF_IRQ 5
++
++static int ath79_perfcount_irq;
++
++int get_c0_perfcount_int(void)
++{
++ return ath79_perfcount_irq;
++}
++EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+
+ static void ath79_misc_irq_handler(struct irq_desc *desc)
+ {
+@@ -113,6 +122,8 @@ static void __init ath79_misc_intc_domai
+ {
+ void __iomem *base = domain->host_data;
+
++ ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ);
++
+ /* Disable and clear all interrupts */
+ __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+ __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
diff --git a/target/linux/generic/pending-5.4/110-ehci_hcd_ignore_oc.patch b/target/linux/generic/pending-5.4/110-ehci_hcd_ignore_oc.patch
new file mode 100644
index 0000000000..cad00b5527
--- /dev/null
+++ b/target/linux/generic/pending-5.4/110-ehci_hcd_ignore_oc.patch
@@ -0,0 +1,79 @@
+From: Florian Fainelli <florian@openwrt.org>
+Subject: USB: EHCI: add ignore_oc flag to disable overcurrent checking
+
+This patch adds an ignore_oc flag which can be set by EHCI controller
+not supporting or wanting to disable overcurrent checking. The EHCI
+platform data in include/linux/usb/ehci_pdriver.h is also augmented to
+take advantage of this new flag.
+
+Signed-off-by: Florian Fainelli <florian@openwrt.org>
+---
+ drivers/usb/host/ehci-hcd.c | 2 +-
+ drivers/usb/host/ehci-hub.c | 4 ++--
+ drivers/usb/host/ehci-platform.c | 1 +
+ drivers/usb/host/ehci.h | 1 +
+ include/linux/usb/ehci_pdriver.h | 1 +
+ 5 files changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -638,7 +638,7 @@ static int ehci_run (struct usb_hcd *hcd
+ "USB %x.%x started, EHCI %x.%02x%s\n",
+ ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
+ temp >> 8, temp & 0xff,
+- ignore_oc ? ", overcurrent ignored" : "");
++ (ignore_oc || ehci->ignore_oc) ? ", overcurrent ignored" : "");
+
+ ehci_writel(ehci, INTR_MASK,
+ &ehci->regs->intr_enable); /* Turn On Interrupts */
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -641,7 +641,7 @@ ehci_hub_status_data (struct usb_hcd *hc
+ * always set, seem to clear PORT_OCC and PORT_CSC when writing to
+ * PORT_POWER; that's surprising, but maybe within-spec.
+ */
+- if (!ignore_oc)
++ if (!ignore_oc && !ehci->ignore_oc)
+ mask = PORT_CSC | PORT_PEC | PORT_OCC;
+ else
+ mask = PORT_CSC | PORT_PEC;
+@@ -1011,7 +1011,7 @@ int ehci_hub_control(
+ if (temp & PORT_PEC)
+ status |= USB_PORT_STAT_C_ENABLE << 16;
+
+- if ((temp & PORT_OCC) && !ignore_oc){
++ if ((temp & PORT_OCC) && (!ignore_oc && !ehci->ignore_oc)){
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+
+ /*
+--- a/drivers/usb/host/ehci-platform.c
++++ b/drivers/usb/host/ehci-platform.c
+@@ -208,6 +208,8 @@ static int ehci_platform_probe(struct pl
+ hcd->has_tt = 1;
+ if (pdata->reset_on_resume)
+ priv->reset_on_resume = true;
++ if (pdata->ignore_oc)
++ ehci->ignore_oc = 1;
+
+ #ifndef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
+ if (ehci->big_endian_mmio) {
+--- a/drivers/usb/host/ehci.h
++++ b/drivers/usb/host/ehci.h
+@@ -218,6 +218,7 @@ struct ehci_hcd { /* one per controlle
+ unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
+ unsigned need_oc_pp_cycle:1; /* MPC834X port power */
+ unsigned imx28_write_fix:1; /* For Freescale i.MX28 */
++ unsigned ignore_oc:1;
+
+ /* required for usb32 quirk */
+ #define OHCI_CTRL_HCFS (3 << 6)
+--- a/include/linux/usb/ehci_pdriver.h
++++ b/include/linux/usb/ehci_pdriver.h
+@@ -50,6 +50,7 @@ struct usb_ehci_pdata {
+ unsigned no_io_watchdog:1;
+ unsigned reset_on_resume:1;
+ unsigned dma_mask_64:1;
++ unsigned ignore_oc:1;
+
+ /* Turn on all power and clocks */
+ int (*power_on)(struct platform_device *pdev);
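Not part of the patch above — a minimal, hypothetical board-support sketch of how platform code could request the new behaviour once the ignore_oc flag exists in struct usb_ehci_pdata. The device name "ehci-platform" matches the generic EHCI platform driver, the MMIO/IRQ resources a real board would declare are omitted, and all identifiers other than ignore_oc come from include/linux/usb/ehci_pdriver.h as shown in the hunk above.

    #include <linux/platform_device.h>
    #include <linux/usb/ehci_pdriver.h>

    /* Board platform data: ask the EHCI core to skip over-current checks.
     * The ignore_oc bitfield only exists with the patch above applied. */
    static struct usb_ehci_pdata board_ehci_pdata = {
            .has_tt    = 1,
            .ignore_oc = 1,
    };

    /* Resources (MMIO window, IRQ) omitted for brevity. */
    static struct platform_device board_ehci_device = {
            .name = "ehci-platform",
            .id   = -1,
            .dev  = {
                    .platform_data = &board_ehci_pdata,
            },
    };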
diff --git a/target/linux/generic/pending-5.4/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch b/target/linux/generic/pending-5.4/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch
new file mode 100644
index 0000000000..ec541a865c
--- /dev/null
+++ b/target/linux/generic/pending-5.4/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch
@@ -0,0 +1,82 @@
+From: Tobias Wolf <dev-NTEO@vplace.de>
+Subject: mm: Fix alloc_node_mem_map with ARCH_PFN_OFFSET calculation
+
+An rt288x (ralink) based router (Belkin F5D8235 v1) does not boot with any
+kernel beyond version 4.3 resulting in:
+
+BUG: Bad page state in process swapper pfn:086ac
+
+bisect resulted in:
+
+a1c34a3bf00af2cede839879502e12dc68491ad5 is the first bad commit
+commit a1c34a3bf00af2cede839879502e12dc68491ad5
+Author: Laura Abbott <laura@labbott.name>
+Date: Thu Nov 5 18:48:46 2015 -0800
+
+ mm: Don't offset memmap for flatmem
+
+ Srinivas Kandagatla reported bad page messages when trying to remove the
+ bottom 2MB on an ARM based IFC6410 board
+
+ BUG: Bad page state in process swapper pfn:fffa8
+ page:ef7fb500 count:0 mapcount:0 mapping: (null) index:0x0
+ flags: 0x96640253(locked|error|dirty|active|arch_1|reclaim|mlocked)
+ page dumped because: PAGE_FLAGS_CHECK_AT_FREE flag(s) set
+ bad because of flags:
+ flags: 0x200041(locked|active|mlocked)
+ Modules linked in:
+ CPU: 0 PID: 0 Comm: swapper Not tainted 3.19.0-rc3-00007-g412f9ba-dirty
+#816
+ Hardware name: Qualcomm (Flattened Device Tree)
+ unwind_backtrace
+ show_stack
+ dump_stack
+ bad_page
+ free_pages_prepare
+ free_hot_cold_page
+ __free_pages
+ free_highmem_page
+ mem_init
+ start_kernel
+ Disabling lock debugging due to kernel taint
+ [...]
+:040000 040000 2de013c372345fd471cd58f0553c9b38b0ef1cc4
+0a8156f848733dfa21e16c196dfb6c0a76290709 M mm
+
+This fix for ARM does not account ARCH_PFN_OFFSET for mem_map as later used by
+page_to_pfn anymore.
+
+The following output was generated with two hacked in printk statements:
+
+printk("before %p vs. %p or %p\n", mem_map, mem_map - offset, mem_map -
+(pgdat->node_start_pfn - ARCH_PFN_OFFSET));
+ if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
+ mem_map -= offset + (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
+printk("after %p\n", mem_map);
+
+Output:
+
+[ 0.000000] before 8861b280 vs. 8861b280 or 8851b280
+[ 0.000000] after 8851b280
+
+As seen in the first line mem_map with subtraction of offset does not equal the
+mem_map after subtraction of ARCH_PFN_OFFSET.
+
+After adding the offset of ARCH_PFN_OFFSET as well to mem_map as the
+previously calculated offset is zero for the named platform it is able to boot
+4.4 and 4.9-rc7 again.
+
+Signed-off-by: Tobias Wolf <dev-NTEO@vplace.de>
+---
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -6400,7 +6400,7 @@ static void __ref alloc_node_mem_map(str
+ mem_map = NODE_DATA(0)->node_mem_map;
+ #if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
+ if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
+- mem_map -= offset;
++ mem_map -= offset + (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
+ #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+ }
+ #endif
diff --git a/target/linux/generic/pending-5.4/130-add-linux-spidev-compatible-si3210.patch b/target/linux/generic/pending-5.4/130-add-linux-spidev-compatible-si3210.patch
new file mode 100644
index 0000000000..3a42182c97
--- /dev/null
+++ b/target/linux/generic/pending-5.4/130-add-linux-spidev-compatible-si3210.patch
@@ -0,0 +1,18 @@
+From: Giuseppe Lippolis <giu.lippolis@gmail.com>
+Subject: Add the linux,spidev compatible in spidev Several device in ramips have this binding in the dts
+
+Signed-off-by: Giuseppe Lippolis <giu.lippolis@gmail.com>
+---
+ drivers/spi/spidev.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -672,6 +672,7 @@ static const struct of_device_id spidev_
+ { .compatible = "lineartechnology,ltc2488" },
+ { .compatible = "ge,achc" },
+ { .compatible = "semtech,sx1301" },
++ { .compatible = "siliconlabs,si3210" },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, spidev_dt_ids);
diff --git a/target/linux/generic/pending-5.4/131-spi-use-gpio_set_value_cansleep-for-setting-chipsele.patch b/target/linux/generic/pending-5.4/131-spi-use-gpio_set_value_cansleep-for-setting-chipsele.patch
new file mode 100644
index 0000000000..bd5ed6f888
--- /dev/null
+++ b/target/linux/generic/pending-5.4/131-spi-use-gpio_set_value_cansleep-for-setting-chipsele.patch
@@ -0,0 +1,20 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: spi: use gpio_set_value_cansleep for setting chipselect GPIO
+
+Sleeping is safe inside spi_transfer_one_message, and some GPIO chips
+need to sleep for setting values
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -733,7 +733,7 @@ static void spi_set_cs(struct spi_device
+ enable = !enable;
+
+ if (gpio_is_valid(spi->cs_gpio)) {
+- gpio_set_value(spi->cs_gpio, !enable);
++ gpio_set_value_cansleep(spi->cs_gpio, !enable);
+ /* Some SPI masters need both GPIO CS & slave_select */
+ if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
+ spi->controller->set_cs)
diff --git a/target/linux/generic/pending-5.4/132-spi-spi-gpio-fix-crash-when-num-chipselects-is-0.patch b/target/linux/generic/pending-5.4/132-spi-spi-gpio-fix-crash-when-num-chipselects-is-0.patch
new file mode 100644
index 0000000000..ed94f937c9
--- /dev/null
+++ b/target/linux/generic/pending-5.4/132-spi-spi-gpio-fix-crash-when-num-chipselects-is-0.patch
@@ -0,0 +1,64 @@
+From 0f76408d8c1cb0979f4286dd23912e6e59784b35 Mon Sep 17 00:00:00 2001
+From: DENG Qingfang <dqfext@gmail.com>
+Date: Thu, 19 Sep 2019 11:42:55 +0200
+Subject: [PATCH] spi: spi-gpio: fix crash when num-chipselects is 0
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+If an spi-gpio was specified with num-chipselects = <0> in dts, kernel will crash:
+
+Unable to handle kernel paging request at virtual address 32697073
+pgd = (ptrval)
+[32697073] *pgd=00000000
+Internal error: Oops: 5 [# 1] SMP ARM
+Modules linked in:
+CPU: 2 PID: 1 Comm: swapper/0 Not tainted 4.19.72 #0
+Hardware name: Generic DT based system
+PC is at validate_desc+0x28/0x80
+LR is at gpiod_direction_output+0x14/0x128
+...
+[<c0544db4>] (validate_desc) from [<c0545228>] (gpiod_direction_output+0x14/0x128)
+[<c0545228>] (gpiod_direction_output) from [<c05fa714>] (spi_gpio_setup+0x58/0x64)
+[<c05fa714>] (spi_gpio_setup) from [<c05f7258>] (spi_setup+0x12c/0x148)
+[<c05f7258>] (spi_setup) from [<c05f7330>] (spi_add_device+0xbc/0x12c)
+[<c05f7330>] (spi_add_device) from [<c05f7f74>] (spi_register_controller+0x838/0x924)
+[<c05f7f74>] (spi_register_controller) from [<c05fa494>] (spi_bitbang_start+0x108/0x120)
+[<c05fa494>] (spi_bitbang_start) from [<c05faa34>] (spi_gpio_probe+0x314/0x338)
+[<c05faa34>] (spi_gpio_probe) from [<c05a844c>] (platform_drv_probe+0x34/0x70)
+
+The cause is spi_gpio_setup() did not check if the spi-gpio has chipselect pins
+before setting their direction and results in derefing an invalid pointer.
+
+The bug is spotted in kernel 4.19.72 on OpenWrt, and does not occur in 4.14.
+
+There is a similar fix upstream 249e2632dcd0509b8f8f296f5aabf4d48dfd6da8.
+
+Ref: https://patchwork.kernel.org/patch/11150619/
+Cc: Linus Walleij <linus.walleij@linaro.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: DENG Qingfang <dqfext@gmail.com>
+Signed-off-by: Petr Štetiar <ynezz@true.cz>
+---
+ drivers/spi/spi-gpio.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/spi/spi-gpio.c
++++ b/drivers/spi/spi-gpio.c
+@@ -242,10 +242,12 @@ static int spi_gpio_setup(struct spi_dev
+ * The CS GPIOs have already been
+ * initialized from the descriptor lookup.
+ */
+- cs = spi_gpio->cs_gpios[spi->chip_select];
+- if (!spi->controller_state && cs)
+- status = gpiod_direction_output(cs,
+- !(spi->mode & SPI_CS_HIGH));
++ if (spi_gpio->has_cs) {
++ cs = spi_gpio->cs_gpios[spi->chip_select];
++ if (!spi->controller_state && cs)
++ status = gpiod_direction_output(cs,
++ !(spi->mode & SPI_CS_HIGH));
++ }
+
+ if (!status)
+ status = spi_bitbang_setup(spi);
diff --git a/target/linux/generic/pending-5.4/140-jffs2-use-.rename2-and-add-RENAME_WHITEOUT-support.patch b/target/linux/generic/pending-5.4/140-jffs2-use-.rename2-and-add-RENAME_WHITEOUT-support.patch
new file mode 100644
index 0000000000..c97e93250b
--- /dev/null
+++ b/target/linux/generic/pending-5.4/140-jffs2-use-.rename2-and-add-RENAME_WHITEOUT-support.patch
@@ -0,0 +1,62 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: jffs2: use .rename2 and add RENAME_WHITEOUT support
+
+It is required for renames on overlayfs
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/fs/jffs2/dir.c
++++ b/fs/jffs2/dir.c
+@@ -752,6 +752,24 @@ static int jffs2_mknod (struct inode *di
+ return ret;
+ }
+
++static int jffs2_whiteout (struct inode *old_dir, struct dentry *old_dentry)
++{
++ struct dentry *wh;
++ int err;
++
++ wh = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
++ if (!wh)
++ return -ENOMEM;
++
++ err = jffs2_mknod(old_dir, wh, S_IFCHR | WHITEOUT_MODE,
++ WHITEOUT_DEV);
++ if (err)
++ return err;
++
++ d_rehash(wh);
++ return 0;
++}
++
+ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
+ struct inode *new_dir_i, struct dentry *new_dentry,
+ unsigned int flags)
+@@ -762,7 +780,7 @@ static int jffs2_rename (struct inode *o
+ uint8_t type;
+ uint32_t now;
+
+- if (flags & ~RENAME_NOREPLACE)
++ if (flags & ~(RENAME_NOREPLACE|RENAME_WHITEOUT))
+ return -EINVAL;
+
+ /* The VFS will check for us and prevent trying to rename a
+@@ -828,9 +846,14 @@ static int jffs2_rename (struct inode *o
+ if (d_is_dir(old_dentry) && !victim_f)
+ inc_nlink(new_dir_i);
+
+- /* Unlink the original */
+- ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
+- old_dentry->d_name.name, old_dentry->d_name.len, NULL, now);
++ if (flags & RENAME_WHITEOUT)
++ /* Replace with whiteout */
++ ret = jffs2_whiteout(old_dir_i, old_dentry);
++ else
++ /* Unlink the original */
++ ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
++ old_dentry->d_name.name,
++ old_dentry->d_name.len, NULL, now);
+
+ /* We don't touch inode->i_nlink */
+
diff --git a/target/linux/generic/pending-5.4/141-jffs2-add-RENAME_EXCHANGE-support.patch b/target/linux/generic/pending-5.4/141-jffs2-add-RENAME_EXCHANGE-support.patch
new file mode 100644
index 0000000000..093a73ab66
--- /dev/null
+++ b/target/linux/generic/pending-5.4/141-jffs2-add-RENAME_EXCHANGE-support.patch
@@ -0,0 +1,73 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: jffs2: add RENAME_EXCHANGE support
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/fs/jffs2/dir.c
++++ b/fs/jffs2/dir.c
+@@ -777,18 +777,31 @@ static int jffs2_rename (struct inode *o
+ int ret;
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb);
+ struct jffs2_inode_info *victim_f = NULL;
++ struct inode *fst_inode = d_inode(old_dentry);
++ struct inode *snd_inode = d_inode(new_dentry);
+ uint8_t type;
+ uint32_t now;
+
+- if (flags & ~(RENAME_NOREPLACE|RENAME_WHITEOUT))
++ if (flags & ~(RENAME_NOREPLACE|RENAME_WHITEOUT|RENAME_EXCHANGE))
+ return -EINVAL;
+
++ if ((flags & RENAME_EXCHANGE) && (old_dir_i != new_dir_i)) {
++ if (S_ISDIR(fst_inode->i_mode) && !S_ISDIR(snd_inode->i_mode)) {
++ inc_nlink(new_dir_i);
++ drop_nlink(old_dir_i);
++ }
++ else if (!S_ISDIR(fst_inode->i_mode) && S_ISDIR(snd_inode->i_mode)) {
++ drop_nlink(new_dir_i);
++ inc_nlink(old_dir_i);
++ }
++ }
++
+ /* The VFS will check for us and prevent trying to rename a
+ * file over a directory and vice versa, but if it's a directory,
+ * the VFS can't check whether the victim is empty. The filesystem
+ * needs to do that for itself.
+ */
+- if (d_really_is_positive(new_dentry)) {
++ if (d_really_is_positive(new_dentry) && !(flags & RENAME_EXCHANGE)) {
+ victim_f = JFFS2_INODE_INFO(d_inode(new_dentry));
+ if (d_is_dir(new_dentry)) {
+ struct jffs2_full_dirent *fd;
+@@ -823,7 +836,7 @@ static int jffs2_rename (struct inode *o
+ if (ret)
+ return ret;
+
+- if (victim_f) {
++ if (victim_f && !(flags & RENAME_EXCHANGE)) {
+ /* There was a victim. Kill it off nicely */
+ if (d_is_dir(new_dentry))
+ clear_nlink(d_inode(new_dentry));
+@@ -849,6 +862,12 @@ static int jffs2_rename (struct inode *o
+ if (flags & RENAME_WHITEOUT)
+ /* Replace with whiteout */
+ ret = jffs2_whiteout(old_dir_i, old_dentry);
++ else if (flags & RENAME_EXCHANGE)
++ /* Replace the original */
++ ret = jffs2_do_link(c, JFFS2_INODE_INFO(old_dir_i),
++ d_inode(new_dentry)->i_ino, type,
++ old_dentry->d_name.name, old_dentry->d_name.len,
++ now);
+ else
+ /* Unlink the original */
+ ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
+@@ -880,7 +899,7 @@ static int jffs2_rename (struct inode *o
+ return ret;
+ }
+
+- if (d_is_dir(old_dentry))
++ if (d_is_dir(old_dentry) && !(flags & RENAME_EXCHANGE))
+ drop_nlink(old_dir_i);
+
+ new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);
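Not part of either jffs2 patch — a hedged userspace sketch of the renameat2() flag that jffs2 accepts once the RENAME_EXCHANGE patch above is applied (RENAME_WHITEOUT, added by the previous patch, is normally issued by overlayfs inside the kernel rather than by applications). The file names are placeholders, and the raw syscall is used so the example does not depend on a particular glibc wrapper.

    #define _GNU_SOURCE
    #include <fcntl.h>          /* AT_FDCWD */
    #include <linux/fs.h>       /* RENAME_EXCHANGE */
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            /* Atomically swap the two directory entries. */
            if (syscall(SYS_renameat2, AT_FDCWD, "config.cur",
                        AT_FDCWD, "config.new", RENAME_EXCHANGE) < 0) {
                    perror("renameat2");
                    return 1;
            }
            return 0;
    }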
diff --git a/target/linux/generic/pending-5.4/150-bridge_allow_receiption_on_disabled_port.patch b/target/linux/generic/pending-5.4/150-bridge_allow_receiption_on_disabled_port.patch
new file mode 100644
index 0000000000..583c34a448
--- /dev/null
+++ b/target/linux/generic/pending-5.4/150-bridge_allow_receiption_on_disabled_port.patch
@@ -0,0 +1,45 @@
+From: Stephen Hemminger <stephen@networkplumber.org>
+Subject: bridge: allow receiption on disabled port
+
+When an ethernet device is enslaved to a bridge, and the bridge STP
+detects loss of carrier (or operational state down), then normally
+packet receiption is blocked.
+
+This breaks control applications like WPA which maybe expecting to
+receive packets to negotiate to bring link up. The bridge needs to
+block forwarding packets from these disabled ports, but there is no
+hard requirement to not allow local packet delivery.
+
+Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -193,6 +193,9 @@ static void __br_handle_local_finish(str
+ /* note: already called with rcu_read_lock */
+ static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
++ struct net_bridge_port *p = br_port_get_rcu(skb->dev);
++
++ if (p->state != BR_STATE_DISABLED)
+ __br_handle_local_finish(skb);
+
+ /* return 1 to signal the okfn() was called so it's ok to use the skb */
+@@ -289,6 +292,17 @@ rx_handler_result_t br_handle_frame(stru
+
+ forward:
+ switch (p->state) {
++ case BR_STATE_DISABLED:
++ if (ether_addr_equal(p->br->dev->dev_addr, dest))
++ skb->pkt_type = PACKET_HOST;
++
++ if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING,
++ dev_net(skb->dev), NULL, skb, skb->dev, NULL,
++ br_handle_local_finish) == 1) {
++ return RX_HANDLER_PASS;
++ }
++ break;
++
+ case BR_STATE_FORWARDING:
+ rhook = rcu_dereference(br_should_route_hook);
+ if (rhook) {
diff --git a/target/linux/generic/pending-5.4/180-net-phy-at803x-add-support-for-AT8032.patch b/target/linux/generic/pending-5.4/180-net-phy-at803x-add-support-for-AT8032.patch
new file mode 100644
index 0000000000..e496301e4e
--- /dev/null
+++ b/target/linux/generic/pending-5.4/180-net-phy-at803x-add-support-for-AT8032.patch
@@ -0,0 +1,63 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: net: phy: at803x: add support for AT8032
+
+Like AT8030, this PHY needs the GPIO reset workaround
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -62,8 +62,10 @@
+
+ #define ATH8030_PHY_ID 0x004dd076
+ #define ATH8031_PHY_ID 0x004dd074
++#define ATH8032_PHY_ID 0x004dd023
+ #define ATH8035_PHY_ID 0x004dd072
+ #define AT803X_PHY_ID_MASK 0xffffffef
++#define AT8032_PHY_ID_MASK 0xffffffff
+
+ MODULE_DESCRIPTION("Atheros 803x PHY driver");
+ MODULE_AUTHOR("Matus Ujhelyi");
+@@ -308,7 +310,7 @@ static void at803x_link_change_notify(st
+ struct at803x_priv *priv = phydev->priv;
+
+ /*
+- * Conduct a hardware reset for AT8030 every time a link loss is
++ * Conduct a hardware reset for AT8030/2 every time a link loss is
+ * signalled. This is necessary to circumvent a hardware bug that
+ * occurs when the cable is unplugged while TX packets are pending
+ * in the FIFO. In such cases, the FIFO enters an error mode it
+@@ -414,6 +416,24 @@ static struct phy_driver at803x_driver[]
+ .aneg_done = at803x_aneg_done,
+ .ack_interrupt = &at803x_ack_interrupt,
+ .config_intr = &at803x_config_intr,
++}, {
++ /* ATHEROS 8032 */
++ .phy_id = ATH8032_PHY_ID,
++ .name = "Atheros 8032 ethernet",
++ .phy_id_mask = AT8032_PHY_ID_MASK,
++ .probe = at803x_probe,
++ .config_init = at803x_config_init,
++ .link_change_notify = at803x_link_change_notify,
++ .set_wol = at803x_set_wol,
++ .get_wol = at803x_get_wol,
++ .suspend = at803x_suspend,
++ .resume = at803x_resume,
++ .features = PHY_BASIC_FEATURES,
++ .flags = PHY_HAS_INTERRUPT,
++ .config_aneg = genphy_config_aneg,
++ .read_status = genphy_read_status,
++ .ack_interrupt = at803x_ack_interrupt,
++ .config_intr = at803x_config_intr,
+ } };
+
+ module_phy_driver(at803x_driver);
+@@ -421,6 +441,7 @@ module_phy_driver(at803x_driver);
+ static struct mdio_device_id __maybe_unused atheros_tbl[] = {
+ { ATH8030_PHY_ID, AT803X_PHY_ID_MASK },
+ { ATH8031_PHY_ID, AT803X_PHY_ID_MASK },
++ { ATH8032_PHY_ID, AT8032_PHY_ID_MASK },
+ { ATH8035_PHY_ID, AT803X_PHY_ID_MASK },
+ { }
+ };
diff --git a/target/linux/generic/pending-5.4/201-extra_optimization.patch b/target/linux/generic/pending-5.4/201-extra_optimization.patch
new file mode 100644
index 0000000000..cafccd42c9
--- /dev/null
+++ b/target/linux/generic/pending-5.4/201-extra_optimization.patch
@@ -0,0 +1,32 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: Upgrade to Linux 2.6.19
+
+- Includes large parts of the patch from #1021 by dpalffy
+- Includes RB532 NAND driver changes by n0-1
+
+[john@phrozen.org: feix will add this to his upstream queue]
+
+lede-commit: bff468813f78f81e36ebb2a3f4354de7365e640f
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ Makefile | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/Makefile
++++ b/Makefile
+@@ -657,12 +657,12 @@ KBUILD_CFLAGS += $(call cc-disable-warni
+ KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+
+ ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+-KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
++KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,) $(EXTRA_OPTIMIZATION)
+ else
+ ifdef CONFIG_PROFILE_ALL_BRANCHES
+-KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
++KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,) $(EXTRA_OPTIMIZATION)
+ else
+-KBUILD_CFLAGS += -O2
++KBUILD_CFLAGS += -O2 -fno-reorder-blocks -fno-tree-ch $(EXTRA_OPTIMIZATION)
+ endif
+ endif
+
diff --git a/target/linux/generic/pending-5.4/203-kallsyms_uncompressed.patch b/target/linux/generic/pending-5.4/203-kallsyms_uncompressed.patch
new file mode 100644
index 0000000000..9230279fca
--- /dev/null
+++ b/target/linux/generic/pending-5.4/203-kallsyms_uncompressed.patch
@@ -0,0 +1,119 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: add a config option for keeping the kallsyms table uncompressed, saving ~9kb kernel size after lzma on ar71xx
+
+[john@phrozen.org: added to my upstream queue 30.12.2016]
+lede-commit: e0e3509b5ce2ccf93d4d67ea907613f5f7ec2eed
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ init/Kconfig | 11 +++++++++++
+ kernel/kallsyms.c | 8 ++++++++
+ scripts/kallsyms.c | 12 ++++++++++++
+ scripts/link-vmlinux.sh | 4 ++++
+ 4 files changed, 35 insertions(+)
+
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1149,6 +1149,17 @@ config SYSCTL_ARCH_UNALIGN_ALLOW
+ the unaligned access emulation.
+ see arch/parisc/kernel/unaligned.c for reference
+
++config KALLSYMS_UNCOMPRESSED
++ bool "Keep kallsyms uncompressed"
++ depends on KALLSYMS
++ help
++ Normally kallsyms contains compressed symbols (using a token table),
++ reducing the uncompressed kernel image size. Keeping the symbol table
++ uncompressed significantly improves the size of this part in compressed
++ kernel images.
++
++ Say N unless you need compressed kernel images to be small.
++
+ config HAVE_PCSPKR_PLATFORM
+ bool
+
+--- a/kernel/kallsyms.c
++++ b/kernel/kallsyms.c
+@@ -74,6 +74,11 @@ static unsigned int kallsyms_expand_symb
+ * For every byte on the compressed symbol data, copy the table
+ * entry for that byte.
+ */
++#ifdef CONFIG_KALLSYMS_UNCOMPRESSED
++ memcpy(result, data + 1, len - 1);
++ result += len - 1;
++ len = 0;
++#endif
+ while (len) {
+ tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
+ data++;
+@@ -106,6 +111,9 @@ tail:
+ */
+ static char kallsyms_get_symbol_type(unsigned int off)
+ {
++#ifdef CONFIG_KALLSYMS_UNCOMPRESSED
++ return kallsyms_names[off + 1];
++#endif
+ /*
+ * Get just the first code, look it up in the token table,
+ * and return the first char from this token.
+--- a/scripts/kallsyms.c
++++ b/scripts/kallsyms.c
+@@ -61,6 +61,7 @@ static struct addr_range percpu_range =
+ static struct sym_entry *table;
+ static unsigned int table_size, table_cnt;
+ static int all_symbols = 0;
++static int uncompressed = 0;
+ static int absolute_percpu = 0;
+ static int base_relative = 0;
+
+@@ -442,6 +443,9 @@ static void write_src(void)
+
+ free(markers);
+
++ if (uncompressed)
++ return;
++
+ output_label("kallsyms_token_table");
+ off = 0;
+ for (i = 0; i < 256; i++) {
+@@ -502,6 +506,9 @@ static void *find_token(unsigned char *s
+ {
+ int i;
+
++ if (uncompressed)
++ return NULL;
++
+ for (i = 0; i < len - 1; i++) {
+ if (str[i] == token[0] && str[i+1] == token[1])
+ return &str[i];
+@@ -574,6 +581,9 @@ static void optimize_result(void)
+ {
+ int i, best;
+
++ if (uncompressed)
++ return;
++
+ /* using the '\0' symbol last allows compress_symbols to use standard
+ * fast string functions */
+ for (i = 255; i >= 0; i--) {
+@@ -756,6 +766,8 @@ int main(int argc, char **argv)
+ absolute_percpu = 1;
+ else if (strcmp(argv[i], "--base-relative") == 0)
+ base_relative = 1;
++ else if (strcmp(argv[i], "--uncompressed") == 0)
++ uncompressed = 1;
+ else
+ usage();
+ }
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -133,6 +133,10 @@ kallsyms()
+ kallsymopt="${kallsymopt} --base-relative"
+ fi
+
++ if [ -n "${CONFIG_KALLSYMS_UNCOMPRESSED}" ]; then
++ kallsymopt="${kallsymopt} --uncompressed"
++ fi
++
+ local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
+ ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
+
diff --git a/target/linux/generic/pending-5.4/205-backtrace_module_info.patch b/target/linux/generic/pending-5.4/205-backtrace_module_info.patch
new file mode 100644
index 0000000000..5671173d45
--- /dev/null
+++ b/target/linux/generic/pending-5.4/205-backtrace_module_info.patch
@@ -0,0 +1,45 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: when KALLSYMS is disabled, print module address + size for matching backtrace entries
+
+[john@phrozen.org: felix will add this to his upstream queue]
+
+lede-commit 53827cdc824556cda910b23ce5030c363b8f1461
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ lib/vsprintf.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -681,8 +681,10 @@ char *symbol_string(char *buf, char *end
+ struct printf_spec spec, const char *fmt)
+ {
+ unsigned long value;
+-#ifdef CONFIG_KALLSYMS
+ char sym[KSYM_SYMBOL_LEN];
++#ifndef CONFIG_KALLSYMS
++ struct module *mod;
++ int len;
+ #endif
+
+ if (fmt[1] == 'R')
+@@ -696,11 +698,16 @@ char *symbol_string(char *buf, char *end
+ sprint_symbol(sym, value);
+ else
+ sprint_symbol_no_offset(sym, value);
+-
+- return string(buf, end, sym, spec);
+ #else
+- return special_hex_number(buf, end, value, sizeof(void *));
++ len = snprintf(sym, sizeof(sym), "0x%lx", value);
++
++ mod = __module_address(value);
++ if (mod)
++ snprintf(sym + len, sizeof(sym) - len, " [%s@%p+0x%x]",
++ mod->name, mod->core_layout.base,
++ mod->core_layout.size);
+ #endif
++ return string(buf, end, sym, spec);
+ }
+
+ static const struct printf_spec default_str_spec = {
diff --git a/target/linux/generic/pending-5.4/220-optimize_inlining.patch b/target/linux/generic/pending-5.4/220-optimize_inlining.patch
new file mode 100644
index 0000000000..ae032709d2
--- /dev/null
+++ b/target/linux/generic/pending-5.4/220-optimize_inlining.patch
@@ -0,0 +1,225 @@
+--- a/arch/arm/kernel/atags.h
++++ b/arch/arm/kernel/atags.h
+@@ -5,7 +5,7 @@ void convert_to_tag_list(struct tag *tag
+ const struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer,
+ unsigned int machine_nr);
+ #else
+-static inline const struct machine_desc *
++static inline const struct machine_desc * __init __noreturn
+ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
+ {
+ early_print("no ATAGS support: can't continue\n");
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -366,7 +366,7 @@ static inline bool cpu_have_feature(unsi
+ }
+
+ /* System capability check for constant caps */
+-static inline bool __cpus_have_const_cap(int num)
++static __always_inline bool __cpus_have_const_cap(int num)
+ {
+ if (num >= ARM64_NCAPS)
+ return false;
+@@ -380,7 +380,7 @@ static inline bool cpus_have_cap(unsigne
+ return test_bit(num, cpu_hwcaps);
+ }
+
+-static inline bool cpus_have_const_cap(int num)
++static __always_inline bool cpus_have_const_cap(int num)
+ {
+ if (static_branch_likely(&arm64_const_caps_ready))
+ return __cpus_have_const_cap(num);
+--- a/arch/mips/include/asm/bitops.h
++++ b/arch/mips/include/asm/bitops.h
+@@ -463,7 +463,7 @@ static inline void __clear_bit_unlock(un
+ * Return the bit position (0..63) of the most significant 1 bit in a word
+ * Returns -1 if no 1 bit exists
+ */
+-static inline unsigned long __fls(unsigned long word)
++static __always_inline unsigned long __fls(unsigned long word)
+ {
+ int num;
+
+@@ -529,7 +529,7 @@ static inline unsigned long __fls(unsign
+ * Returns 0..SZLONG-1
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+-static inline unsigned long __ffs(unsigned long word)
++static __always_inline unsigned long __ffs(unsigned long word)
+ {
+ return __fls(word & -word);
+ }
+--- a/arch/mips/kernel/cpu-bugs64.c
++++ b/arch/mips/kernel/cpu-bugs64.c
+@@ -42,8 +42,8 @@ static inline void align_mod(const int a
+ : GCC_IMM_ASM() (align), GCC_IMM_ASM() (mod));
+ }
+
+-static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
+- const int align, const int mod)
++static __always_inline void mult_sh_align_mod(long *v1, long *v2, long *w,
++ const int align, const int mod)
+ {
+ unsigned long flags;
+ int m1, m2;
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -498,14 +498,14 @@ static int __init prom_next_node(phandle
+ }
+ }
+
+-static inline int prom_getprop(phandle node, const char *pname,
+- void *value, size_t valuelen)
++static inline int __init prom_getprop(phandle node, const char *pname,
++ void *value, size_t valuelen)
+ {
+ return call_prom("getprop", 4, 1, node, ADDR(pname),
+ (u32)(unsigned long) value, (u32) valuelen);
+ }
+
+-static inline int prom_getproplen(phandle node, const char *pname)
++static inline int __init prom_getproplen(phandle node, const char *pname)
+ {
+ return call_prom("getproplen", 2, 1, node, ADDR(pname));
+ }
+--- a/arch/powerpc/mm/tlb-radix.c
++++ b/arch/powerpc/mm/tlb-radix.c
+@@ -90,8 +90,8 @@ void radix__tlbiel_all(unsigned int acti
+ asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
+ }
+
+-static inline void __tlbiel_pid(unsigned long pid, int set,
+- unsigned long ric)
++static __always_inline void __tlbiel_pid(unsigned long pid, int set,
++ unsigned long ric)
+ {
+ unsigned long rb,rs,prs,r;
+
+@@ -106,7 +106,7 @@ static inline void __tlbiel_pid(unsigned
+ trace_tlbie(0, 1, rb, rs, ric, prs, r);
+ }
+
+-static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
++static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric)
+ {
+ unsigned long rb,rs,prs,r;
+
+@@ -136,7 +136,7 @@ static inline void __tlbiel_lpid(unsigne
+ trace_tlbie(lpid, 1, rb, rs, ric, prs, r);
+ }
+
+-static inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
++static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
+ {
+ unsigned long rb,rs,prs,r;
+
+@@ -300,7 +300,7 @@ static inline void fixup_tlbie_lpid(unsi
+ /*
+ * We use 128 set in radix mode and 256 set in hpt mode.
+ */
+-static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
++static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
+ {
+ int set;
+
+@@ -983,7 +983,7 @@ void radix__tlb_flush(struct mmu_gather
+ tlb->need_flush_all = 0;
+ }
+
+-static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
++static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
+ unsigned long start, unsigned long end,
+ int psize, bool also_pwc)
+ {
+--- a/arch/s390/include/asm/cpacf.h
++++ b/arch/s390/include/asm/cpacf.h
+@@ -202,7 +202,7 @@ static inline int __cpacf_check_opcode(u
+ }
+ }
+
+-static inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
++static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
+ {
+ if (__cpacf_check_opcode(opcode)) {
+ __cpacf_query(opcode, mask);
+--- a/arch/x86/Kconfig.debug
++++ b/arch/x86/Kconfig.debug
+@@ -276,20 +276,6 @@ config CPA_DEBUG
+ ---help---
+ Do change_page_attr() self-tests every 30 seconds.
+
+-config OPTIMIZE_INLINING
+- bool "Allow gcc to uninline functions marked 'inline'"
+- ---help---
+- This option determines if the kernel forces gcc to inline the functions
+- developers have marked 'inline'. Doing so takes away freedom from gcc to
+- do what it thinks is best, which is desirable for the gcc 3.x series of
+- compilers. The gcc 4.x series have a rewritten inlining algorithm and
+- enabling this option will generate a smaller kernel there. Hopefully
+- this algorithm is so good that allowing gcc 4.x and above to make the
+- decision will become the default in the future. Until then this option
+- is there to test gcc for this.
+-
+- If unsure, say N.
+-
+ config DEBUG_ENTRY
+ bool "Debug low-level entry code"
+ depends on DEBUG_KERNEL
+--- a/drivers/mtd/nand/raw/vf610_nfc.c
++++ b/drivers/mtd/nand/raw/vf610_nfc.c
+@@ -373,7 +373,7 @@ static int vf610_nfc_cmd(struct nand_chi
+ {
+ const struct nand_op_instr *instr;
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+- int op_id = -1, trfr_sz = 0, offset;
++ int op_id = -1, trfr_sz = 0, offset = 0;
+ u32 col = 0, row = 0, cmd1 = 0, cmd2 = 0, code = 0;
+ bool force8bit = false;
+
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -309,6 +309,20 @@ config HEADERS_CHECK
+ exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
+ your build tree), to make sure they're suitable.
+
++config OPTIMIZE_INLINING
++ bool "Allow compiler to uninline functions marked 'inline'"
++ help
++ This option determines if the kernel forces gcc to inline the functions
++ developers have marked 'inline'. Doing so takes away freedom from gcc to
++ do what it thinks is best, which is desirable for the gcc 3.x series of
++ compilers. The gcc 4.x series have a rewritten inlining algorithm and
++ enabling this option will generate a smaller kernel there. Hopefully
++ this algorithm is so good that allowing gcc 4.x and above to make the
++ decision will become the default in the future. Until then this option
++ is there to test gcc for this.
++
++ If unsure, say N.
++
+ config DEBUG_SECTION_MISMATCH
+ bool "Enable full Section mismatch analysis"
+ help
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -306,9 +306,6 @@ config ZONE_DMA32
+ config AUDIT_ARCH
+ def_bool y if X86_64
+
+-config ARCH_SUPPORTS_OPTIMIZED_INLINING
+- def_bool y
+-
+ config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ def_bool y
+
+--- a/include/linux/compiler_types.h
++++ b/include/linux/compiler_types.h
+@@ -268,8 +268,7 @@ struct ftrace_likely_data {
+ * of extern inline functions at link time.
+ * A lot of inline functions can cause havoc with function tracing.
+ */
+-#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
+- !defined(CONFIG_OPTIMIZE_INLINING)
++#if !defined(CONFIG_OPTIMIZE_INLINING)
+ #define inline \
+ inline __attribute__((always_inline, unused)) notrace __gnu_inline
+ #else
diff --git a/target/linux/generic/pending-5.4/240-remove-unsane-filenames-from-deps_initramfs-list.patch b/target/linux/generic/pending-5.4/240-remove-unsane-filenames-from-deps_initramfs-list.patch
new file mode 100644
index 0000000000..c0dfe49c9b
--- /dev/null
+++ b/target/linux/generic/pending-5.4/240-remove-unsane-filenames-from-deps_initramfs-list.patch
@@ -0,0 +1,46 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: usr: sanitize deps_initramfs list
+
+If any filename in the intramfs dependency
+list contains a colon, that causes a kernel
+build error like this:
+
+/devel/openwrt/build_dir/linux-ar71xx_generic/linux-3.6.6/usr/Makefile:58: *** multiple target patterns. Stop.
+make[5]: *** [usr] Error 2
+
+Fix it by removing such filenames from the
+deps_initramfs list.
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ usr/Makefile | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/usr/Makefile
++++ b/usr/Makefile
+@@ -42,20 +42,22 @@ ifneq ($(wildcard $(obj)/$(datafile_d_y)
+ include $(obj)/$(datafile_d_y)
+ endif
+
++deps_initramfs_sane := $(foreach v,$(deps_initramfs),$(if $(findstring :,$(v)),,$(v)))
++
+ quiet_cmd_initfs = GEN $@
+ cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input)
+
+ targets := $(datafile_y)
+
+ # do not try to update files included in initramfs
+-$(deps_initramfs): ;
++$(deps_initramfs_sane): ;
+
+-$(deps_initramfs): klibcdirs
++$(deps_initramfs_sane): klibcdirs
+ # We rebuild initramfs_data.cpio if:
+ # 1) Any included file is newer then initramfs_data.cpio
+ # 2) There are changes in which files are included (added or deleted)
+ # 3) If gen_init_cpio are newer than initramfs_data.cpio
+ # 4) arguments to gen_initramfs.sh changes
+-$(obj)/$(datafile_y): $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs
++$(obj)/$(datafile_y): $(obj)/gen_init_cpio $(deps_initramfs_sane) klibcdirs
+ $(Q)$(initramfs) -l $(ramfs-input) > $(obj)/$(datafile_d_y)
+ $(call if_changed,initfs)
diff --git a/target/linux/generic/pending-5.4/261-enable_wilink_platform_without_drivers.patch b/target/linux/generic/pending-5.4/261-enable_wilink_platform_without_drivers.patch
new file mode 100644
index 0000000000..9955ab3c0b
--- /dev/null
+++ b/target/linux/generic/pending-5.4/261-enable_wilink_platform_without_drivers.patch
@@ -0,0 +1,20 @@
+From: Imre Kaloz <kaloz@openwrt.org>
+Subject: [PATCH] hack: net: wireless: make the wl12xx glue code available with
+ compat-wireless, too
+
+Signed-off-by: Imre Kaloz <kaloz@openwrt.org>
+---
+ drivers/net/wireless/ti/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/ti/Kconfig
++++ b/drivers/net/wireless/ti/Kconfig
+@@ -19,7 +19,7 @@ source "drivers/net/wireless/ti/wlcore/K
+
+ config WILINK_PLATFORM_DATA
+ bool "TI WiLink platform data"
+- depends on WLCORE_SDIO || WL1251_SDIO
++ depends on WLCORE_SDIO || WL1251_SDIO || ARCH_OMAP2PLUS
+ default y
+ ---help---
+ Small platform data bit needed to pass data to the sdio modules.
diff --git a/target/linux/generic/pending-5.4/300-mips_expose_boot_raw.patch b/target/linux/generic/pending-5.4/300-mips_expose_boot_raw.patch
new file mode 100644
index 0000000000..e19cec1400
--- /dev/null
+++ b/target/linux/generic/pending-5.4/300-mips_expose_boot_raw.patch
@@ -0,0 +1,40 @@
+From: Mark Miller <mark@mirell.org>
+Subject: mips: expose CONFIG_BOOT_RAW
+
+This exposes the CONFIG_BOOT_RAW symbol in Kconfig. This is needed on
+certain Broadcom chipsets running CFE in order to load the kernel.
+
+Signed-off-by: Mark Miller <mark@mirell.org>
+Acked-by: Rob Landley <rob@landley.net>
+---
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -1063,9 +1063,6 @@ config FW_ARC
+ config ARCH_MAY_HAVE_PC_FDC
+ bool
+
+-config BOOT_RAW
+- bool
+-
+ config CEVT_BCM1480
+ bool
+
+@@ -2969,6 +2966,18 @@ choice
+ bool "Extend builtin kernel arguments with bootloader arguments"
+ endchoice
+
++config BOOT_RAW
++ bool "Enable the kernel to be executed from the load address"
++ default n
++ help
++ Allow the kernel to be executed from the load address for
++ bootloaders which cannot read the ELF format. This places
++ a jump to start_kernel at the load address.
++
++ If unsure, say N.
++
++
++
+ endmenu
+
+ config LOCKDEP_SUPPORT
diff --git a/target/linux/generic/pending-5.4/302-mips_no_branch_likely.patch b/target/linux/generic/pending-5.4/302-mips_no_branch_likely.patch
new file mode 100644
index 0000000000..6192c417d3
--- /dev/null
+++ b/target/linux/generic/pending-5.4/302-mips_no_branch_likely.patch
@@ -0,0 +1,22 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: mips: use -mno-branch-likely for kernel and userspace
+
+saves ~11k kernel size after lzma and ~12k squashfs size in the
+
+lede-commit: 41a039f46450ffae9483d6216422098669da2900
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/mips/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -91,7 +91,7 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
+ # machines may also. Since BFD is incredibly buggy with respect to
+ # crossformat linking we rely on the elf2ecoff tool for format conversion.
+ #
+-cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
++cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely
+ cflags-y += -msoft-float
+ LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
+ KBUILD_AFLAGS_MODULE += -mlong-calls
diff --git a/target/linux/generic/pending-5.4/304-mips_disable_fpu.patch b/target/linux/generic/pending-5.4/304-mips_disable_fpu.patch
new file mode 100644
index 0000000000..332bb71e22
--- /dev/null
+++ b/target/linux/generic/pending-5.4/304-mips_disable_fpu.patch
@@ -0,0 +1,137 @@
+From: Manuel Lauss <manuel.lauss@gmail.com>
+Subject: [RFC PATCH v4 2/2] MIPS: make FPU emulator optional
+
+This small patch makes the MIPS FPU emulator optional. The kernel
+kills float-users on systems without a hardware FPU by sending a SIGILL.
+
+Disabling the emulator shrinks vmlinux by about 54kBytes (32bit,
+optimizing for size).
+
+Signed-off-by: Manuel Lauss <manuel.lauss@gmail.com>
+---
+v4: rediffed because of patch 1/2, should now work with micromips as well
+v3: updated patch description with size savings.
+v2: incorporated changes suggested by Jonas Gorski
+ force the fpu emulator on for micromips: relocating the parts
+ of the mmips code in the emulator to other areas would be a
+ much larger change; I went the cheap route instead with this.
+
+ arch/mips/Kbuild | 2 +-
+ arch/mips/Kconfig | 14 ++++++++++++++
+ arch/mips/include/asm/fpu.h | 5 +++--
+ arch/mips/include/asm/fpu_emulator.h | 15 +++++++++++++++
+ 4 files changed, 33 insertions(+), 3 deletions(-)
+
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -2893,6 +2893,20 @@ config MIPS_O32_FP64_SUPPORT
+
+ If unsure, say N.
+
++config MIPS_FPU_EMULATOR
++ bool "MIPS FPU Emulator"
++ default y
++ help
++ This option lets you disable the built-in MIPS FPU (Coprocessor 1)
++ emulator, which handles floating-point instructions on processors
++ without a hardware FPU. It is generally a good idea to keep the
++ emulator built-in, unless you are perfectly sure you have a
++ complete soft-float environment. With the emulator disabled, all
++ users of float operations will be killed with an illegal
++ instruction exception.
++
++ Say Y, please.
++
+ config USE_OF
+ bool
+ select OF
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -323,7 +323,7 @@ OBJCOPYFLAGS += --remove-section=.regin
+ head-y := arch/mips/kernel/head.o
+
+ libs-y += arch/mips/lib/
+-libs-y += arch/mips/math-emu/
++libs-$(CONFIG_MIPS_FPU_EMULATOR) += arch/mips/math-emu/
+
+ # See arch/mips/Kbuild for content of core part of the kernel
+ core-y += arch/mips/
+--- a/arch/mips/include/asm/fpu.h
++++ b/arch/mips/include/asm/fpu.h
+@@ -230,8 +230,10 @@ static inline int init_fpu(void)
+ /* Restore FRE */
+ write_c0_config5(config5);
+ enable_fpu_hazard();
+- } else
++ } else if (IS_ENABLED(CONFIG_MIPS_FPU_EMULATOR))
+ fpu_emulator_init_fpu();
++ else
++ ret = SIGILL;
+
+ return ret;
+ }
+--- a/arch/mips/include/asm/fpu_emulator.h
++++ b/arch/mips/include/asm/fpu_emulator.h
+@@ -30,6 +30,7 @@
+ #include <asm/local.h>
+ #include <asm/processor.h>
+
++#ifdef CONFIG_MIPS_FPU_EMULATOR
+ #ifdef CONFIG_DEBUG_FS
+
+ struct mips_fpu_emulator_stats {
+@@ -179,6 +180,16 @@ do { \
+ extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
+ struct mips_fpu_struct *ctx, int has_fpu,
+ void __user **fault_addr);
++#else /* no CONFIG_MIPS_FPU_EMULATOR */
++static inline int fpu_emulator_cop1Handler(struct pt_regs *xcp,
++ struct mips_fpu_struct *ctx, int has_fpu,
++ void __user **fault_addr)
++{
++ *fault_addr = NULL;
++ return SIGILL; /* we don't speak MIPS FPU */
++}
++#endif /* CONFIG_MIPS_FPU_EMULATOR */
++
+ void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+ struct task_struct *tsk);
+ int process_fpemu_return(int sig, void __user *fault_addr,
+--- a/arch/mips/include/asm/dsemul.h
++++ b/arch/mips/include/asm/dsemul.h
+@@ -41,6 +41,7 @@ struct task_struct;
+ extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
+ unsigned long branch_pc, unsigned long cont_pc);
+
++#ifdef CONFIG_MIPS_FPU_EMULATOR
+ /**
+ * do_dsemulret() - Return from a delay slot 'emulation' frame
+ * @xcp: User thread register context.
+@@ -88,5 +89,27 @@ extern bool dsemul_thread_rollback(struc
+ * before @mm is freed in order to avoid memory leaks.
+ */
+ extern void dsemul_mm_cleanup(struct mm_struct *mm);
++#else
++static inline bool do_dsemulret(struct pt_regs *xcp)
++{
++ return false;
++}
++
++static inline bool dsemul_thread_cleanup(struct task_struct *tsk)
++{
++ return false;
++}
++
++static inline bool dsemul_thread_rollback(struct pt_regs *regs)
++{
++ return false;
++}
++
++static inline void dsemul_mm_cleanup(struct mm_struct *mm)
++{
++
++}
++
++#endif
+
+ #endif /* __MIPS_ASM_DSEMUL_H__ */
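The dsemul.h and fpu_emulator.h hunks follow a common kernel pattern: when the option is off, the same function names are provided as static inline stubs, so call sites build unchanged and simply see a failure result (SIGILL here). A standalone sketch of that pattern, with illustrative names rather than the kernel's:

#include <stdio.h>

#ifdef HAVE_FPU_EMULATOR
static int emulate_fpu_insn(unsigned int insn)
{
	/* a real emulator would decode and execute the instruction here */
	(void)insn;
	return 0;
}
#else
static inline int emulate_fpu_insn(unsigned int insn)
{
	(void)insn;
	return -1;	/* the kernel stub returns SIGILL instead */
}
#endif

int main(void)
{
	if (emulate_fpu_insn(0x46000000u) < 0)
		puts("no emulator: the caller would deliver SIGILL");
	else
		puts("instruction emulated");
	return 0;
}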
diff --git a/target/linux/generic/pending-5.4/305-mips_module_reloc.patch b/target/linux/generic/pending-5.4/305-mips_module_reloc.patch
new file mode 100644
index 0000000000..a8e4e78c91
--- /dev/null
+++ b/target/linux/generic/pending-5.4/305-mips_module_reloc.patch
@@ -0,0 +1,371 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: mips: replace -mlong-calls with -mno-long-calls to make function calls faster in kernel modules; to achieve this, try to allocate modules from physically contiguous memory and fall back to PLT trampolines for out-of-range jumps
+
+lede-commit: 3b3d64743ba2a874df9d70cd19e242205b0a788c
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/mips/Makefile | 5 +
+ arch/mips/include/asm/module.h | 5 +
+ arch/mips/kernel/module.c | 279 ++++++++++++++++++++++++++++++++++++++++-
+ 3 files changed, 284 insertions(+), 5 deletions(-)
+
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -94,8 +94,18 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
+ cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely
+ cflags-y += -msoft-float
+ LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
++ifdef CONFIG_64BIT
+ KBUILD_AFLAGS_MODULE += -mlong-calls
+ KBUILD_CFLAGS_MODULE += -mlong-calls
++else
++ ifdef CONFIG_DYNAMIC_FTRACE
++ KBUILD_AFLAGS_MODULE += -mlong-calls
++ KBUILD_CFLAGS_MODULE += -mlong-calls
++ else
++ KBUILD_AFLAGS_MODULE += -mno-long-calls
++ KBUILD_CFLAGS_MODULE += -mno-long-calls
++ endif
++endif
+
+ ifeq ($(CONFIG_RELOCATABLE),y)
+ LDFLAGS_vmlinux += --emit-relocs
+--- a/arch/mips/include/asm/module.h
++++ b/arch/mips/include/asm/module.h
+@@ -12,6 +12,11 @@ struct mod_arch_specific {
+ const struct exception_table_entry *dbe_start;
+ const struct exception_table_entry *dbe_end;
+ struct mips_hi16 *r_mips_hi16_list;
++
++ void *phys_plt_tbl;
++ void *virt_plt_tbl;
++ unsigned int phys_plt_offset;
++ unsigned int virt_plt_offset;
+ };
+
+ typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */
+--- a/arch/mips/kernel/module.c
++++ b/arch/mips/kernel/module.c
+@@ -44,14 +44,221 @@ struct mips_hi16 {
+ static LIST_HEAD(dbe_list);
+ static DEFINE_SPINLOCK(dbe_lock);
+
+-#ifdef MODULE_START
++/*
++ * Get the potential max trampolines size required of the init and
++ * non-init sections. Only used if we cannot find enough contiguous
++ * physically mapped memory to put the module into.
++ */
++static unsigned int
++get_plt_size(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
++ const char *secstrings, unsigned int symindex, bool is_init)
++{
++ unsigned long ret = 0;
++ unsigned int i, j;
++ Elf_Sym *syms;
++
++ /* Everything marked ALLOC (this includes the exported symbols) */
++ for (i = 1; i < hdr->e_shnum; ++i) {
++ unsigned int info = sechdrs[i].sh_info;
++
++ if (sechdrs[i].sh_type != SHT_REL
++ && sechdrs[i].sh_type != SHT_RELA)
++ continue;
++
++ /* Not a valid relocation section? */
++ if (info >= hdr->e_shnum)
++ continue;
++
++ /* Don't bother with non-allocated sections */
++ if (!(sechdrs[info].sh_flags & SHF_ALLOC))
++ continue;
++
++ /* If it's called *.init*, and we're not init, we're
++ not interested */
++ if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
++ != is_init)
++ continue;
++
++ syms = (Elf_Sym *) sechdrs[symindex].sh_addr;
++ if (sechdrs[i].sh_type == SHT_REL) {
++ Elf_Mips_Rel *rel = (void *) sechdrs[i].sh_addr;
++ unsigned int size = sechdrs[i].sh_size / sizeof(*rel);
++
++ for (j = 0; j < size; ++j) {
++ Elf_Sym *sym;
++
++ if (ELF_MIPS_R_TYPE(rel[j]) != R_MIPS_26)
++ continue;
++
++ sym = syms + ELF_MIPS_R_SYM(rel[j]);
++ if (!is_init && sym->st_shndx != SHN_UNDEF)
++ continue;
++
++ ret += 4 * sizeof(int);
++ }
++ } else {
++ Elf_Mips_Rela *rela = (void *) sechdrs[i].sh_addr;
++ unsigned int size = sechdrs[i].sh_size / sizeof(*rela);
++
++ for (j = 0; j < size; ++j) {
++ Elf_Sym *sym;
++
++ if (ELF_MIPS_R_TYPE(rela[j]) != R_MIPS_26)
++ continue;
++
++ sym = syms + ELF_MIPS_R_SYM(rela[j]);
++ if (!is_init && sym->st_shndx != SHN_UNDEF)
++ continue;
++
++ ret += 4 * sizeof(int);
++ }
++ }
++ }
++
++ return ret;
++}
++
++#ifndef MODULE_START
++static void *alloc_phys(unsigned long size)
++{
++ unsigned order;
++ struct page *page;
++ struct page *p;
++
++ size = PAGE_ALIGN(size);
++ order = get_order(size);
++
++ page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN |
++ __GFP_THISNODE, order);
++ if (!page)
++ return NULL;
++
++ split_page(page, order);
++
++ /* mark all pages except for the last one */
++ for (p = page; p + 1 < page + (size >> PAGE_SHIFT); ++p)
++ set_bit(PG_owner_priv_1, &p->flags);
++
++ for (p = page + (size >> PAGE_SHIFT); p < page + (1 << order); ++p)
++ __free_page(p);
++
++ return page_address(page);
++}
++#endif
++
++static void free_phys(void *ptr)
++{
++ struct page *page;
++ bool free;
++
++ page = virt_to_page(ptr);
++ do {
++ free = test_and_clear_bit(PG_owner_priv_1, &page->flags);
++ __free_page(page);
++ page++;
++ } while (free);
++}
++
++
+ void *module_alloc(unsigned long size)
+ {
++#ifdef MODULE_START
+ return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
+ GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
++#else
++ void *ptr;
++
++ if (size == 0)
++ return NULL;
++
++ ptr = alloc_phys(size);
++
++ /* If we failed to allocate physically contiguous memory,
++ * fall back to regular vmalloc. The module loader code will
++ * create jump tables to handle long jumps */
++ if (!ptr)
++ return vmalloc(size);
++
++ return ptr;
++#endif
+ }
++
++static inline bool is_phys_addr(void *ptr)
++{
++#ifdef CONFIG_64BIT
++ return (KSEGX((unsigned long)ptr) == CKSEG0);
++#else
++ return (KSEGX(ptr) == KSEG0);
+ #endif
++}
++
++/* Free memory returned from module_alloc */
++void module_memfree(void *module_region)
++{
++ if (is_phys_addr(module_region))
++ free_phys(module_region);
++ else
++ vfree(module_region);
++}
++
++static void *__module_alloc(int size, bool phys)
++{
++ void *ptr;
++
++ if (phys)
++ ptr = kmalloc(size, GFP_KERNEL);
++ else
++ ptr = vmalloc(size);
++ return ptr;
++}
++
++static void __module_free(void *ptr)
++{
++ if (is_phys_addr(ptr))
++ kfree(ptr);
++ else
++ vfree(ptr);
++}
++
++int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
++ char *secstrings, struct module *mod)
++{
++ unsigned int symindex = 0;
++ unsigned int core_size, init_size;
++ int i;
++
++ mod->arch.phys_plt_offset = 0;
++ mod->arch.virt_plt_offset = 0;
++ mod->arch.phys_plt_tbl = NULL;
++ mod->arch.virt_plt_tbl = NULL;
++
++ if (IS_ENABLED(CONFIG_64BIT))
++ return 0;
++
++ for (i = 1; i < hdr->e_shnum; i++)
++ if (sechdrs[i].sh_type == SHT_SYMTAB)
++ symindex = i;
++
++ core_size = get_plt_size(hdr, sechdrs, secstrings, symindex, false);
++ init_size = get_plt_size(hdr, sechdrs, secstrings, symindex, true);
++
++ if ((core_size + init_size) == 0)
++ return 0;
++
++ mod->arch.phys_plt_tbl = __module_alloc(core_size + init_size, 1);
++ if (!mod->arch.phys_plt_tbl)
++ return -ENOMEM;
++
++ mod->arch.virt_plt_tbl = __module_alloc(core_size + init_size, 0);
++ if (!mod->arch.virt_plt_tbl) {
++ __module_free(mod->arch.phys_plt_tbl);
++ mod->arch.phys_plt_tbl = NULL;
++ return -ENOMEM;
++ }
++
++ return 0;
++}
+
+ static int apply_r_mips_none(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+@@ -67,9 +274,40 @@ static int apply_r_mips_32(struct module
+ return 0;
+ }
+
++static Elf_Addr add_plt_entry_to(unsigned *plt_offset,
++ void *start, Elf_Addr v)
++{
++ unsigned *tramp = start + *plt_offset;
++ *plt_offset += 4 * sizeof(int);
++
++ /* adjust carry for addiu */
++ if (v & 0x00008000)
++ v += 0x10000;
++
++ tramp[0] = 0x3c190000 | (v >> 16); /* lui t9, hi16 */
++ tramp[1] = 0x27390000 | (v & 0xffff); /* addiu t9, t9, lo16 */
++ tramp[2] = 0x03200008; /* jr t9 */
++ tramp[3] = 0x00000000; /* nop */
++
++ return (Elf_Addr) tramp;
++}
++
++static Elf_Addr add_plt_entry(struct module *me, void *location, Elf_Addr v)
++{
++ if (is_phys_addr(location))
++ return add_plt_entry_to(&me->arch.phys_plt_offset,
++ me->arch.phys_plt_tbl, v);
++ else
++ return add_plt_entry_to(&me->arch.virt_plt_offset,
++ me->arch.virt_plt_tbl, v);
++
++}
++
+ static int apply_r_mips_26(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+ {
++ u32 ofs = base & 0x03ffffff;
++
+ if (v % 4) {
+ pr_err("module %s: dangerous R_MIPS_26 relocation\n",
+ me->name);
+@@ -77,13 +315,17 @@ static int apply_r_mips_26(struct module
+ }
+
+ if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+- pr_err("module %s: relocation overflow\n",
+- me->name);
+- return -ENOEXEC;
++ v = add_plt_entry(me, location, v + (ofs << 2));
++ if (!v) {
++ pr_err("module %s: relocation overflow\n",
++ me->name);
++ return -ENOEXEC;
++ }
++ ofs = 0;
+ }
+
+ *location = (*location & ~0x03ffffff) |
+- ((base + (v >> 2)) & 0x03ffffff);
++ ((ofs + (v >> 2)) & 0x03ffffff);
+
+ return 0;
+ }
+@@ -459,9 +701,36 @@ int module_finalize(const Elf_Ehdr *hdr,
+ list_add(&me->arch.dbe_list, &dbe_list);
+ spin_unlock_irq(&dbe_lock);
+ }
++
++ /* Get rid of the fixup trampoline if we're running the module
++ * from physically mapped address space */
++ if (me->arch.phys_plt_offset == 0) {
++ __module_free(me->arch.phys_plt_tbl);
++ me->arch.phys_plt_tbl = NULL;
++ }
++ if (me->arch.virt_plt_offset == 0) {
++ __module_free(me->arch.virt_plt_tbl);
++ me->arch.virt_plt_tbl = NULL;
++ }
++
+ return 0;
+ }
+
++void module_arch_freeing_init(struct module *mod)
++{
++ if (mod->state == MODULE_STATE_LIVE)
++ return;
++
++ if (mod->arch.phys_plt_tbl) {
++ __module_free(mod->arch.phys_plt_tbl);
++ mod->arch.phys_plt_tbl = NULL;
++ }
++ if (mod->arch.virt_plt_tbl) {
++ __module_free(mod->arch.virt_plt_tbl);
++ mod->arch.virt_plt_tbl = NULL;
++ }
++}
++
+ void module_arch_cleanup(struct module *mod)
+ {
+ spin_lock_irq(&dbe_lock);
diff --git a/target/linux/generic/pending-5.4/306-mips_mem_functions_performance.patch b/target/linux/generic/pending-5.4/306-mips_mem_functions_performance.patch
new file mode 100644
index 0000000000..e73cfd61b3
--- /dev/null
+++ b/target/linux/generic/pending-5.4/306-mips_mem_functions_performance.patch
@@ -0,0 +1,106 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: [PATCH] mips: allow the compiler to optimize memset, memcmp, memcpy for better performance and (in some instances) smaller code
+
+lede-commit: 07e59c7bc7f375f792ec9734be42fe4fa391a8bb
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/mips/boot/compressed/Makefile | 3 ++-
+ arch/mips/include/asm/string.h | 38 ++++++++++++++++++++++++++++++++++++++
+ arch/mips/lib/Makefile | 2 +-
+ arch/mips/lib/memcmp.c | 22 ++++++++++++++++++++++
+ 4 files changed, 63 insertions(+), 2 deletions(-)
+ create mode 100644 arch/mips/lib/memcmp.c
+
+--- a/arch/mips/boot/compressed/Makefile
++++ b/arch/mips/boot/compressed/Makefile
+@@ -23,7 +23,8 @@ KBUILD_CFLAGS := $(filter-out -pg, $(KBU
+ KBUILD_CFLAGS := $(filter-out -fstack-protector, $(KBUILD_CFLAGS))
+
+ KBUILD_CFLAGS := $(KBUILD_CFLAGS) -D__KERNEL__ \
+- -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) -D"VMLINUX_LOAD_ADDRESS_ULL=$(VMLINUX_LOAD_ADDRESS)ull"
++ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) -D"VMLINUX_LOAD_ADDRESS_ULL=$(VMLINUX_LOAD_ADDRESS)ull" \
++ -D__ZBOOT__
+
+ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
+ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \
+--- a/arch/mips/include/asm/string.h
++++ b/arch/mips/include/asm/string.h
+@@ -140,4 +140,42 @@ extern void *memcpy(void *__to, __const_
+ #define __HAVE_ARCH_MEMMOVE
+ extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+
++#ifndef __ZBOOT__
++#define memset(__s, __c, len) \
++({ \
++ size_t __len = (len); \
++ void *__ret; \
++ if (__builtin_constant_p(len) && __len >= 64) \
++ __ret = memset((__s), (__c), __len); \
++ else \
++ __ret = __builtin_memset((__s), (__c), __len); \
++ __ret; \
++})
++
++#define memcpy(dst, src, len) \
++({ \
++ size_t __len = (len); \
++ void *__ret; \
++ if (__builtin_constant_p(len) && __len >= 64) \
++ __ret = memcpy((dst), (src), __len); \
++ else \
++ __ret = __builtin_memcpy((dst), (src), __len); \
++ __ret; \
++})
++
++#define memmove(dst, src, len) \
++({ \
++ size_t __len = (len); \
++ void *__ret; \
++ if (__builtin_constant_p(len) && __len >= 64) \
++ __ret = memmove((dst), (src), __len); \
++ else \
++ __ret = __builtin_memmove((dst), (src), __len); \
++ __ret; \
++})
++
++#define __HAVE_ARCH_MEMCMP
++#define memcmp(src1, src2, len) __builtin_memcmp((src1), (src2), (len))
++#endif
++
+ #endif /* _ASM_STRING_H */
+--- a/arch/mips/lib/Makefile
++++ b/arch/mips/lib/Makefile
+@@ -5,7 +5,7 @@
+
+ lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
+ mips-atomic.o strncpy_user.o \
+- strnlen_user.o uncached.o
++ strnlen_user.o uncached.o memcmp.o
+
+ obj-y += iomap.o iomap_copy.o
+ obj-$(CONFIG_PCI) += iomap-pci.o
+--- /dev/null
++++ b/arch/mips/lib/memcmp.c
+@@ -0,0 +1,22 @@
++/*
++ * copied from linux/lib/string.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ */
++
++#include <linux/module.h>
++#include <linux/string.h>
++
++#undef memcmp
++int memcmp(const void *cs, const void *ct, size_t count)
++{
++ const unsigned char *su1, *su2;
++ int res = 0;
++
++ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
++ if ((res = *su1 - *su2) != 0)
++ break;
++ return res;
++}
++EXPORT_SYMBOL(memcmp);
++
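The string.h wrappers above route small or non-constant lengths to the compiler builtins, which GCC may expand inline, and keep the hand-written out-of-line versions for large constant copies. A standalone sketch of the same dispatch; fast_memcpy is an illustrative name:

#include <stdio.h>
#include <string.h>

/* Constant sizes of 64 bytes or more fall through to the library call;
 * everything else is left to the builtin, which may be expanded inline. */
#define fast_memcpy(dst, src, len)				\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = memcpy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})

int main(void)
{
	char src[16] = "hello", dst[16];

	fast_memcpy(dst, src, sizeof(src));	/* small constant: builtin */
	puts(dst);
	return 0;
}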
diff --git a/target/linux/generic/pending-5.4/307-mips_highmem_offset.patch b/target/linux/generic/pending-5.4/307-mips_highmem_offset.patch
new file mode 100644
index 0000000000..9dd2fa9863
--- /dev/null
+++ b/target/linux/generic/pending-5.4/307-mips_highmem_offset.patch
@@ -0,0 +1,19 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: adjust mips highmem offset to avoid the need for -mlong-calls on systems with >256M RAM
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/mips/include/asm/mach-generic/spaces.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/include/asm/mach-generic/spaces.h
++++ b/arch/mips/include/asm/mach-generic/spaces.h
+@@ -50,7 +50,7 @@
+ * Memory above this physical address will be considered highmem.
+ */
+ #ifndef HIGHMEM_START
+-#define HIGHMEM_START _AC(0x20000000, UL)
++#define HIGHMEM_START _AC(0x10000000, UL)
+ #endif
+
+ #endif /* CONFIG_32BIT */
diff --git a/target/linux/generic/pending-5.4/308-mips32r2_tune.patch b/target/linux/generic/pending-5.4/308-mips32r2_tune.patch
new file mode 100644
index 0000000000..8636511464
--- /dev/null
+++ b/target/linux/generic/pending-5.4/308-mips32r2_tune.patch
@@ -0,0 +1,22 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: add -mtune=34kc to MIPS CFLAGS when building for mips32r2
+
+This provides a good tradeoff across at least 24Kc-74Kc, while also
+producing smaller code.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/mips/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -177,7 +177,7 @@ cflags-$(CONFIG_CPU_VR41XX) += -march=r4
+ cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap
+ cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap
+ cflags-$(CONFIG_CPU_MIPS32_R1) += -march=mips32 -Wa,--trap
+-cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -Wa,--trap
++cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -mtune=34kc -Wa,--trap
+ cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap -modd-spreg
+ cflags-$(CONFIG_CPU_MIPS64_R1) += -march=mips64 -Wa,--trap
+ cflags-$(CONFIG_CPU_MIPS64_R2) += -march=mips64r2 -Wa,--trap
diff --git a/target/linux/generic/pending-5.4/309-MIPS-Add-CPU-option-reporting-to-proc-cpuinfo.patch b/target/linux/generic/pending-5.4/309-MIPS-Add-CPU-option-reporting-to-proc-cpuinfo.patch
new file mode 100644
index 0000000000..e4075a24bd
--- /dev/null
+++ b/target/linux/generic/pending-5.4/309-MIPS-Add-CPU-option-reporting-to-proc-cpuinfo.patch
@@ -0,0 +1,142 @@
+From 87ec87c2ad615c1a177cd08ef5fa29fc739f6e50 Mon Sep 17 00:00:00 2001
+From: Hauke Mehrtens <hauke@hauke-m.de>
+Date: Sun, 23 Dec 2018 18:06:53 +0100
+Subject: [PATCH] MIPS: Add CPU option reporting to /proc/cpuinfo
+
+Many MIPS CPUs have optional CPU features which are not activated for
+all CPU cores. Print the CPU options which are implemented in the core
+in /proc/cpuinfo. This makes it possible to see what features are
+supported and which are not supported. This should cover all standard
+MIPS extensions, before it only printed information about the main MIPS
+ASEs.
+
+Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
+---
+ arch/mips/kernel/proc.c | 116 ++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 116 insertions(+)
+
+--- a/arch/mips/kernel/proc.c
++++ b/arch/mips/kernel/proc.c
+@@ -134,6 +134,122 @@ static int show_cpuinfo(struct seq_file
+ seq_printf(m, "micromips kernel\t: %s\n",
+ (read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no");
+ }
++
++ seq_printf(m, "Options implemented\t:");
++ if (cpu_has_tlb)
++ seq_printf(m, "%s", " tlb");
++ if (cpu_has_ftlb)
++ seq_printf(m, "%s", " ftlb");
++ if (cpu_has_tlbinv)
++ seq_printf(m, "%s", " tlbinv");
++ if (cpu_has_segments)
++ seq_printf(m, "%s", " segments");
++ if (cpu_has_rixiex)
++ seq_printf(m, "%s", " rixiex");
++ if (cpu_has_ldpte)
++ seq_printf(m, "%s", " ldpte");
++ if (cpu_has_maar)
++ seq_printf(m, "%s", " maar");
++ if (cpu_has_rw_llb)
++ seq_printf(m, "%s", " rw_llb");
++ if (cpu_has_4kex)
++ seq_printf(m, "%s", " 4kex");
++ if (cpu_has_3k_cache)
++ seq_printf(m, "%s", " 3k_cache");
++ if (cpu_has_4k_cache)
++ seq_printf(m, "%s", " 4k_cache");
++ if (cpu_has_6k_cache)
++ seq_printf(m, "%s", " 6k_cache");
++ if (cpu_has_8k_cache)
++ seq_printf(m, "%s", " 8k_cache");
++ if (cpu_has_tx39_cache)
++ seq_printf(m, "%s", " tx39_cache");
++ if (cpu_has_octeon_cache)
++ seq_printf(m, "%s", " octeon_cache");
++ if (cpu_has_fpu)
++ seq_printf(m, "%s", " fpu");
++ if (cpu_has_32fpr)
++ seq_printf(m, "%s", " 32fpr");
++ if (cpu_has_cache_cdex_p)
++ seq_printf(m, "%s", " cache_cdex_p");
++ if (cpu_has_cache_cdex_s)
++ seq_printf(m, "%s", " cache_cdex_s");
++ if (cpu_has_prefetch)
++ seq_printf(m, "%s", " prefetch");
++ if (cpu_has_mcheck)
++ seq_printf(m, "%s", " mcheck");
++ if (cpu_has_ejtag)
++ seq_printf(m, "%s", " ejtag");
++ if (cpu_has_llsc)
++ seq_printf(m, "%s", " llsc");
++ if (cpu_has_bp_ghist)
++ seq_printf(m, "%s", " bp_ghist");
++ if (cpu_has_guestctl0ext)
++ seq_printf(m, "%s", " guestctl0ext");
++ if (cpu_has_guestctl1)
++ seq_printf(m, "%s", " guestctl1");
++ if (cpu_has_guestctl2)
++ seq_printf(m, "%s", " guestctl2");
++ if (cpu_has_guestid)
++ seq_printf(m, "%s", " guestid");
++ if (cpu_has_drg)
++ seq_printf(m, "%s", " drg");
++ if (cpu_has_rixi)
++ seq_printf(m, "%s", " rixi");
++ if (cpu_has_lpa)
++ seq_printf(m, "%s", " lpa");
++ if (cpu_has_mvh)
++ seq_printf(m, "%s", " mvh");
++ if (cpu_has_vtag_icache)
++ seq_printf(m, "%s", " vtag_icache");
++ if (cpu_has_dc_aliases)
++ seq_printf(m, "%s", " dc_aliases");
++ if (cpu_has_ic_fills_f_dc)
++ seq_printf(m, "%s", " ic_fills_f_dc");
++ if (cpu_has_pindexed_dcache)
++ seq_printf(m, "%s", " pindexed_dcache");
++ if (cpu_has_userlocal)
++ seq_printf(m, "%s", " userlocal");
++ if (cpu_has_nofpuex)
++ seq_printf(m, "%s", " nofpuex");
++ if (cpu_has_vint)
++ seq_printf(m, "%s", " vint");
++ if (cpu_has_veic)
++ seq_printf(m, "%s", " veic");
++ if (cpu_has_inclusive_pcaches)
++ seq_printf(m, "%s", " inclusive_pcaches");
++ if (cpu_has_perf_cntr_intr_bit)
++ seq_printf(m, "%s", " perf_cntr_intr_bit");
++ if (cpu_has_ufr)
++ seq_printf(m, "%s", " ufr");
++ if (cpu_has_fre)
++ seq_printf(m, "%s", " fre");
++ if (cpu_has_cdmm)
++ seq_printf(m, "%s", " cdmm");
++ if (cpu_has_small_pages)
++ seq_printf(m, "%s", " small_pages");
++ if (cpu_has_nan_legacy)
++ seq_printf(m, "%s", " nan_legacy");
++ if (cpu_has_nan_2008)
++ seq_printf(m, "%s", " nan_2008");
++ if (cpu_has_ebase_wg)
++ seq_printf(m, "%s", " ebase_wg");
++ if (cpu_has_badinstr)
++ seq_printf(m, "%s", " badinstr");
++ if (cpu_has_badinstrp)
++ seq_printf(m, "%s", " badinstrp");
++ if (cpu_has_contextconfig)
++ seq_printf(m, "%s", " contextconfig");
++ if (cpu_has_perf)
++ seq_printf(m, "%s", " perf");
++ if (cpu_has_shared_ftlb_ram)
++ seq_printf(m, "%s", " shared_ftlb_ram");
++ if (cpu_has_shared_ftlb_entries)
++ seq_printf(m, "%s", " shared_ftlb_entries");
++ if (cpu_has_mipsmt_pertccounters)
++ seq_printf(m, "%s", " mipsmt_pertccounters");
++ seq_printf(m, "\n");
++
+ seq_printf(m, "shadow register sets\t: %d\n",
+ cpu_data[n].srsets);
+ seq_printf(m, "kscratch registers\t: %d\n",
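The options end up on a single space-separated "Options implemented" line, so a userspace consumer can test for a feature with a simple token search on that line. A small sketch of such a consumer (the helper name is illustrative):

#include <stdio.h>
#include <string.h>

/* Return 1 if opt appears as a token on the "Options implemented" line of
 * /proc/cpuinfo, 0 otherwise. */
static int cpu_option_implemented(const char *opt)
{
	char line[1024], needle[64];
	FILE *f = fopen("/proc/cpuinfo", "r");
	int found = 0;

	if (!f)
		return 0;

	snprintf(needle, sizeof(needle), " %s", opt);
	while (fgets(line, sizeof(line), f)) {
		char *p;

		if (strncmp(line, "Options implemented", 19))
			continue;
		p = strstr(line, needle);
		/* require a token boundary after the match */
		found = p && (p[strlen(needle)] == ' ' ||
			      p[strlen(needle)] == '\n' ||
			      p[strlen(needle)] == '\0');
		break;
	}
	fclose(f);
	return found;
}

int main(void)
{
	printf("fpu: %s\n", cpu_option_implemented("fpu") ? "yes" : "no");
	return 0;
}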
diff --git a/target/linux/generic/pending-5.4/310-arm_module_unresolved_weak_sym.patch b/target/linux/generic/pending-5.4/310-arm_module_unresolved_weak_sym.patch
new file mode 100644
index 0000000000..bc9f0a4c4d
--- /dev/null
+++ b/target/linux/generic/pending-5.4/310-arm_module_unresolved_weak_sym.patch
@@ -0,0 +1,22 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: fix errors in unresolved weak symbols on arm
+
+lede-commit: 570699d4838a907c3ef9f2819bf19eb72997b32f
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/arm/kernel/module.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/arm/kernel/module.c
++++ b/arch/arm/kernel/module.c
+@@ -95,6 +95,10 @@ apply_relocate(Elf32_Shdr *sechdrs, cons
+ return -ENOEXEC;
+ }
+
++ if ((IS_ERR_VALUE(sym->st_value) || !sym->st_value) &&
++ ELF_ST_BIND(sym->st_info) == STB_WEAK)
++ continue;
++
+ loc = dstsec->sh_addr + rel->r_offset;
+
+ switch (ELF32_R_TYPE(rel->r_info)) {
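For context: a weak symbol that stays unresolved at module link time gets the address 0, so relocations against it can safely be skipped as long as callers guard the call. A small C illustration of the calling-side convention that makes this safe:

#include <stdio.h>

/* A weak reference that may remain unresolved; if nothing provides it,
 * its address is NULL at run time. */
extern void optional_hook(void) __attribute__((weak));

int main(void)
{
	if (optional_hook)	/* NULL when the symbol was never resolved */
		optional_hook();
	else
		puts("optional_hook not present, skipping");
	return 0;
}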
diff --git a/target/linux/generic/pending-5.4/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch b/target/linux/generic/pending-5.4/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch
new file mode 100644
index 0000000000..44d85b5b86
--- /dev/null
+++ b/target/linux/generic/pending-5.4/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch
@@ -0,0 +1,274 @@
+From: Yousong Zhou <yszhou4tech@gmail.com>
+Subject: MIPS: kexec: Accept command line parameters from userspace.
+
+Signed-off-by: Yousong Zhou <yszhou4tech@gmail.com>
+---
+ arch/mips/kernel/machine_kexec.c | 153 +++++++++++++++++++++++++++++++-----
+ arch/mips/kernel/machine_kexec.h | 20 +++++
+ arch/mips/kernel/relocate_kernel.S | 21 +++--
+ 3 files changed, 167 insertions(+), 27 deletions(-)
+ create mode 100644 arch/mips/kernel/machine_kexec.h
+
+--- a/arch/mips/kernel/machine_kexec.c
++++ b/arch/mips/kernel/machine_kexec.c
+@@ -10,14 +10,11 @@
+ #include <linux/mm.h>
+ #include <linux/delay.h>
+
++#include <asm/bootinfo.h>
+ #include <asm/cacheflush.h>
+ #include <asm/page.h>
+-
+-extern const unsigned char relocate_new_kernel[];
+-extern const size_t relocate_new_kernel_size;
+-
+-extern unsigned long kexec_start_address;
+-extern unsigned long kexec_indirection_page;
++#include <linux/uaccess.h>
++#include "machine_kexec.h"
+
+ int (*_machine_kexec_prepare)(struct kimage *) = NULL;
+ void (*_machine_kexec_shutdown)(void) = NULL;
+@@ -28,6 +25,101 @@ atomic_t kexec_ready_to_reboot = ATOMIC_
+ void (*_crash_smp_send_stop)(void) = NULL;
+ #endif
+
++static void machine_kexec_print_args(void)
++{
++ unsigned long argc = (int)kexec_args[0];
++ int i;
++
++ pr_info("kexec_args[0] (argc): %lu\n", argc);
++ pr_info("kexec_args[1] (argv): %p\n", (void *)kexec_args[1]);
++ pr_info("kexec_args[2] (env ): %p\n", (void *)kexec_args[2]);
++ pr_info("kexec_args[3] (desc): %p\n", (void *)kexec_args[3]);
++
++ for (i = 0; i < argc; i++) {
++ pr_info("kexec_argv[%d] = %p, %s\n",
++ i, kexec_argv[i], kexec_argv[i]);
++ }
++}
++
++static void machine_kexec_init_argv(struct kimage *image)
++{
++ void __user *buf = NULL;
++ size_t bufsz;
++ size_t size;
++ int i;
++
++ bufsz = 0;
++ for (i = 0; i < image->nr_segments; i++) {
++ struct kexec_segment *seg;
++
++ seg = &image->segment[i];
++ if (seg->bufsz < 6)
++ continue;
++
++ if (strncmp((char *) seg->buf, "kexec ", 6))
++ continue;
++
++ buf = seg->buf;
++ bufsz = seg->bufsz;
++ break;
++ }
++
++ if (!buf)
++ return;
++
++ size = KEXEC_COMMAND_LINE_SIZE;
++ size = min(size, bufsz);
++ if (size < bufsz)
++ pr_warn("kexec command line truncated to %zd bytes\n", size);
++
++ /* Copy to kernel space */
++ if (copy_from_user(kexec_argv_buf, buf, size))
++ pr_warn("kexec command line copy to kernel space failed\n");
++
++ kexec_argv_buf[size - 1] = 0;
++}
++
++static void machine_kexec_parse_argv(struct kimage *image)
++{
++ char *reboot_code_buffer;
++ int reloc_delta;
++ char *ptr;
++ int argc;
++ int i;
++
++ ptr = kexec_argv_buf;
++ argc = 0;
++
++ /*
++ * convert command line string to array of parameters
++ * (as bootloader does).
++ */
++ while (ptr && *ptr && (KEXEC_MAX_ARGC > argc)) {
++ if (*ptr == ' ') {
++ *ptr++ = '\0';
++ continue;
++ }
++
++ kexec_argv[argc++] = ptr;
++ ptr = strchr(ptr, ' ');
++ }
++
++ if (!argc)
++ return;
++
++ kexec_args[0] = argc;
++ kexec_args[1] = (unsigned long)kexec_argv;
++ kexec_args[2] = 0;
++ kexec_args[3] = 0;
++
++ reboot_code_buffer = page_address(image->control_code_page);
++ reloc_delta = reboot_code_buffer - (char *)kexec_relocate_new_kernel;
++
++ kexec_args[1] += reloc_delta;
++ for (i = 0; i < argc; i++)
++ kexec_argv[i] += reloc_delta;
++}
++
+ static void kexec_image_info(const struct kimage *kimage)
+ {
+ unsigned long i;
+@@ -52,6 +144,18 @@ int
+ machine_kexec_prepare(struct kimage *kimage)
+ {
+ kexec_image_info(kimage);
++ /*
++ * Initialize the arguments to the original firmware values first, so
++ * that booting can still succeed if kexec-tools did not pass any.
++ */
++
++ kexec_args[0] = fw_arg0;
++ kexec_args[1] = fw_arg1;
++ kexec_args[2] = fw_arg2;
++ kexec_args[3] = fw_arg3;
++
++ machine_kexec_init_argv(kimage);
++ machine_kexec_parse_argv(kimage);
+
+ if (_machine_kexec_prepare)
+ return _machine_kexec_prepare(kimage);
+@@ -89,10 +193,12 @@ machine_kexec(struct kimage *image)
+ unsigned long *ptr;
+
+ reboot_code_buffer =
+- (unsigned long)page_address(image->control_code_page);
++ (unsigned long)page_address(image->control_code_page);
++ pr_info("reboot_code_buffer = %p\n", (void *)reboot_code_buffer);
+
+ kexec_start_address =
+ (unsigned long) phys_to_virt(image->start);
++ pr_info("kexec_start_address = %p\n", (void *)kexec_start_address);
+
+ if (image->type == KEXEC_TYPE_DEFAULT) {
+ kexec_indirection_page =
+@@ -100,9 +206,19 @@ machine_kexec(struct kimage *image)
+ } else {
+ kexec_indirection_page = (unsigned long)&image->head;
+ }
++ pr_info("kexec_indirection_page = %p\n", (void *)kexec_indirection_page);
+
+- memcpy((void*)reboot_code_buffer, relocate_new_kernel,
+- relocate_new_kernel_size);
++ pr_info("Where is memcpy: %p\n", memcpy);
++ pr_info("kexec_relocate_new_kernel = %p, kexec_relocate_new_kernel_end = %p\n",
++ (void *)kexec_relocate_new_kernel, &kexec_relocate_new_kernel_end);
++ pr_info("Copy %lu bytes from %p to %p\n", KEXEC_RELOCATE_NEW_KERNEL_SIZE,
++ (void *)kexec_relocate_new_kernel, (void *)reboot_code_buffer);
++ memcpy((void*)reboot_code_buffer, kexec_relocate_new_kernel,
++ KEXEC_RELOCATE_NEW_KERNEL_SIZE);
++
++ pr_info("Before _print_args().\n");
++ machine_kexec_print_args();
++ pr_info("Before eval loop.\n");
+
+ /*
+ * The generic kexec code builds a page list with physical
+@@ -124,15 +240,16 @@ machine_kexec(struct kimage *image)
+ /*
+ * we do not want to be bothered.
+ */
++ pr_info("Before irq_disable.\n");
+ local_irq_disable();
+
+- printk("Will call new kernel at %08lx\n", image->start);
+- printk("Bye ...\n");
++ pr_info("Will call new kernel at %08lx\n", image->start);
++ pr_info("Bye ...\n");
+ __flush_cache_all();
+ #ifdef CONFIG_SMP
+ /* All secondary cpus now may jump to kexec_wait cycle */
+ relocated_kexec_smp_wait = reboot_code_buffer +
+- (void *)(kexec_smp_wait - relocate_new_kernel);
++ (void *)(kexec_smp_wait - kexec_relocate_new_kernel);
+ smp_wmb();
+ atomic_set(&kexec_ready_to_reboot, 1);
+ #endif
+--- /dev/null
++++ b/arch/mips/kernel/machine_kexec.h
+@@ -0,0 +1,20 @@
++#ifndef _MACHINE_KEXEC_H
++#define _MACHINE_KEXEC_H
++
++#ifndef __ASSEMBLY__
++extern const unsigned char kexec_relocate_new_kernel[];
++extern unsigned long kexec_relocate_new_kernel_end;
++extern unsigned long kexec_start_address;
++extern unsigned long kexec_indirection_page;
++
++extern char kexec_argv_buf[];
++extern char *kexec_argv[];
++
++#define KEXEC_RELOCATE_NEW_KERNEL_SIZE ((unsigned long)&kexec_relocate_new_kernel_end - (unsigned long)kexec_relocate_new_kernel)
++#endif /* !__ASSEMBLY__ */
++
++#define KEXEC_COMMAND_LINE_SIZE 256
++#define KEXEC_ARGV_SIZE (KEXEC_COMMAND_LINE_SIZE / 16)
++#define KEXEC_MAX_ARGC (KEXEC_ARGV_SIZE / sizeof(long))
++
++#endif
+--- a/arch/mips/kernel/relocate_kernel.S
++++ b/arch/mips/kernel/relocate_kernel.S
+@@ -12,8 +12,9 @@
+ #include <asm/mipsregs.h>
+ #include <asm/stackframe.h>
+ #include <asm/addrspace.h>
++#include "machine_kexec.h"
+
+-LEAF(relocate_new_kernel)
++LEAF(kexec_relocate_new_kernel)
+ PTR_L a0, arg0
+ PTR_L a1, arg1
+ PTR_L a2, arg2
+@@ -98,7 +99,7 @@ done:
+ #endif
+ /* jump to kexec_start_address */
+ j s1
+- END(relocate_new_kernel)
++ END(kexec_relocate_new_kernel)
+
+ #ifdef CONFIG_SMP
+ /*
+@@ -184,9 +185,15 @@ kexec_indirection_page:
+ PTR 0
+ .size kexec_indirection_page, PTRSIZE
+
+-relocate_new_kernel_end:
++kexec_argv_buf:
++ EXPORT(kexec_argv_buf)
++ .skip KEXEC_COMMAND_LINE_SIZE
++ .size kexec_argv_buf, KEXEC_COMMAND_LINE_SIZE
++
++kexec_argv:
++ EXPORT(kexec_argv)
++ .skip KEXEC_ARGV_SIZE
++ .size kexec_argv, KEXEC_ARGV_SIZE
+
+-relocate_new_kernel_size:
+- EXPORT(relocate_new_kernel_size)
+- PTR relocate_new_kernel_end - relocate_new_kernel
+- .size relocate_new_kernel_size, PTRSIZE
++kexec_relocate_new_kernel_end:
++ EXPORT(kexec_relocate_new_kernel_end)
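machine_kexec_parse_argv() tokenizes the copied command line in place, much like a bootloader would: spaces become NUL terminators and each word start is recorded in the argv array (the pointers are then rebased by reloc_delta into the relocated control page). A standalone sketch of the tokenization step:

#include <stdio.h>
#include <string.h>

#define MAX_ARGC 8

/* Split buf on spaces in place; returns the number of arguments found. */
static int split_cmdline(char *buf, char *argv[], int max_argc)
{
	char *ptr = buf;
	int argc = 0;

	while (ptr && *ptr && argc < max_argc) {
		if (*ptr == ' ') {
			*ptr++ = '\0';
			continue;
		}
		argv[argc++] = ptr;
		ptr = strchr(ptr, ' ');
	}
	return argc;
}

int main(void)
{
	char cmdline[] = "kexec console=ttyS0,115200 root=/dev/mtdblock2";
	char *argv[MAX_ARGC];
	int argc = split_cmdline(cmdline, argv, MAX_ARGC);
	int i;

	for (i = 0; i < argc; i++)
		printf("argv[%d] = %s\n", i, argv[i]);
	return 0;
}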
diff --git a/target/linux/generic/pending-5.4/332-arc-add-OWRTDTB-section.patch b/target/linux/generic/pending-5.4/332-arc-add-OWRTDTB-section.patch
new file mode 100644
index 0000000000..ec89547726
--- /dev/null
+++ b/target/linux/generic/pending-5.4/332-arc-add-OWRTDTB-section.patch
@@ -0,0 +1,84 @@
+From 34ef04f3845ed2b47d57dd9d3b795b16e1f8185a Mon Sep 17 00:00:00 2001
+From: Evgeniy Didin <Evgeniy.Didin@synopsys.com>
+Date: Fri, 15 Mar 2019 18:53:38 +0300
+Subject: [PATCH] arc: add OWRTDTB section
+
+This change allows OpenWRT to patch resulting kernel binary with
+external .dtb.
+
+That allows us to re-use exactly the same vmlinux on different boards
+given their ARC core configurations match (at least cache line sizes etc).
+
+"patch-dtb" searches for the ASCII "OWRTDTB:" string and copies the
+.dtb right after it, keeping the string in place.
+
+Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com>
+Signed-off-by: Evgeniy Didin <Evgeniy.Didin@synopsys.com>
+---
+ arch/arc/kernel/head.S | 10 ++++++++++
+ arch/arc/kernel/setup.c | 4 +++-
+ arch/arc/kernel/vmlinux.lds.S | 13 +++++++++++++
+ 3 files changed, 26 insertions(+), 1 deletion(-)
+
+--- a/arch/arc/kernel/head.S
++++ b/arch/arc/kernel/head.S
+@@ -59,6 +59,16 @@
+ #endif
+ .endm
+
++ ; Here "patch-dtb" will embed external .dtb
++ ; Note "patch-dtb" searches for ASCII "OWRTDTB:" string
++ ; and pastes .dtb right after it, hence the string precedes
++ ; __image_dtb symbol.
++ .section .owrt, "aw",@progbits
++ .ascii "OWRTDTB:"
++ENTRY(__image_dtb)
++ .fill 0x4000
++END(__image_dtb)
++
+ .section .init.text, "ax",@progbits
+
+ ;----------------------------------------------------------------
+--- a/arch/arc/kernel/setup.c
++++ b/arch/arc/kernel/setup.c
+@@ -526,7 +526,7 @@ void __init handle_uboot_args(void)
+ ignore_uboot_args:
+
+ if (use_embedded_dtb) {
+- machine_desc = setup_machine_fdt(__dtb_start);
++ machine_desc = setup_machine_fdt(&__image_dtb);
+ if (!machine_desc)
+ panic("Embedded DT invalid\n");
+ }
+@@ -542,6 +542,8 @@ ignore_uboot_args:
+ }
+ }
+
++extern struct boot_param_header __image_dtb;
++
+ void __init setup_arch(char **cmdline_p)
+ {
+ handle_uboot_args();
+--- a/arch/arc/kernel/vmlinux.lds.S
++++ b/arch/arc/kernel/vmlinux.lds.S
+@@ -30,6 +30,19 @@ SECTIONS
+
+ . = CONFIG_LINUX_LINK_BASE;
+
++ /*
++ * In OpenWrt we want to patch the built binary to embed a .dtb of choice.
++ * This is implemented with the "patch-dtb" utility, which searches for
++ * the "OWRTDTB:" string in the first 16k of the image and, if it is found,
++ * copies the .dtb right after the mentioned string.
++ *
++ * Note: "OWRTDTB:" won't be overwritten with .dtb, .dtb will follow it.
++ */
++ .owrt : {
++ *(.owrt)
++ . = ALIGN(PAGE_SIZE);
++ }
++
+ _int_vec_base_lds = .;
+ .vector : {
+ *(.vector)
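On the host side, patch-dtb only has to find the "OWRTDTB:" marker in the image and copy the flattened device tree into the 0x4000-byte area reserved right after it. A hypothetical sketch of that core step; this is not the actual patch-dtb source:

#include <stddef.h>
#include <string.h>

#define DTB_RESERVED 0x4000	/* matches the .fill 0x4000 in head.S above */

/* Locate the marker in a loaded image and paste the .dtb right after it,
 * keeping the marker itself in place. Returns 0 on success, -1 otherwise. */
int embed_dtb(unsigned char *image, size_t image_len,
	      const unsigned char *dtb, size_t dtb_len)
{
	static const char marker[] = "OWRTDTB:";
	const size_t mlen = sizeof(marker) - 1;
	size_t i;

	if (dtb_len > DTB_RESERVED)
		return -1;

	for (i = 0; i + mlen + DTB_RESERVED <= image_len; i++) {
		if (!memcmp(image + i, marker, mlen)) {
			memcpy(image + i + mlen, dtb, dtb_len);
			return 0;
		}
	}
	return -1;
}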
diff --git a/target/linux/generic/pending-5.4/333-arc-enable-unaligned-access-in-kernel-mode.patch b/target/linux/generic/pending-5.4/333-arc-enable-unaligned-access-in-kernel-mode.patch
new file mode 100644
index 0000000000..4e0265aef5
--- /dev/null
+++ b/target/linux/generic/pending-5.4/333-arc-enable-unaligned-access-in-kernel-mode.patch
@@ -0,0 +1,24 @@
+From: Alexey Brodkin <abrodkin@synopsys.com>
+Subject: arc: enable unaligned access in kernel mode
+
+This enables misaligned access handling even in kernel mode.
+Some wireless drivers (ath9k-htc and mt7601u) use misaligned accesses
+here and there; to cope with that without fixing the drivers themselves,
+we simply handle such accesses gracefully on ARC.
+
+Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com>
+---
+ arch/arc/kernel/unaligned.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arc/kernel/unaligned.c
++++ b/arch/arc/kernel/unaligned.c
+@@ -206,7 +206,7 @@ int misaligned_fixup(unsigned long addre
+ char buf[TASK_COMM_LEN];
+
+ /* handle user mode only and only if enabled by sysadmin */
+- if (!user_mode(regs) || !unaligned_enabled)
++ if (!unaligned_enabled)
+ return 1;
+
+ if (no_unaligned_warning) {
diff --git a/target/linux/generic/pending-5.4/341-MIPS-mm-remove-no-op-dma_map_ops-where-possible.patch b/target/linux/generic/pending-5.4/341-MIPS-mm-remove-no-op-dma_map_ops-where-possible.patch
new file mode 100644
index 0000000000..07a6da453f
--- /dev/null
+++ b/target/linux/generic/pending-5.4/341-MIPS-mm-remove-no-op-dma_map_ops-where-possible.patch
@@ -0,0 +1,82 @@
+From 203f17906ff45705fbdaa0430dbbc71142c2640f Mon Sep 17 00:00:00 2001
+From: Hauke Mehrtens <hauke@hauke-m.de>
+Date: Sat, 8 Dec 2018 21:45:53 +0100
+Subject: [PATCH 1/3] MIPS: Compile post DMA flush only when needed
+
+dma_sync_phys() is only called for some CPUs when a mapping is removed.
+Add ARCH_HAS_SYNC_DMA_FOR_CPU only for the CPUs listed in
+cpu_needs_post_dma_flush() which need this extra call and do not compile
+this code in for other CPUs. We need this for R10000, R12000, BMIPS5000
+CPUs and CPUs supporting MAAR which was introduced in MIPS32r5.
+
+This will hopefully improve the performance of the devices that are not affected.
+
+Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
+---
+ arch/mips/Kconfig | 6 +++++-
+ arch/mips/mm/dma-noncoherent.c | 2 ++
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -1117,7 +1117,6 @@ config DMA_PERDEV_COHERENT
+ config DMA_NONCOHERENT
+ bool
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+- select ARCH_HAS_SYNC_DMA_FOR_CPU
+ select NEED_DMA_MAP_STATE
+ select DMA_NONCOHERENT_MMAP
+ select DMA_NONCOHERENT_CACHE_SYNC
+@@ -1898,9 +1897,11 @@ config SYS_HAS_CPU_MIPS32_R3_5
+
+ config SYS_HAS_CPU_MIPS32_R5
+ bool
++ select ARCH_HAS_SYNC_DMA_FOR_CPU
+
+ config SYS_HAS_CPU_MIPS32_R6
+ bool
++ select ARCH_HAS_SYNC_DMA_FOR_CPU
+
+ config SYS_HAS_CPU_MIPS64_R1
+ bool
+@@ -1910,6 +1911,7 @@ config SYS_HAS_CPU_MIPS64_R2
+
+ config SYS_HAS_CPU_MIPS64_R6
+ bool
++ select ARCH_HAS_SYNC_DMA_FOR_CPU
+
+ config SYS_HAS_CPU_R3000
+ bool
+@@ -1946,6 +1948,7 @@ config SYS_HAS_CPU_R8000
+
+ config SYS_HAS_CPU_R10000
+ bool
++ select ARCH_HAS_SYNC_DMA_FOR_CPU
+
+ config SYS_HAS_CPU_RM7000
+ bool
+@@ -1974,6 +1977,7 @@ config SYS_HAS_CPU_BMIPS4380
+ config SYS_HAS_CPU_BMIPS5000
+ bool
+ select SYS_HAS_CPU_BMIPS
++ select ARCH_HAS_SYNC_DMA_FOR_CPU
+
+ config SYS_HAS_CPU_XLR
+ bool
+--- a/arch/mips/mm/dma-noncoherent.c
++++ b/arch/mips/mm/dma-noncoherent.c
+@@ -191,12 +191,14 @@ void arch_sync_dma_for_device(struct dev
+ dma_sync_phys(paddr, size, dir);
+ }
+
++#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+ {
+ if (cpu_needs_post_dma_flush(dev))
+ dma_sync_phys(paddr, size, dir);
+ }
++#endif
+
+ void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
diff --git a/target/linux/generic/pending-5.4/342-powerpc-Enable-kernel-XZ-compression-option-on-PPC_8.patch b/target/linux/generic/pending-5.4/342-powerpc-Enable-kernel-XZ-compression-option-on-PPC_8.patch
new file mode 100644
index 0000000000..035a84b1e4
--- /dev/null
+++ b/target/linux/generic/pending-5.4/342-powerpc-Enable-kernel-XZ-compression-option-on-PPC_8.patch
@@ -0,0 +1,25 @@
+From 66770a004afe10df11d3902e16eaa0c2c39436bb Mon Sep 17 00:00:00 2001
+From: Pawel Dembicki <paweldembicki@gmail.com>
+Date: Fri, 24 May 2019 17:56:19 +0200
+Subject: [PATCH] powerpc: Enable kernel XZ compression option on PPC_85xx
+
+Enable kernel XZ compression option on PPC_85xx. Tested with
+simpleImage on TP-Link TL-WDR4900 (Freescale P1014 processor).
+
+Suggested-by: Christian Lamparter <chunkeey@gmail.com>
+Signed-off-by: Pawel Dembicki <paweldembicki@gmail.com>
+---
+ arch/powerpc/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -197,7 +197,7 @@ config PPC
+ select HAVE_IOREMAP_PROT
+ select HAVE_IRQ_EXIT_ON_IRQ_STACK
+ select HAVE_KERNEL_GZIP
+- select HAVE_KERNEL_XZ if PPC_BOOK3S
++ select HAVE_KERNEL_XZ if PPC_BOOK3S || PPC_85xx
+ select HAVE_KPROBES
+ select HAVE_KPROBES_ON_FTRACE
+ select HAVE_KRETPROBES
diff --git a/target/linux/generic/pending-5.4/400-mtd-add-rootfs-split-support.patch b/target/linux/generic/pending-5.4/400-mtd-add-rootfs-split-support.patch
new file mode 100644
index 0000000000..bc783deb40
--- /dev/null
+++ b/target/linux/generic/pending-5.4/400-mtd-add-rootfs-split-support.patch
@@ -0,0 +1,107 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: make rootfs split/detection more generic - patch can be moved to generic-2.6 after testing on other platforms
+
+lede-commit: 328e660b31f0937d52c5ae3d6e7029409918a9df
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/Kconfig | 17 +++++++++++++++++
+ drivers/mtd/mtdpart.c | 35 +++++++++++++++++++++++++++++++++++
+ include/linux/mtd/partitions.h | 2 ++
+ 3 files changed, 54 insertions(+)
+
+--- a/drivers/mtd/Kconfig
++++ b/drivers/mtd/Kconfig
+@@ -11,6 +11,23 @@ menuconfig MTD
+
+ if MTD
+
++menu "OpenWrt specific MTD options"
++
++config MTD_ROOTFS_ROOT_DEV
++ bool "Automatically set 'rootfs' partition to be root filesystem"
++ default y
++
++config MTD_SPLIT_FIRMWARE
++ bool "Automatically split firmware partition for kernel+rootfs"
++ default y
++
++config MTD_SPLIT_FIRMWARE_NAME
++ string "Firmware partition name"
++ depends on MTD_SPLIT_FIRMWARE
++ default "firmware"
++
++endmenu
++
+ config MTD_TESTS
+ tristate "MTD tests support (DANGEROUS)"
+ depends on m
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -29,10 +29,12 @@
+ #include <linux/kmod.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
++#include <linux/magic.h>
+ #include <linux/err.h>
+ #include <linux/of.h>
+
+ #include "mtdcore.h"
++#include "mtdsplit/mtdsplit.h"
+
+ /* Our partition linked list */
+ static LIST_HEAD(mtd_partitions);
+@@ -52,6 +54,8 @@ struct mtd_part {
+ struct list_head list;
+ };
+
++static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part);
++
+ /*
+ * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
+ * the pointer to that structure.
+@@ -626,6 +630,7 @@ int mtd_add_partition(struct mtd_info *p
+ if (ret)
+ goto err_remove_part;
+
++ mtd_partition_split(parent, new);
+ mtd_add_partition_attrs(new);
+
+ return 0;
+@@ -712,6 +717,29 @@ int mtd_del_partition(struct mtd_info *m
+ }
+ EXPORT_SYMBOL_GPL(mtd_del_partition);
+
++#ifdef CONFIG_MTD_SPLIT_FIRMWARE_NAME
++#define SPLIT_FIRMWARE_NAME CONFIG_MTD_SPLIT_FIRMWARE_NAME
++#else
++#define SPLIT_FIRMWARE_NAME "unused"
++#endif
++
++static void split_firmware(struct mtd_info *master, struct mtd_part *part)
++{
++}
++
++static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part)
++{
++ static int rootfs_found = 0;
++
++ if (rootfs_found)
++ return;
++
++ if (IS_ENABLED(CONFIG_MTD_SPLIT_FIRMWARE) &&
++ !strcmp(part->mtd.name, SPLIT_FIRMWARE_NAME) &&
++ !of_find_property(mtd_get_of_node(&part->mtd), "compatible", NULL))
++ split_firmware(master, part);
++}
++
+ /*
+ * This function, given a master MTD object and a partition table, creates
+ * and registers slave MTD objects which are bound to the master according to
+@@ -752,6 +780,7 @@ int add_mtd_partitions(struct mtd_info *
+ goto err_del_partitions;
+ }
+
++ mtd_partition_split(master, slave);
+ mtd_add_partition_attrs(slave);
+ /* Look for subpartitions */
+ parse_mtd_partitions(&slave->mtd, parts[i].types, NULL);
diff --git a/target/linux/generic/pending-5.4/401-mtd-add-support-for-different-partition-parser-types.patch b/target/linux/generic/pending-5.4/401-mtd-add-support-for-different-partition-parser-types.patch
new file mode 100644
index 0000000000..a59659cda6
--- /dev/null
+++ b/target/linux/generic/pending-5.4/401-mtd-add-support-for-different-partition-parser-types.patch
@@ -0,0 +1,142 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: mtd: add support for different partition parser types
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/mtd/mtdpart.c | 56 ++++++++++++++++++++++++++++++++++++++++
+ include/linux/mtd/partitions.h | 11 ++++++++
+ 2 files changed, 67 insertions(+)
+
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -55,6 +55,10 @@ struct mtd_part {
+ };
+
+ static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part);
++static int parse_mtd_partitions_by_type(struct mtd_info *master,
++ enum mtd_parser_type type,
++ const struct mtd_partition **pparts,
++ struct mtd_part_parser_data *data);
+
+ /*
+ * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
+@@ -717,6 +721,36 @@ int mtd_del_partition(struct mtd_info *m
+ }
+ EXPORT_SYMBOL_GPL(mtd_del_partition);
+
++static int
++run_parsers_by_type(struct mtd_part *slave, enum mtd_parser_type type)
++{
++ struct mtd_partition *parts;
++ int nr_parts;
++ int i;
++
++ nr_parts = parse_mtd_partitions_by_type(&slave->mtd, type, (const struct mtd_partition **)&parts,
++ NULL);
++ if (nr_parts <= 0)
++ return nr_parts;
++
++ if (WARN_ON(!parts))
++ return 0;
++
++ for (i = 0; i < nr_parts; i++) {
++ /* adjust partition offsets */
++ parts[i].offset += slave->offset;
++
++ mtd_add_partition(slave->parent,
++ parts[i].name,
++ parts[i].offset,
++ parts[i].size);
++ }
++
++ kfree(parts);
++
++ return nr_parts;
++}
++
+ #ifdef CONFIG_MTD_SPLIT_FIRMWARE_NAME
+ #define SPLIT_FIRMWARE_NAME CONFIG_MTD_SPLIT_FIRMWARE_NAME
+ #else
+@@ -1066,6 +1100,61 @@ void mtd_part_parser_cleanup(struct mtd_
+ }
+ }
+
++static struct mtd_part_parser *
++get_partition_parser_by_type(enum mtd_parser_type type,
++ struct mtd_part_parser *start)
++{
++ struct mtd_part_parser *p, *ret = NULL;
++
++ spin_lock(&part_parser_lock);
++
++ p = list_prepare_entry(start, &part_parsers, list);
++ if (start)
++ mtd_part_parser_put(start);
++
++ list_for_each_entry_continue(p, &part_parsers, list) {
++ if (p->type == type && try_module_get(p->owner)) {
++ ret = p;
++ break;
++ }
++ }
++
++ spin_unlock(&part_parser_lock);
++
++ return ret;
++}
++
++static int parse_mtd_partitions_by_type(struct mtd_info *master,
++ enum mtd_parser_type type,
++ const struct mtd_partition **pparts,
++ struct mtd_part_parser_data *data)
++{
++ struct mtd_part_parser *prev = NULL;
++ int ret = 0;
++
++ while (1) {
++ struct mtd_part_parser *parser;
++
++ parser = get_partition_parser_by_type(type, prev);
++ if (!parser)
++ break;
++
++ ret = (*parser->parse_fn)(master, pparts, data);
++
++ if (ret > 0) {
++ mtd_part_parser_put(parser);
++ printk(KERN_NOTICE
++ "%d %s partitions found on MTD device %s\n",
++ ret, parser->name, master->name);
++ break;
++ }
++
++ prev = parser;
++ }
++
++ return ret;
++}
++
+ int mtd_is_partition(const struct mtd_info *mtd)
+ {
+ struct mtd_part *part;
+--- a/include/linux/mtd/partitions.h
++++ b/include/linux/mtd/partitions.h
+@@ -73,6 +73,10 @@ struct mtd_part_parser_data {
+ * Functions dealing with the various ways of partitioning the space
+ */
+
++enum mtd_parser_type {
++ MTD_PARSER_TYPE_DEVICE = 0,
++};
++
+ struct mtd_part_parser {
+ struct list_head list;
+ struct module *owner;
+@@ -81,6 +85,7 @@ struct mtd_part_parser {
+ int (*parse_fn)(struct mtd_info *, const struct mtd_partition **,
+ struct mtd_part_parser_data *);
+ void (*cleanup)(const struct mtd_partition *pparts, int nr_parts);
++ enum mtd_parser_type type;
+ };
+
+ /* Container for passing around a set of parsed partitions */
diff --git a/target/linux/generic/pending-5.4/402-mtd-use-typed-mtd-parsers-for-rootfs-and-firmware-split.patch b/target/linux/generic/pending-5.4/402-mtd-use-typed-mtd-parsers-for-rootfs-and-firmware-split.patch
new file mode 100644
index 0000000000..58d2e44574
--- /dev/null
+++ b/target/linux/generic/pending-5.4/402-mtd-use-typed-mtd-parsers-for-rootfs-and-firmware-split.patch
@@ -0,0 +1,44 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: kernel/3.10: allow the use of partition parsers for rootfs and firmware split
+
+lede-commit: 3b71cd94bc9517bc25267dccb393b07d4b54564e
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/mtd/mtdpart.c | 37 +++++++++++++++++++++++++++++++++++++
+ include/linux/mtd/partitions.h | 2 ++
+ 2 files changed, 39 insertions(+)
+
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -759,6 +759,7 @@ run_parsers_by_type(struct mtd_part *sla
+
+ static void split_firmware(struct mtd_info *master, struct mtd_part *part)
+ {
++ run_parsers_by_type(part, MTD_PARSER_TYPE_FIRMWARE);
+ }
+
+ static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part)
+@@ -768,6 +769,12 @@ static void mtd_partition_split(struct m
+ if (rootfs_found)
+ return;
+
++ if (!strcmp(part->mtd.name, "rootfs")) {
++ run_parsers_by_type(part, MTD_PARSER_TYPE_ROOTFS);
++
++ rootfs_found = 1;
++ }
++
+ if (IS_ENABLED(CONFIG_MTD_SPLIT_FIRMWARE) &&
+ !strcmp(part->mtd.name, SPLIT_FIRMWARE_NAME) &&
+ !of_find_property(mtd_get_of_node(&part->mtd), "compatible", NULL))
+--- a/include/linux/mtd/partitions.h
++++ b/include/linux/mtd/partitions.h
+@@ -75,6 +75,8 @@ struct mtd_part_parser_data {
+
+ enum mtd_parser_type {
+ MTD_PARSER_TYPE_DEVICE = 0,
++ MTD_PARSER_TYPE_ROOTFS,
++ MTD_PARSER_TYPE_FIRMWARE,
+ };
+
+ struct mtd_part_parser {
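With the type field in place, a downstream splitter (for example one of the mtdsplit drivers) registers itself like any other partition parser but tags itself with the type it serves, so run_parsers_by_type() can pick it up for the matching partition. A hedged sketch of such a registration; "myfw" and the empty parse function are illustrative only:

#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static int myfw_parse(struct mtd_info *mtd,
		      const struct mtd_partition **pparts,
		      struct mtd_part_parser_data *data)
{
	/* a real parser would read the firmware header from mtd here and
	 * fill *pparts with a kernel/rootfs partition table */
	return 0;
}

static struct mtd_part_parser myfw_parser = {
	.owner = THIS_MODULE,
	.name = "myfw",
	.parse_fn = myfw_parse,
	.type = MTD_PARSER_TYPE_FIRMWARE,
};

static int __init myfw_init(void)
{
	register_mtd_parser(&myfw_parser);
	return 0;
}

static void __exit myfw_exit(void)
{
	deregister_mtd_parser(&myfw_parser);
}

module_init(myfw_init);
module_exit(myfw_exit);
MODULE_LICENSE("GPL");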
diff --git a/target/linux/generic/pending-5.4/403-mtd-hook-mtdsplit-to-Kbuild.patch b/target/linux/generic/pending-5.4/403-mtd-hook-mtdsplit-to-Kbuild.patch
new file mode 100644
index 0000000000..60cf043e9b
--- /dev/null
+++ b/target/linux/generic/pending-5.4/403-mtd-hook-mtdsplit-to-Kbuild.patch
@@ -0,0 +1,32 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: [PATCH] kernel/3.10: move squashfs check from rootfs split code into a separate file
+
+lede-commit: d89bea92b31b4e157a0fa438e75370f089f73427
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/mtd/Kconfig | 2 ++
+ drivers/mtd/Makefile | 2 ++
+ 2 files changed, 4 insertions(+)
+
+--- a/drivers/mtd/Kconfig
++++ b/drivers/mtd/Kconfig
+@@ -26,6 +26,8 @@ config MTD_SPLIT_FIRMWARE_NAME
+ depends on MTD_SPLIT_FIRMWARE
+ default "firmware"
+
++source "drivers/mtd/mtdsplit/Kconfig"
++
+ endmenu
+
+ config MTD_TESTS
+--- a/drivers/mtd/Makefile
++++ b/drivers/mtd/Makefile
+@@ -7,6 +7,8 @@
+ obj-$(CONFIG_MTD) += mtd.o
+ mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o
+
++obj-$(CONFIG_MTD_SPLIT) += mtdsplit/
++
+ obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
+ obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
+ obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
diff --git a/target/linux/generic/pending-5.4/404-mtd-add-more-helper-functions.patch b/target/linux/generic/pending-5.4/404-mtd-add-more-helper-functions.patch
new file mode 100644
index 0000000000..e404ec1fb2
--- /dev/null
+++ b/target/linux/generic/pending-5.4/404-mtd-add-more-helper-functions.patch
@@ -0,0 +1,76 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: kernel/3.10: add separate rootfs partition parser
+
+lede-commit: daec7ad7688415156e2730e401503d09bd3acf91
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/mtd/mtdpart.c | 29 +++++++++++++++++++++++++++++
+ include/linux/mtd/mtd.h | 18 ++++++++++++++++++
+ include/linux/mtd/partitions.h | 2 ++
+ 3 files changed, 49 insertions(+)
+
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -1179,6 +1179,24 @@ int mtd_is_partition(const struct mtd_in
+ }
+ EXPORT_SYMBOL_GPL(mtd_is_partition);
+
++struct mtd_info *mtdpart_get_master(const struct mtd_info *mtd)
++{
++ if (!mtd_is_partition(mtd))
++ return (struct mtd_info *)mtd;
++
++ return mtd_to_part(mtd)->parent;
++}
++EXPORT_SYMBOL_GPL(mtdpart_get_master);
++
++uint64_t mtdpart_get_offset(const struct mtd_info *mtd)
++{
++ if (!mtd_is_partition(mtd))
++ return 0;
++
++ return mtd_to_part(mtd)->offset;
++}
++EXPORT_SYMBOL_GPL(mtdpart_get_offset);
++
+ /* Returns the size of the entire flash chip */
+ uint64_t mtd_get_device_size(const struct mtd_info *mtd)
+ {
+--- a/include/linux/mtd/mtd.h
++++ b/include/linux/mtd/mtd.h
+@@ -507,6 +507,24 @@ static inline void mtd_align_erase_req(s
+ req->len += mtd->erasesize - mod;
+ }
+
++static inline uint64_t mtd_roundup_to_eb(uint64_t sz, struct mtd_info *mtd)
++{
++ if (mtd_mod_by_eb(sz, mtd) == 0)
++ return sz;
++
++ /* Round up to next erase block */
++ return (mtd_div_by_eb(sz, mtd) + 1) * mtd->erasesize;
++}
++
++static inline uint64_t mtd_rounddown_to_eb(uint64_t sz, struct mtd_info *mtd)
++{
++ if (mtd_mod_by_eb(sz, mtd) == 0)
++ return sz;
++
++ /* Round down to the start of the current erase block */
++ return (mtd_div_by_eb(sz, mtd)) * mtd->erasesize;
++}
++
+ static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
+ {
+ if (mtd->writesize_shift)
+--- a/include/linux/mtd/partitions.h
++++ b/include/linux/mtd/partitions.h
+@@ -116,6 +116,8 @@ int mtd_is_partition(const struct mtd_in
+ int mtd_add_partition(struct mtd_info *master, const char *name,
+ long long offset, long long length);
+ int mtd_del_partition(struct mtd_info *master, int partno);
++struct mtd_info *mtdpart_get_master(const struct mtd_info *mtd);
++uint64_t mtdpart_get_offset(const struct mtd_info *mtd);
+ uint64_t mtd_get_device_size(const struct mtd_info *mtd);
+
+ #endif
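
The helpers added above give split and parser code a way to reason about the parent flash from a partition handle. A small sketch of how they combine, assuming the patched mtd.h and partitions.h; the wrapper function itself is illustrative, not part of the patch:

  /* Illustrative only: absolute end of the first `len` bytes of a partition,
   * rounded up to the parent device's erase-block boundary. */
  static uint64_t example_aligned_end(struct mtd_info *mtd, uint64_t len)
  {
          struct mtd_info *master = mtdpart_get_master(mtd);
          uint64_t abs_start = mtdpart_get_offset(mtd);

          return mtd_roundup_to_eb(abs_start + len, master);
  }
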
diff --git a/target/linux/generic/pending-5.4/411-mtd-partial_eraseblock_write.patch b/target/linux/generic/pending-5.4/411-mtd-partial_eraseblock_write.patch
new file mode 100644
index 0000000000..bf9822fda8
--- /dev/null
+++ b/target/linux/generic/pending-5.4/411-mtd-partial_eraseblock_write.patch
@@ -0,0 +1,132 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: mtd: implement write support for partitions covering only a part of an eraseblock (buffer data that would otherwise be erased)
+
+lede-commit: 87a8e8ac1067f58ba831c4aae443f3655c31cd80
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/mtdpart.c | 90 ++++++++++++++++++++++++++++++++++++++++++++-----
+ include/linux/mtd/mtd.h | 4 +++
+ 2 files changed, 85 insertions(+), 9 deletions(-)
+
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -36,6 +36,8 @@
+ #include "mtdcore.h"
+ #include "mtdsplit/mtdsplit.h"
+
++#define MTD_ERASE_PARTIAL 0x8000 /* partition only covers parts of an erase block */
++
+ /* Our partition linked list */
+ static LIST_HEAD(mtd_partitions);
+ static DEFINE_MUTEX(mtd_partitions_mutex);
+@@ -220,6 +222,53 @@ static int part_erase(struct mtd_info *m
+ {
+ struct mtd_part *part = mtd_to_part(mtd);
+ int ret;
++ size_t wrlen = 0;
++ u8 *erase_buf = NULL;
++ u32 erase_buf_ofs = 0;
++ bool partial_start = false;
++
++ if (mtd->flags & MTD_ERASE_PARTIAL) {
++ size_t readlen = 0;
++ u64 mtd_ofs;
++
++ erase_buf = kmalloc(part->parent->erasesize, GFP_ATOMIC);
++ if (!erase_buf)
++ return -ENOMEM;
++
++ mtd_ofs = part->offset + instr->addr;
++ erase_buf_ofs = do_div(mtd_ofs, part->parent->erasesize);
++
++ if (erase_buf_ofs > 0) {
++ instr->addr -= erase_buf_ofs;
++ ret = mtd_read(part->parent,
++ instr->addr + part->offset,
++ part->parent->erasesize,
++ &readlen, erase_buf);
++
++ instr->len += erase_buf_ofs;
++ partial_start = true;
++ } else {
++ mtd_ofs = part->offset + part->mtd.size;
++ erase_buf_ofs = part->parent->erasesize -
++ do_div(mtd_ofs, part->parent->erasesize);
++
++ if (erase_buf_ofs > 0) {
++ instr->len += erase_buf_ofs;
++ ret = mtd_read(part->parent,
++ part->offset + instr->addr +
++ instr->len - part->parent->erasesize,
++ part->parent->erasesize, &readlen,
++ erase_buf);
++ } else {
++ ret = 0;
++ }
++ }
++ if (ret < 0) {
++ kfree(erase_buf);
++ return ret;
++ }
++
++ }
+
+ instr->addr += part->offset;
+ ret = part->parent->_erase(part->parent, instr);
+@@ -227,6 +276,24 @@ static int part_erase(struct mtd_info *m
+ instr->fail_addr -= part->offset;
+ instr->addr -= part->offset;
+
++ if (mtd->flags & MTD_ERASE_PARTIAL) {
++ if (partial_start) {
++ part->parent->_write(part->parent,
++ instr->addr, erase_buf_ofs,
++ &wrlen, erase_buf);
++ instr->addr += erase_buf_ofs;
++ } else {
++ instr->len -= erase_buf_ofs;
++ part->parent->_write(part->parent,
++ instr->addr + instr->len,
++ erase_buf_ofs, &wrlen,
++ erase_buf +
++ part->parent->erasesize -
++ erase_buf_ofs);
++ }
++ kfree(erase_buf);
++ }
++
+ return ret;
+ }
+
+@@ -539,19 +606,22 @@ static struct mtd_part *allocate_partiti
+ remainder = do_div(tmp, wr_alignment);
+ if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
+ /* Doesn't start on a boundary of major erase size */
+- /* FIXME: Let it be writable if it is on a boundary of
+- * _minor_ erase size though */
+- slave->mtd.flags &= ~MTD_WRITEABLE;
+- printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
+- part->name);
++ slave->mtd.flags |= MTD_ERASE_PARTIAL;
++ if (((u32)slave->mtd.size) > parent->erasesize)
++ slave->mtd.flags &= ~MTD_WRITEABLE;
++ else
++ slave->mtd.erasesize = slave->mtd.size;
+ }
+
+- tmp = part_absolute_offset(parent) + slave->mtd.size;
++ tmp = part_absolute_offset(parent) + slave->offset + slave->mtd.size;
+ remainder = do_div(tmp, wr_alignment);
+ if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
+- slave->mtd.flags &= ~MTD_WRITEABLE;
+- printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n",
+- part->name);
++ slave->mtd.flags |= MTD_ERASE_PARTIAL;
++
++ if ((u32)slave->mtd.size > parent->erasesize)
++ slave->mtd.flags &= ~MTD_WRITEABLE;
++ else
++ slave->mtd.erasesize = slave->mtd.size;
+ }
+
+ mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
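
The MTD_ERASE_PARTIAL path above turns an erase that starts mid-block into a read/erase/re-write sequence, and the key quantity is how far into the parent's erase block the request begins, i.e. how many leading bytes must be buffered and restored. A self-contained stand-in for that calculation (plain % replaces do_div(); the numbers are illustrative):

  #include <stdint.h>
  #include <stdio.h>

  /* Bytes of the enclosing erase block that precede the erase request and
   * therefore have to be read back and written again afterwards. */
  static uint32_t leading_bytes_to_preserve(uint64_t part_offset,
                                            uint64_t erase_addr,
                                            uint32_t erasesize)
  {
          return (uint32_t)((part_offset + erase_addr) % erasesize);
  }

  int main(void)
  {
          /* partition starts 0x8000 bytes into a 0x10000-byte erase block */
          printf("0x%x\n", leading_bytes_to_preserve(0x8000, 0, 0x10000)); /* 0x8000 */
          return 0;
  }
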
diff --git a/target/linux/generic/pending-5.4/412-mtd-partial_eraseblock_unlock.patch b/target/linux/generic/pending-5.4/412-mtd-partial_eraseblock_unlock.patch
new file mode 100644
index 0000000000..a54603a0f8
--- /dev/null
+++ b/target/linux/generic/pending-5.4/412-mtd-partial_eraseblock_unlock.patch
@@ -0,0 +1,40 @@
+From: Tim Harvey <tharvey@gateworks.com>
+Subject: mtd: allow partial block unlock
+
+This allows sysupgrade for devices such as the Gateworks Avila/Cambria
+product families based on the ixp4xx using the redboot bootloader with
+combined FIS directory and RedBoot config partitions on larger FLASH
+devices with larger eraseblocks.
+
+This second iteration of this patch addresses previous issues:
+- whitespace breakage fixed
+- unlock in all scenarios
+- simplification and fix logic bug
+
+[john@phrozen.org: this should be moved to the ixp4xx folder]
+
+Signed-off-by: Tim Harvey <tharvey@gateworks.com>
+---
+ drivers/mtd/mtdpart.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -306,7 +306,16 @@ static int part_lock(struct mtd_info *mt
+ static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+ {
+ struct mtd_part *part = mtd_to_part(mtd);
+- return part->parent->_unlock(part->parent, ofs + part->offset, len);
++
++ ofs += part->offset;
++
++ if (mtd->flags & MTD_ERASE_PARTIAL) {
++ /* round up len to next erasesize and round down offset to prev block */
++ len = (mtd_div_by_eb(len, part->parent) + 1) * part->parent->erasesize;
++ ofs &= ~(part->parent->erasesize - 1);
++ }
++
++ return part->parent->_unlock(part->parent, ofs, len);
+ }
+
+ static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
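
The partial-block unlock above simply widens the request to whole parent erase blocks before passing it down. A self-contained sketch mirroring the two rounding lines in the hunk (plain integer math instead of mtd_div_by_eb(); the values are made up):

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint64_t erasesize = 0x20000;            /* parent erase block size */
          uint64_t ofs = 0x7e0000 + 0x1000;        /* partition offset + request */
          uint64_t len = 0x3000;

          len = (len / erasesize + 1) * erasesize; /* round len up to next block */
          ofs &= ~(erasesize - 1);                 /* round ofs down to block start */

          printf("unlock 0x%llx..0x%llx\n",
                 (unsigned long long)ofs,
                 (unsigned long long)(ofs + len));
          return 0;
  }
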
diff --git a/target/linux/generic/pending-5.4/419-mtd-redboot-add-of_match_table-with-DT-binding.patch b/target/linux/generic/pending-5.4/419-mtd-redboot-add-of_match_table-with-DT-binding.patch
new file mode 100644
index 0000000000..fbf9a0553c
--- /dev/null
+++ b/target/linux/generic/pending-5.4/419-mtd-redboot-add-of_match_table-with-DT-binding.patch
@@ -0,0 +1,31 @@
+From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
+Subject: [PATCH] mtd: redboot: add of_match_table with DT binding
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This allows parsing RedBoot-compatible partitions for a properly described
+flash device in DT.
+
+Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
+---
+
+--- a/drivers/mtd/redboot.c
++++ b/drivers/mtd/redboot.c
+@@ -289,9 +289,16 @@ static int parse_redboot_partitions(stru
+ return ret;
+ }
+
++static const struct of_device_id redboot_parser_of_match_table[] = {
++ { .compatible = "ecoscentric,redboot-fis-partitions" },
++ {},
++};
++MODULE_DEVICE_TABLE(of, redboot_parser_of_match_table);
++
+ static struct mtd_part_parser redboot_parser = {
+ .parse_fn = parse_redboot_partitions,
+ .name = "RedBoot",
++ .of_match_table = redboot_parser_of_match_table,
+ };
+ module_mtd_part_parser(redboot_parser);
+
diff --git a/target/linux/generic/pending-5.4/420-mtd-redboot_space.patch b/target/linux/generic/pending-5.4/420-mtd-redboot_space.patch
new file mode 100644
index 0000000000..85fbe0512d
--- /dev/null
+++ b/target/linux/generic/pending-5.4/420-mtd-redboot_space.patch
@@ -0,0 +1,41 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: add patch for including unpartitioned space in the rootfs partition for redboot devices (if applicable)
+
+[john@phrozen.org: used by ixp and others]
+
+lede-commit: 394918851f84e4d00fa16eb900e7700e95091f00
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/redboot.c | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+--- a/drivers/mtd/redboot.c
++++ b/drivers/mtd/redboot.c
+@@ -265,14 +265,21 @@ static int parse_redboot_partitions(stru
+ #endif
+ names += strlen(names)+1;
+
+-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
+- i++;
+- parts[i].offset = parts[i-1].size + parts[i-1].offset;
+- parts[i].size = fl->next->img->flash_base - parts[i].offset;
+- parts[i].name = nullname;
+- }
++ if (!strcmp(parts[i].name, "rootfs")) {
++ parts[i].size = fl->next->img->flash_base;
++ parts[i].size &= ~(master->erasesize - 1);
++ parts[i].size -= parts[i].offset;
++#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
++ nrparts--;
++ } else {
++ i++;
++ parts[i].offset = parts[i-1].size + parts[i-1].offset;
++ parts[i].size = fl->next->img->flash_base - parts[i].offset;
++ parts[i].name = nullname;
+ #endif
++ }
++ }
+ tmp_fl = fl;
+ fl = fl->next;
+ kfree(tmp_fl);
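
The rewritten hunk above grows a partition named "rootfs" up to the start of the next RedBoot image, rounded down to an erase-block boundary, instead of emitting a separate unallocated entry. A self-contained stand-in for that size calculation (the addresses are examples only):

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint32_t erasesize       = 0x10000;
          uint32_t rootfs_offset   = 0x140000;
          uint32_t next_image_base = 0x7e5a34;  /* flash_base of the next image */

          uint32_t size = (next_image_base & ~(erasesize - 1)) - rootfs_offset;

          printf("rootfs: offset 0x%x size 0x%x\n", rootfs_offset, size); /* 0x6a0000 */
          return 0;
  }
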
diff --git a/target/linux/generic/pending-5.4/430-mtd-add-myloader-partition-parser.patch b/target/linux/generic/pending-5.4/430-mtd-add-myloader-partition-parser.patch
new file mode 100644
index 0000000000..bd49893dd8
--- /dev/null
+++ b/target/linux/generic/pending-5.4/430-mtd-add-myloader-partition-parser.patch
@@ -0,0 +1,47 @@
+From: Florian Fainelli <f.fainelli@gmail.com>
+Subject: Add myloader partition table parser
+
+[john@phrozen.org: should be upstreamable]
+
+lede-commit: d8bf22859b51faa09d22c056fe221a45d2f7a3b8
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+---
+ drivers/mtd/Kconfig | 16 ++++++++++++++++
+ drivers/mtd/Makefile | 1 +
+ 2 files changed, 17 insertions(+)
+
+--- a/drivers/mtd/Kconfig
++++ b/drivers/mtd/Kconfig
+@@ -177,6 +177,22 @@ menu "Partition parsers"
+ source "drivers/mtd/parsers/Kconfig"
+ endmenu
+
++config MTD_MYLOADER_PARTS
++ tristate "MyLoader partition parsing"
++ depends on ADM5120 || ATH25 || ATH79
++ ---help---
++ MyLoader is a bootloader which allows the user to define partitions
++ in flash devices, by putting a table in the second erase block
++ on the device, similar to a partition table. This table gives the
++ offsets and lengths of the user defined partitions.
++
++ If you need code which can detect and parse these tables, and
++ register MTD 'partitions' corresponding to each image detected,
++ enable this option.
++
++ You will still need the parsing functions to be called by the driver
++ for your particular device. It won't happen automatically.
++
+ comment "User Modules And Translation Layers"
+
+ #
+--- a/drivers/mtd/Makefile
++++ b/drivers/mtd/Makefile
+@@ -16,6 +16,7 @@ obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
+ obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
+ obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
+ obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
++obj-$(CONFIG_MTD_MYLOADER_PARTS) += myloader.o
+ obj-y += parsers/
+
+ # 'Users' - code which presents functionality to userspace.
diff --git a/target/linux/generic/pending-5.4/431-mtd-bcm47xxpart-check-for-bad-blocks-when-calculatin.patch b/target/linux/generic/pending-5.4/431-mtd-bcm47xxpart-check-for-bad-blocks-when-calculatin.patch
new file mode 100644
index 0000000000..d54c284c34
--- /dev/null
+++ b/target/linux/generic/pending-5.4/431-mtd-bcm47xxpart-check-for-bad-blocks-when-calculatin.patch
@@ -0,0 +1,68 @@
+From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <zajec5@gmail.com>
+Subject: [PATCH] mtd: bcm47xxpart: check for bad blocks when calculating offsets
+
+Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
+---
+
+--- a/drivers/mtd/parsers/parser_trx.c
++++ b/drivers/mtd/parsers/parser_trx.c
+@@ -29,6 +29,33 @@ struct trx_header {
+ uint32_t offset[3];
+ } __packed;
+
++/*
++ * Calculate real end offset (address) for a given amount of data. It checks
++ * all blocks skipping bad ones.
++ */
++static size_t parser_trx_real_offset(struct mtd_info *mtd, size_t bytes)
++{
++ size_t real_offset = 0;
++
++ if (mtd_block_isbad(mtd, real_offset))
++ pr_warn("Base offset shouldn't be at bad block");
++
++ while (bytes >= mtd->erasesize) {
++ bytes -= mtd->erasesize;
++ real_offset += mtd->erasesize;
++ while (mtd_block_isbad(mtd, real_offset)) {
++ real_offset += mtd->erasesize;
++
++ if (real_offset >= mtd->size)
++ return real_offset - mtd->erasesize;
++ }
++ }
++
++ real_offset += bytes;
++
++ return real_offset;
++}
++
+ static const char *parser_trx_data_part_name(struct mtd_info *master,
+ size_t offset)
+ {
+@@ -83,21 +110,21 @@ static int parser_trx_parse(struct mtd_i
+ if (trx.offset[2]) {
+ part = &parts[curr_part++];
+ part->name = "loader";
+- part->offset = trx.offset[i];
++ part->offset = parser_trx_real_offset(mtd, trx.offset[i]);
+ i++;
+ }
+
+ if (trx.offset[i]) {
+ part = &parts[curr_part++];
+ part->name = "linux";
+- part->offset = trx.offset[i];
++ part->offset = parser_trx_real_offset(mtd, trx.offset[i]);
+ i++;
+ }
+
+ if (trx.offset[i]) {
+ part = &parts[curr_part++];
+- part->name = parser_trx_data_part_name(mtd, trx.offset[i]);
+- part->offset = trx.offset[i];
++ part->offset = parser_trx_real_offset(mtd, trx.offset[i]);
++ part->name = parser_trx_data_part_name(mtd, part->offset);
+ i++;
+ }
+
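
parser_trx_real_offset() above converts a byte count from the TRX header into a real flash offset by stepping one erase block at a time and skipping blocks marked bad. A self-contained version of the same walk, with a plain array standing in for mtd_block_isbad():

  #include <stddef.h>
  #include <stdio.h>

  static size_t real_offset_skipping_bad(const int *bad, size_t nblocks,
                                         size_t erasesize, size_t bytes)
  {
          size_t off = 0;

          while (bytes >= erasesize) {
                  bytes -= erasesize;
                  off += erasesize;
                  while (off / erasesize < nblocks && bad[off / erasesize])
                          off += erasesize;   /* skip bad blocks entirely */
          }
          return off + bytes;
  }

  int main(void)
  {
          /* 8 blocks of 64 KiB, block 2 is bad */
          int bad[8] = { 0, 0, 1, 0, 0, 0, 0, 0 };

          /* 0x20000 good bytes end at 0x30000 because block 2 is skipped */
          printf("0x%zx\n", real_offset_skipping_bad(bad, 8, 0x10000, 0x20000));
          return 0;
  }
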
diff --git a/target/linux/generic/pending-5.4/432-mtd-bcm47xxpart-detect-T_Meter-partition.patch b/target/linux/generic/pending-5.4/432-mtd-bcm47xxpart-detect-T_Meter-partition.patch
new file mode 100644
index 0000000000..a6d0828b99
--- /dev/null
+++ b/target/linux/generic/pending-5.4/432-mtd-bcm47xxpart-detect-T_Meter-partition.patch
@@ -0,0 +1,37 @@
+From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <zajec5@gmail.com>
+Subject: mtd: bcm47xxpart: detect T_Meter partition
+
+It can be found on many Netgear devices. It consists of many 0x30-byte blocks
+starting with 4D 54 ("MT").
+
+Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
+---
+ drivers/mtd/bcm47xxpart.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/mtd/bcm47xxpart.c
++++ b/drivers/mtd/bcm47xxpart.c
+@@ -39,6 +39,7 @@
+ #define NVRAM_HEADER 0x48534C46 /* FLSH */
+ #define POT_MAGIC1 0x54544f50 /* POTT */
+ #define POT_MAGIC2 0x504f /* OP */
++#define T_METER_MAGIC 0x4D540000 /* MT */
+ #define ML_MAGIC1 0x39685a42
+ #define ML_MAGIC2 0x26594131
+ #define TRX_MAGIC 0x30524448
+@@ -182,6 +183,15 @@ static int bcm47xxpart_parse(struct mtd_
+ MTD_WRITEABLE);
+ continue;
+ }
++
++ /* T_Meter */
++ if ((le32_to_cpu(buf[0x000 / 4]) & 0xFFFF0000) == T_METER_MAGIC &&
++ (le32_to_cpu(buf[0x030 / 4]) & 0xFFFF0000) == T_METER_MAGIC &&
++ (le32_to_cpu(buf[0x060 / 4]) & 0xFFFF0000) == T_METER_MAGIC) {
++ bcm47xxpart_add_part(&parts[curr_part++], "T_Meter", offset,
++ MTD_WRITEABLE);
++ continue;
++ }
+
+ /* TRX */
+ if (buf[0x000 / 4] == TRX_MAGIC) {
diff --git a/target/linux/generic/pending-5.4/440-block2mtd_init.patch b/target/linux/generic/pending-5.4/440-block2mtd_init.patch
new file mode 100644
index 0000000000..b8ec11f3dd
--- /dev/null
+++ b/target/linux/generic/pending-5.4/440-block2mtd_init.patch
@@ -0,0 +1,116 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: block2mtd
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/devices/block2mtd.c | 30 ++++++++++++++++++++----------
+ 1 file changed, 20 insertions(+), 10 deletions(-)
+
+--- a/drivers/mtd/devices/block2mtd.c
++++ b/drivers/mtd/devices/block2mtd.c
+@@ -26,6 +26,7 @@
+ #include <linux/list.h>
+ #include <linux/init.h>
+ #include <linux/mtd/mtd.h>
++#include <linux/mtd/partitions.h>
+ #include <linux/mutex.h>
+ #include <linux/mount.h>
+ #include <linux/slab.h>
+@@ -214,7 +215,7 @@ static void block2mtd_free_device(struct
+
+
+ static struct block2mtd_dev *add_device(char *devname, int erase_size,
+- int timeout)
++ const char *mtdname, int timeout)
+ {
+ #ifndef MODULE
+ int i;
+@@ -222,6 +223,7 @@ static struct block2mtd_dev *add_device(
+ const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
+ struct block_device *bdev;
+ struct block2mtd_dev *dev;
++ struct mtd_partition *part;
+ char *name;
+
+ if (!devname)
+@@ -278,13 +280,16 @@ static struct block2mtd_dev *add_device(
+
+ /* Setup the MTD structure */
+ /* make the name contain the block device in */
+- name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
++ if (!mtdname)
++ mtdname = devname;
++ name = kmalloc(strlen(mtdname) + 1, GFP_KERNEL);
+ if (!name)
+ goto err_destroy_mutex;
+
++ strcpy(name, mtdname);
+ dev->mtd.name = name;
+
+- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
++ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK & ~(erase_size - 1);
+ dev->mtd.erasesize = erase_size;
+ dev->mtd.writesize = 1;
+ dev->mtd.writebufsize = PAGE_SIZE;
+@@ -297,7 +302,11 @@ static struct block2mtd_dev *add_device(
+ dev->mtd.priv = dev;
+ dev->mtd.owner = THIS_MODULE;
+
+- if (mtd_device_register(&dev->mtd, NULL, 0)) {
++ part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
++ part->name = name;
++ part->offset = 0;
++ part->size = dev->mtd.size;
++ if (mtd_device_register(&dev->mtd, part, 1)) {
+ /* Device didn't get added, so free the entry */
+ goto err_destroy_mutex;
+ }
+@@ -305,8 +314,7 @@ static struct block2mtd_dev *add_device(
+ list_add(&dev->list, &blkmtd_device_list);
+ pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
+ dev->mtd.index,
+- dev->mtd.name + strlen("block2mtd: "),
+- dev->mtd.erasesize >> 10, dev->mtd.erasesize);
++ mtdname, dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+ return dev;
+
+ err_destroy_mutex:
+@@ -379,7 +387,7 @@ static int block2mtd_setup2(const char *
+ /* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
+ char buf[80 + 12 + 80 + 8];
+ char *str = buf;
+- char *token[2];
++ char *token[3];
+ char *name;
+ size_t erase_size = PAGE_SIZE;
+ unsigned long timeout = MTD_DEFAULT_TIMEOUT;
+@@ -393,7 +401,7 @@ static int block2mtd_setup2(const char *
+ strcpy(str, val);
+ kill_final_newline(str);
+
+- for (i = 0; i < 2; i++)
++ for (i = 0; i < 3; i++)
+ token[i] = strsep(&str, ",");
+
+ if (str) {
+@@ -419,8 +427,10 @@ static int block2mtd_setup2(const char *
+ return 0;
+ }
+ }
++ if (token[2] && (strlen(token[2]) + 1 > 80))
++ pr_err("mtd device name too long\n");
+
+- add_device(name, erase_size, timeout);
++ add_device(name, erase_size, token[2], timeout);
+
+ return 0;
+ }
+@@ -454,7 +464,7 @@ static int block2mtd_setup(const char *v
+
+
+ module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
+-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
++MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
+
+ static int __init block2mtd_init(void)
+ {
diff --git a/target/linux/generic/pending-5.4/441-block2mtd_probe.patch b/target/linux/generic/pending-5.4/441-block2mtd_probe.patch
new file mode 100644
index 0000000000..7e974e01ed
--- /dev/null
+++ b/target/linux/generic/pending-5.4/441-block2mtd_probe.patch
@@ -0,0 +1,47 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: block2mtd
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/devices/block2mtd.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/mtd/devices/block2mtd.c
++++ b/drivers/mtd/devices/block2mtd.c
+@@ -387,7 +387,7 @@ static int block2mtd_setup2(const char *
+ /* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
+ char buf[80 + 12 + 80 + 8];
+ char *str = buf;
+- char *token[3];
++ char *token[4];
+ char *name;
+ size_t erase_size = PAGE_SIZE;
+ unsigned long timeout = MTD_DEFAULT_TIMEOUT;
+@@ -401,7 +401,7 @@ static int block2mtd_setup2(const char *
+ strcpy(str, val);
+ kill_final_newline(str);
+
+- for (i = 0; i < 3; i++)
++ for (i = 0; i < 4; i++)
+ token[i] = strsep(&str, ",");
+
+ if (str) {
+@@ -430,6 +430,9 @@ static int block2mtd_setup2(const char *
+ if (token[2] && (strlen(token[2]) + 1 > 80))
+ pr_err("mtd device name too long\n");
+
++ if (token[3] && kstrtoul(token[3], 0, &timeout))
++ pr_err("invalid timeout\n");
++
+ add_device(name, erase_size, token[2], timeout);
+
+ return 0;
+@@ -464,7 +467,7 @@ static int block2mtd_setup(const char *v
+
+
+ module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
+-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
++MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>[,<timeout>]]]\"");
+
+ static int __init block2mtd_init(void)
+ {
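
With both block2mtd patches applied, the module option becomes block2mtd=<dev>[,<erasesize>[,<name>[,<timeout>]]], and setup peels off up to four comma-separated tokens with strsep(). A self-contained illustration of that split (the option string is an example, not a required value):

  #define _DEFAULT_SOURCE   /* for strsep() on glibc */
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          char buf[] = "/dev/mmcblk0p2,65536,firmware,30";  /* example option */
          char *str = buf;
          char *token[4];
          int i;

          for (i = 0; i < 4; i++)
                  token[i] = strsep(&str, ",");

          for (i = 0; i < 4; i++)
                  printf("token[%d] = %s\n", i, token[i] ? token[i] : "(none)");
          return 0;
  }
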
diff --git a/target/linux/generic/pending-5.4/450-mtd-spi-nor-allow-NOR-driver-to-write-fewer-bytes-th.patch b/target/linux/generic/pending-5.4/450-mtd-spi-nor-allow-NOR-driver-to-write-fewer-bytes-th.patch
new file mode 100644
index 0000000000..6981c6d943
--- /dev/null
+++ b/target/linux/generic/pending-5.4/450-mtd-spi-nor-allow-NOR-driver-to-write-fewer-bytes-th.patch
@@ -0,0 +1,36 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 22 Feb 2018 11:11:57 +0100
+Subject: [PATCH] mtd: spi-nor: allow NOR driver to write fewer bytes than
+ requested
+
+The write size can be constrained by the maximum message/transfer size
+of the SPI controller. Only check for ret = 0 to avoid an infinite loop.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1457,7 +1457,7 @@ static int spi_nor_write(struct mtd_info
+
+ write_enable(nor);
+ ret = nor->write(nor, addr, page_remain, buf + i);
+- if (ret < 0)
++ if (ret <= 0)
+ goto write_err;
+ written = ret;
+
+@@ -1466,13 +1466,6 @@ static int spi_nor_write(struct mtd_info
+ goto write_err;
+ *retlen += written;
+ i += written;
+- if (written != page_remain) {
+- dev_err(nor->dev,
+- "While writing %zu bytes written %zd bytes\n",
+- page_remain, written);
+- ret = -EIO;
+- goto write_err;
+- }
+ }
+
+ write_err:
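
The spi-nor change above allows a controller-limited short write: the loop now advances by however many bytes the write callback reports and treats only a zero or negative return as fatal. A self-contained sketch of that pattern, with a fake backend that writes at most 16 bytes per call standing in for the controller:

  #include <stdio.h>

  /* Fake backend: writes at most 16 bytes per call, returns bytes written. */
  static int fake_write(size_t addr, size_t len, const char *buf)
  {
          (void)addr; (void)buf;
          return (int)(len < 16 ? len : 16);
  }

  int main(void)
  {
          const char data[40] = { 0 };
          size_t addr = 0, done = 0, total = sizeof(data);

          while (done < total) {
                  int written = fake_write(addr + done, total - done, data + done);

                  if (written <= 0) {    /* only 0/negative ends the loop early */
                          fprintf(stderr, "write error\n");
                          return 1;
                  }
                  done += (size_t)written;
          }
          printf("wrote %zu bytes in short chunks\n", done);
          return 0;
  }
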
diff --git a/target/linux/generic/pending-5.4/460-mtd-cfi_cmdset_0002-no-erase_suspend.patch b/target/linux/generic/pending-5.4/460-mtd-cfi_cmdset_0002-no-erase_suspend.patch
new file mode 100644
index 0000000000..88d386302a
--- /dev/null
+++ b/target/linux/generic/pending-5.4/460-mtd-cfi_cmdset_0002-no-erase_suspend.patch
@@ -0,0 +1,25 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: disable cfi cmdset 0002 erase suspend
+
+On some platforms, erase suspend leads to data corruption and lockups when write
+ops collide with erase ops. This has been observed on the Buffalo WZR-HP-G300NH.
+Rather than play whack-a-mole with a hard-to-reproduce issue on a variety of devices,
+simply disable erase suspend, as it will usually not produce any useful gain on
+the small filesystems used on embedded hardware.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/chips/cfi_cmdset_0002.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -812,7 +812,7 @@ static int get_chip(struct map_info *map
+ return 0;
+
+ case FL_ERASING:
+- if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
++ if (1 /* no suspend */ || !cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
+ !(mode == FL_READY || mode == FL_POINT ||
+ (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
+ goto sleep;
diff --git a/target/linux/generic/pending-5.4/461-mtd-cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch b/target/linux/generic/pending-5.4/461-mtd-cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch
new file mode 100644
index 0000000000..1b07791eca
--- /dev/null
+++ b/target/linux/generic/pending-5.4/461-mtd-cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch
@@ -0,0 +1,17 @@
+From: George Kashperko <george@znau.edu.ua>
+Subject: Issue map read after Write Buffer Load command to ensure chip is ready to receive data.
+
+Signed-off-by: George Kashperko <george@znau.edu.ua>
+---
+ drivers/mtd/chips/cfi_cmdset_0002.c | 1 +
+ 1 file changed, 1 insertion(+)
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -1838,6 +1838,7 @@ static int __xipram do_write_buffer(stru
+
+ /* Write Buffer Load */
+ map_write(map, CMD(0x25), cmd_adr);
++ (void) map_read(map, cmd_adr);
+
+ chip->state = FL_WRITING_TO_BUFFER;
+
diff --git a/target/linux/generic/pending-5.4/465-m25p80-mx-disable-software-protection.patch b/target/linux/generic/pending-5.4/465-m25p80-mx-disable-software-protection.patch
new file mode 100644
index 0000000000..0c5d6cfa63
--- /dev/null
+++ b/target/linux/generic/pending-5.4/465-m25p80-mx-disable-software-protection.patch
@@ -0,0 +1,18 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: Disable software protection bits for Macronix flashes.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/spi-nor/spi-nor.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -2735,6 +2735,7 @@ static int spi_nor_init(struct spi_nor *
+ */
+ if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
++ JEDEC_MFR(nor->info) == SNOR_MFR_MACRONIX ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
+ nor->info->flags & SPI_NOR_HAS_LOCK) {
+ write_enable(nor);
diff --git a/target/linux/generic/pending-5.4/466-Revert-mtd-spi-nor-fix-Spansion-regressions-aliased-.patch b/target/linux/generic/pending-5.4/466-Revert-mtd-spi-nor-fix-Spansion-regressions-aliased-.patch
new file mode 100644
index 0000000000..fb6ae7df7b
--- /dev/null
+++ b/target/linux/generic/pending-5.4/466-Revert-mtd-spi-nor-fix-Spansion-regressions-aliased-.patch
@@ -0,0 +1,37 @@
+From: Matthias Schiffer <mschiffer@universe-factory.net>
+Date: Tue, 9 Jan 2018 20:41:48 +0100
+Subject: [PATCH] Revert "mtd: spi-nor: fix Spansion regressions (aliased with
+ Winbond)"
+
+This reverts commit 67b9bcd36906e12a15ffec19463afbbd6a41660e.
+
+The underlying issue breaking Spansion flash has been fixed with "mtd: spi-nor:
+wait until lock/unlock operations are ready" and "mtd: spi-nor: wait for SR_WIP
+to clear on initial unlock", so we can support unlocking for Winbond flash
+again.
+
+Signed-off-by: Matthias Schiffer <mschiffer@universe-factory.net>
+---
+ drivers/mtd/spi-nor/spi-nor.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -2737,6 +2737,7 @@ static int spi_nor_init(struct spi_nor *
+ JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_MACRONIX ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
++ JEDEC_MFR(nor->info) == SNOR_MFR_WINBOND ||
+ nor->info->flags & SPI_NOR_HAS_LOCK) {
+ write_enable(nor);
+ write_sr(nor, 0);
+@@ -2873,7 +2874,8 @@ int spi_nor_scan(struct spi_nor *nor, co
+
+ /* NOR protection support for STmicro/Micron chips and similar */
+ if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
+- info->flags & SPI_NOR_HAS_LOCK) {
++ JEDEC_MFR(info) == SNOR_MFR_WINBOND ||
++ info->flags & SPI_NOR_HAS_LOCK) {
+ nor->flash_lock = stm_lock;
+ nor->flash_unlock = stm_unlock;
+ nor->flash_is_locked = stm_is_locked;
diff --git a/target/linux/generic/pending-5.4/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch b/target/linux/generic/pending-5.4/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch
new file mode 100644
index 0000000000..12d785856a
--- /dev/null
+++ b/target/linux/generic/pending-5.4/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch
@@ -0,0 +1,56 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sat, 4 Nov 2017 07:40:23 +0100
+Subject: [PATCH] mtd: spi-nor: support limiting 4K sectors support based on
+ flash size
+
+Some devices need 4K sectors to be able to deal with small flash chips.
+For instance, w25x05 is 64 KiB in size, and without 4K sectors, the
+entire chip is just one erase block.
+On bigger flash chip sizes, using 4K sectors can significantly slow down
+many operations, including using a writable filesystem. There are several
+platforms where it makes sense to use a single kernel on both kinds of
+devices.
+
+To support this properly, allow configuring an upper flash chip size
+limit for 4K sectors support.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/mtd/spi-nor/Kconfig
++++ b/drivers/mtd/spi-nor/Kconfig
+@@ -39,6 +39,17 @@ config SPI_ASPEED_SMC
+ and support for the SPI flash memory controller (SPI) for
+ the host firmware. The implementation only supports SPI NOR.
+
++config MTD_SPI_NOR_USE_4K_SECTORS_LIMIT
++ int "Maximum flash chip size to use 4K sectors on (in KiB)"
++ depends on MTD_SPI_NOR_USE_4K_SECTORS
++ default "4096"
++ help
++ There are many flash chips that support 4K sectors, but are so large
++ that using them significantly slows down writing large amounts of
++ data or using a writable filesystem.
++ Any flash chip larger than the size specified in this option will
++ not use 4K sectors.
++
+ config SPI_ATMEL_QUADSPI
+ tristate "Atmel Quad SPI Controller"
+ depends on ARCH_AT91 || (ARM && COMPILE_TEST && !ARCH_EBSA110)
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -2649,10 +2649,12 @@ static int spi_nor_select_erase(struct s
+
+ #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+ /* prefer "small sector" erase if possible */
+- if (info->flags & SECT_4K) {
++ if ((info->flags & SECT_4K) && (mtd->size <=
++ CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT * 1024)) {
+ nor->erase_opcode = SPINOR_OP_BE_4K;
+ mtd->erasesize = 4096;
+- } else if (info->flags & SECT_4K_PMC) {
++ } else if ((info->flags & SECT_4K_PMC) && (mtd->size <=
++ CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT * 1024)) {
+ nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
+ mtd->erasesize = 4096;
+ } else
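
The size check added above keeps small-sector erase only for chips at or below the configured limit. A self-contained stand-in for the selection logic (the limit macro mirrors the new CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT option; chip sizes are examples):

  #include <stdint.h>
  #include <stdio.h>

  #define USE_4K_SECTORS_LIMIT_KIB 4096   /* stand-in for the Kconfig value */

  static unsigned int pick_erasesize(uint64_t chip_size, int has_sect_4k)
  {
          if (has_sect_4k &&
              chip_size <= (uint64_t)USE_4K_SECTORS_LIMIT_KIB * 1024)
                  return 4096;            /* small chip: keep 4K sectors */
          return 64 * 1024;               /* large chip: uniform 64 KiB erase */
  }

  int main(void)
  {
          printf("64 KiB chip -> %u\n", pick_erasesize(64ULL << 10, 1));
          printf("16 MiB chip -> %u\n", pick_erasesize(16ULL << 20, 1));
          return 0;
  }
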
diff --git a/target/linux/generic/pending-5.4/475-mtd-spi-nor-Add-Winbond-w25q128jv-support.patch b/target/linux/generic/pending-5.4/475-mtd-spi-nor-Add-Winbond-w25q128jv-support.patch
new file mode 100644
index 0000000000..72c4470332
--- /dev/null
+++ b/target/linux/generic/pending-5.4/475-mtd-spi-nor-Add-Winbond-w25q128jv-support.patch
@@ -0,0 +1,34 @@
+From: Robert Marko <robimarko@gmail.com>
+To: linux-mtd@lists.infradead.org
+Subject: mtd: spi-nor: Add Winbond w25q128jv support
+Date: Mon, 25 Jun 2018 13:17:48 +0200
+
+Datasheet:
+http://www.winbond.com/resource-files/w25q128jv%20revf%2003272018%20plus.pdf
+
+Testing done on Mikrotik Routerboard wAP R board.
+It does not support Dual or Quad modes.
+
+Signed-off-by: Robert Marko <robimarko@gmail.com>
+---
+
+Changes in v2:
+ - Correct the title
+---
+ drivers/mtd/spi-nor/spi-nor.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1241,6 +1241,11 @@ static const struct flash_info spi_nor_i
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
++ {
++ "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
++ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
++ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
++ },
+ { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
diff --git a/target/linux/generic/pending-5.4/476-mtd-spi-nor-add-eon-en25q128.patch b/target/linux/generic/pending-5.4/476-mtd-spi-nor-add-eon-en25q128.patch
new file mode 100644
index 0000000000..69ac17ce4c
--- /dev/null
+++ b/target/linux/generic/pending-5.4/476-mtd-spi-nor-add-eon-en25q128.patch
@@ -0,0 +1,18 @@
+From: Piotr Dymacz <pepe2k@gmail.com>
+Subject: kernel/mtd: add support for EON EN25Q128
+
+Signed-off-by: Piotr Dymacz <pepe2k@gmail.com>
+---
+ drivers/mtd/spi-nor/spi-nor.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -994,6 +994,7 @@ static const struct flash_info spi_nor_i
+ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
++ { "en25q128", INFO(0x1c3018, 0, 64 * 1024, 256, SECT_4K) },
+ { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
+ { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
diff --git a/target/linux/generic/pending-5.4/477-mtd-add-spi-nor-add-mx25u3235f.patch b/target/linux/generic/pending-5.4/477-mtd-add-spi-nor-add-mx25u3235f.patch
new file mode 100644
index 0000000000..0648b12e12
--- /dev/null
+++ b/target/linux/generic/pending-5.4/477-mtd-add-spi-nor-add-mx25u3235f.patch
@@ -0,0 +1,18 @@
+From: André Valentin <avalentin@marcant.net>
+Subject: linux/mtd: add id for mx25u3235f needed by ZyXEL NBG6817
+
+Signed-off-by: André Valentin <avalentin@marcant.net>
+---
+ drivers/mtd/spi-nor/spi-nor.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1083,6 +1083,7 @@ static const struct flash_info spi_nor_i
+ { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
+ { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
+ { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
++ { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64, 0) },
+ { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
+ { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
+ { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
diff --git a/target/linux/generic/pending-5.4/479-mtd-spi-nor-add-eon-en25qh64.patch b/target/linux/generic/pending-5.4/479-mtd-spi-nor-add-eon-en25qh64.patch
new file mode 100644
index 0000000000..6bb7754733
--- /dev/null
+++ b/target/linux/generic/pending-5.4/479-mtd-spi-nor-add-eon-en25qh64.patch
@@ -0,0 +1,10 @@
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -996,6 +996,7 @@ static const struct flash_info spi_nor_i
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25q128", INFO(0x1c3018, 0, 64 * 1024, 256, SECT_4K) },
+ { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
++ { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128, 0) },
+ { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
+ { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
diff --git a/target/linux/generic/pending-5.4/479-mtd-spi-nor-add-xtx-xt25f128b.patch b/target/linux/generic/pending-5.4/479-mtd-spi-nor-add-xtx-xt25f128b.patch
new file mode 100644
index 0000000000..664837928b
--- /dev/null
+++ b/target/linux/generic/pending-5.4/479-mtd-spi-nor-add-xtx-xt25f128b.patch
@@ -0,0 +1,42 @@
+From patchwork Thu Feb 6 17:19:41 2020
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Patchwork-Submitter: Daniel Golle <daniel@makrotopia.org>
+X-Patchwork-Id: 1234465
+Date: Thu, 6 Feb 2020 19:19:41 +0200
+From: Daniel Golle <daniel@makrotopia.org>
+To: linux-mtd@lists.infradead.org
+Subject: [PATCH v2] mtd: spi-nor: Add support for xt25f128b chip
+Message-ID: <20200206171941.GA2398@makrotopia.org>
+MIME-Version: 1.0
+Content-Disposition: inline
+List-Subscribe: <http://lists.infradead.org/mailman/listinfo/linux-mtd>,
+ <mailto:linux-mtd-request@lists.infradead.org?subject=subscribe>
+Cc: Eitan Cohen <eitan@neot-semadar.com>, Piotr Dymacz <pepe2k@gmail.com>,
+ Tudor Ambarus <tudor.ambarus@microchip.com>
+Sender: "linux-mtd" <linux-mtd-bounces@lists.infradead.org>
+Errors-To: linux-mtd-bounces+incoming=patchwork.ozlabs.org@lists.infradead.org
+
+Add XT25F128B made by XTX Technology (Shenzhen) Limited.
+This chip supports dual and quad read and uniform 4K-byte erase.
+Verified on Teltonika RUT955 which comes with XT25F128B in recent
+versions of the device.
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/mtd/spi-nor/spi-nor.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1273,6 +1273,9 @@ static const struct flash_info spi_nor_i
+ /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
+ { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
++
++ /* XTX Technology (Shenzhen) Limited */
++ { "xt25f128b", INFO(0x0B4018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { },
+ };
+
diff --git a/target/linux/generic/pending-5.4/480-mtd-set-rootfs-to-be-root-dev.patch b/target/linux/generic/pending-5.4/480-mtd-set-rootfs-to-be-root-dev.patch
new file mode 100644
index 0000000000..dce0432fac
--- /dev/null
+++ b/target/linux/generic/pending-5.4/480-mtd-set-rootfs-to-be-root-dev.patch
@@ -0,0 +1,38 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: kernel/3.1[02]: move MTD root device setup code to mtdcore
+
+The current code only allows the root device to be set
+automatically on MTD partitions. Move the code to the MTD
+core so it can be used with all MTD devices.
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/mtd/mtdcore.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/mtd/mtdcore.c
++++ b/drivers/mtd/mtdcore.c
+@@ -41,6 +41,7 @@
+ #include <linux/reboot.h>
+ #include <linux/leds.h>
+ #include <linux/debugfs.h>
++#include <linux/root_dev.h>
+
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
+@@ -593,6 +594,15 @@ int add_mtd_device(struct mtd_info *mtd)
+ of this try_ nonsense, and no bitching about it
+ either. :) */
+ __module_get(THIS_MODULE);
++
++ if (!strcmp(mtd->name, "rootfs") &&
++ IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
++ ROOT_DEV == 0) {
++ pr_notice("mtd: device %d (%s) set to be root filesystem\n",
++ mtd->index, mtd->name);
++ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
++ }
++
+ return 0;
+
+ fail_added:
diff --git a/target/linux/generic/pending-5.4/490-ubi-auto-attach-mtd-device-named-ubi-or-data-on-boot.patch b/target/linux/generic/pending-5.4/490-ubi-auto-attach-mtd-device-named-ubi-or-data-on-boot.patch
new file mode 100644
index 0000000000..79d18d036f
--- /dev/null
+++ b/target/linux/generic/pending-5.4/490-ubi-auto-attach-mtd-device-named-ubi-or-data-on-boot.patch
@@ -0,0 +1,97 @@
+From: Daniel Golle <daniel@makrotopia.org>
+Subject: ubi: auto-attach mtd device named "ubi" or "data" on boot
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/mtd/ubi/build.c | 36 ++++++++++++++++++++++++++++++++++++
+ 1 file changed, 36 insertions(+)
+
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -1181,6 +1181,73 @@ static struct mtd_info * __init open_mtd
+ return mtd;
+ }
+
++/*
++ * This function tries attaching mtd partitions named either "ubi" or "data"
++ * during boot.
++ */
++static void __init ubi_auto_attach(void)
++{
++ int err;
++ struct mtd_info *mtd;
++ loff_t offset = 0;
++ size_t len;
++ char magic[4];
++
++ /* try attaching mtd device named "ubi" or "data" */
++ mtd = open_mtd_device("ubi");
++ if (IS_ERR(mtd))
++ mtd = open_mtd_device("data");
++
++ if (IS_ERR(mtd))
++ return;
++
++ /* get the first not bad block */
++ if (mtd_can_have_bb(mtd))
++ while (mtd_block_isbad(mtd, offset)) {
++ offset += mtd->erasesize;
++
++ if (offset > mtd->size) {
++ pr_err("UBI error: Failed to find a non-bad "
++ "block on mtd%d\n", mtd->index);
++ goto cleanup;
++ }
++ }
++
++ /* check if the read from flash was successful */
++ err = mtd_read(mtd, offset, 4, &len, (void *) magic);
++ if ((err && !mtd_is_bitflip(err)) || len != 4) {
++ pr_err("UBI error: unable to read from mtd%d\n", mtd->index);
++ goto cleanup;
++ }
++
++ /* check for a valid ubi magic */
++ if (strncmp(magic, "UBI#", 4)) {
++ pr_err("UBI error: no valid UBI magic found inside mtd%d\n", mtd->index);
++ goto cleanup;
++ }
++
++	/* don't auto-add media types where UBI doesn't make sense */
++ if (mtd->type != MTD_NANDFLASH &&
++ mtd->type != MTD_NORFLASH &&
++ mtd->type != MTD_DATAFLASH &&
++ mtd->type != MTD_MLCNANDFLASH)
++ goto cleanup;
++
++ mutex_lock(&ubi_devices_mutex);
++ pr_notice("UBI: auto-attach mtd%d\n", mtd->index);
++ err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0);
++ mutex_unlock(&ubi_devices_mutex);
++ if (err < 0) {
++ pr_err("UBI error: cannot attach mtd%d\n", mtd->index);
++ goto cleanup;
++ }
++
++ return;
++
++cleanup:
++ put_mtd_device(mtd);
++}
++
+ static int __init ubi_init(void)
+ {
+ int err, i, k;
+@@ -1264,6 +1331,12 @@ static int __init ubi_init(void)
+ }
+ }
+
++ /* auto-attach mtd devices only if built-in to the kernel and no ubi.mtd
++ * parameter was given */
++ if (IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
++ !ubi_is_module() && !mtd_devs)
++ ubi_auto_attach();
++
+ err = ubiblock_init();
+ if (err) {
+ pr_err("UBI error: block: cannot initialize, error %d\n", err);
diff --git a/target/linux/generic/pending-5.4/491-ubi-auto-create-ubiblock-device-for-rootfs.patch b/target/linux/generic/pending-5.4/491-ubi-auto-create-ubiblock-device-for-rootfs.patch
new file mode 100644
index 0000000000..cb2d525610
--- /dev/null
+++ b/target/linux/generic/pending-5.4/491-ubi-auto-create-ubiblock-device-for-rootfs.patch
@@ -0,0 +1,66 @@
+From: Daniel Golle <daniel@makrotopia.org>
+Subject: ubi: auto-create ubiblock device for rootfs
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/mtd/ubi/block.c | 42 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 42 insertions(+)
+
+--- a/drivers/mtd/ubi/block.c
++++ b/drivers/mtd/ubi/block.c
+@@ -633,6 +633,44 @@ static void __init ubiblock_create_from_
+ }
+ }
+
++#define UBIFS_NODE_MAGIC 0x06101831
++static inline int ubi_vol_is_ubifs(struct ubi_volume_desc *desc)
++{
++ int ret;
++ uint32_t magic_of, magic;
++ ret = ubi_read(desc, 0, (char *)&magic_of, 0, 4);
++ if (ret)
++ return 0;
++ magic = le32_to_cpu(magic_of);
++ return magic == UBIFS_NODE_MAGIC;
++}
++
++static void __init ubiblock_create_auto_rootfs(void)
++{
++ int ubi_num, ret, is_ubifs;
++ struct ubi_volume_desc *desc;
++ struct ubi_volume_info vi;
++
++ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) {
++ desc = ubi_open_volume_nm(ubi_num, "rootfs", UBI_READONLY);
++ if (IS_ERR(desc))
++ continue;
++
++ ubi_get_volume_info(desc, &vi);
++ is_ubifs = ubi_vol_is_ubifs(desc);
++ ubi_close_volume(desc);
++ if (is_ubifs)
++ break;
++
++ ret = ubiblock_create(&vi);
++ if (ret)
++ pr_err("UBI error: block: can't add '%s' volume, err=%d\n",
++ vi.name, ret);
++ /* always break if we get here */
++ break;
++ }
++}
++
+ static void ubiblock_remove_all(void)
+ {
+ struct ubiblock *next;
+@@ -665,6 +703,10 @@ int __init ubiblock_init(void)
+ */
+ ubiblock_create_from_param();
+
++ /* auto-attach "rootfs" volume if existing and non-ubifs */
++ if (IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV))
++ ubiblock_create_auto_rootfs();
++
+ /*
+ * Block devices are only created upon user requests, so we ignore
+ * existing volumes.
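
ubi_vol_is_ubifs() above decides whether the "rootfs" volume already holds a UBIFS image (in which case no ubiblock device is needed) by comparing the first 32-bit word, read as little-endian, against the UBIFS node magic. A self-contained stand-in for that check (the byte buffer plays the role of data returned by ubi_read()):

  #include <stdint.h>
  #include <stdio.h>

  #define UBIFS_NODE_MAGIC 0x06101831u

  int main(void)
  {
          /* first four bytes of a UBIFS volume, least-significant byte first */
          uint8_t buf[4] = { 0x31, 0x18, 0x10, 0x06 };
          uint32_t magic = (uint32_t)buf[0] | (uint32_t)buf[1] << 8 |
                           (uint32_t)buf[2] << 16 | (uint32_t)buf[3] << 24;

          puts(magic == UBIFS_NODE_MAGIC ? "ubifs volume, skip ubiblock"
                                         : "not ubifs, create ubiblock");
          return 0;
  }
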
diff --git a/target/linux/generic/pending-5.4/492-try-auto-mounting-ubi0-rootfs-in-init-do_mounts.c.patch b/target/linux/generic/pending-5.4/492-try-auto-mounting-ubi0-rootfs-in-init-do_mounts.c.patch
new file mode 100644
index 0000000000..b76e83e4a3
--- /dev/null
+++ b/target/linux/generic/pending-5.4/492-try-auto-mounting-ubi0-rootfs-in-init-do_mounts.c.patch
@@ -0,0 +1,51 @@
+From: Daniel Golle <daniel@makrotopia.org>
+Subject: try auto-mounting ubi0:rootfs in init/do_mounts.c
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ init/do_mounts.c | 26 +++++++++++++++++++++++++-
+ 1 file changed, 25 insertions(+), 1 deletion(-)
+
+--- a/init/do_mounts.c
++++ b/init/do_mounts.c
+@@ -427,7 +427,28 @@ retry:
+ out:
+ put_page(page);
+ }
+-
++
++static int __init mount_ubi_rootfs(void)
++{
++ int flags = MS_SILENT;
++ int err, tried = 0;
++
++ while (tried < 2) {
++ err = do_mount_root("ubi0:rootfs", "ubifs", flags, \
++ root_mount_data);
++ switch (err) {
++ case -EACCES:
++ flags |= MS_RDONLY;
++ tried++;
++ break;
++ default:
++ return err;
++ }
++ }
++
++ return -EINVAL;
++}
++
+ #ifdef CONFIG_ROOT_NFS
+
+ #define NFSROOT_TIMEOUT_MIN 5
+@@ -521,6 +542,10 @@ void __init mount_root(void)
+ change_floppy("root floppy");
+ }
+ #endif
++#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV
++ if (!mount_ubi_rootfs())
++ return;
++#endif
+ #ifdef CONFIG_BLOCK
+ {
+ int err = create_dev("/dev/root", ROOT_DEV);
diff --git a/target/linux/generic/pending-5.4/493-ubi-set-ROOT_DEV-to-ubiblock-rootfs-if-unset.patch b/target/linux/generic/pending-5.4/493-ubi-set-ROOT_DEV-to-ubiblock-rootfs-if-unset.patch
new file mode 100644
index 0000000000..1bb53ada7f
--- /dev/null
+++ b/target/linux/generic/pending-5.4/493-ubi-set-ROOT_DEV-to-ubiblock-rootfs-if-unset.patch
@@ -0,0 +1,34 @@
+From: Daniel Golle <daniel@makrotopia.org>
+Subject: ubi: set ROOT_DEV to ubiblock "rootfs" if unset
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/mtd/ubi/block.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/mtd/ubi/block.c
++++ b/drivers/mtd/ubi/block.c
+@@ -50,6 +50,7 @@
+ #include <linux/scatterlist.h>
+ #include <linux/idr.h>
+ #include <asm/div64.h>
++#include <linux/root_dev.h>
+
+ #include "ubi-media.h"
+ #include "ubi.h"
+@@ -445,6 +446,15 @@ int ubiblock_create(struct ubi_volume_in
+ dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
+ dev->ubi_num, dev->vol_id, vi->name);
+ mutex_unlock(&devices_mutex);
++
++ if (!strcmp(vi->name, "rootfs") &&
++ IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
++ ROOT_DEV == 0) {
++ pr_notice("ubiblock: device ubiblock%d_%d (%s) set to be root filesystem\n",
++ dev->ubi_num, dev->vol_id, vi->name);
++ ROOT_DEV = MKDEV(gd->major, gd->first_minor);
++ }
++
+ return 0;
+
+ out_free_queue:
diff --git a/target/linux/generic/pending-5.4/494-mtd-ubi-add-EOF-marker-support.patch b/target/linux/generic/pending-5.4/494-mtd-ubi-add-EOF-marker-support.patch
new file mode 100644
index 0000000000..e38f11e592
--- /dev/null
+++ b/target/linux/generic/pending-5.4/494-mtd-ubi-add-EOF-marker-support.patch
@@ -0,0 +1,60 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: mtd: add EOF marker support to the UBI layer
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/mtd/ubi/attach.c | 25 ++++++++++++++++++++++---
+ drivers/mtd/ubi/ubi.h | 1 +
+ 2 files changed, 23 insertions(+), 3 deletions(-)
+
+--- a/drivers/mtd/ubi/attach.c
++++ b/drivers/mtd/ubi/attach.c
+@@ -939,6 +939,13 @@ static bool vol_ignored(int vol_id)
+ #endif
+ }
+
++static bool ec_hdr_has_eof(struct ubi_ec_hdr *ech)
++{
++ return ech->padding1[0] == 'E' &&
++ ech->padding1[1] == 'O' &&
++ ech->padding1[2] == 'F';
++}
++
+ /**
+ * scan_peb - scan and process UBI headers of a PEB.
+ * @ubi: UBI device description object
+@@ -971,9 +978,21 @@ static int scan_peb(struct ubi_device *u
+ return 0;
+ }
+
+- err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+- if (err < 0)
+- return err;
++ if (!ai->eof_found) {
++ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
++ if (err < 0)
++ return err;
++
++ if (ec_hdr_has_eof(ech)) {
++ pr_notice("UBI: EOF marker found, PEBs from %d will be erased\n",
++ pnum);
++ ai->eof_found = true;
++ }
++ }
++
++ if (ai->eof_found)
++ err = UBI_IO_FF_BITFLIPS;
++
+ switch (err) {
+ case 0:
+ break;
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -789,6 +789,7 @@ struct ubi_attach_info {
+ int mean_ec;
+ uint64_t ec_sum;
+ int ec_count;
++ bool eof_found;
+ struct kmem_cache *aeb_slab_cache;
+ struct ubi_ec_hdr *ech;
+ struct ubi_vid_io_buf *vidb;
diff --git a/target/linux/generic/pending-5.4/495-mtd-core-add-get_mtd_device_by_node.patch b/target/linux/generic/pending-5.4/495-mtd-core-add-get_mtd_device_by_node.patch
new file mode 100644
index 0000000000..1446ca2abc
--- /dev/null
+++ b/target/linux/generic/pending-5.4/495-mtd-core-add-get_mtd_device_by_node.patch
@@ -0,0 +1,75 @@
+From 1bd1b740f208d1cf4071932cc51860d37266c402 Mon Sep 17 00:00:00 2001
+From: Bernhard Frauendienst <kernel@nospam.obeliks.de>
+Date: Sat, 1 Sep 2018 00:30:11 +0200
+Subject: [PATCH 495/497] mtd: core: add get_mtd_device_by_node
+
+Add function to retrieve a mtd device by its OF node. Since drivers can
+assign arbitrary names to mtd devices in the absence of a label
+property, there is no other reliable way to retrieve a mtd device for a
+given OF node.
+
+Signed-off-by: Bernhard Frauendienst <kernel@nospam.obeliks.de>
+Reviewed-by: Miquel Raynal <miquel.raynal@bootlin.com>
+---
+ drivers/mtd/mtdcore.c | 38 ++++++++++++++++++++++++++++++++++++++
+ include/linux/mtd/mtd.h | 2 ++
+ 2 files changed, 40 insertions(+)
+
+--- a/drivers/mtd/mtdcore.c
++++ b/drivers/mtd/mtdcore.c
+@@ -938,6 +938,44 @@ out_unlock:
+ }
+ EXPORT_SYMBOL_GPL(get_mtd_device_nm);
+
++/**
++ * get_mtd_device_by_node - obtain a validated handle for an MTD device
++ * by of_node
++ * @of_node: OF node of MTD device to open
++ *
++ * This function returns MTD device description structure in case of
++ * success and an error code in case of failure.
++ */
++struct mtd_info *get_mtd_device_by_node(const struct device_node *of_node)
++{
++ int err = -ENODEV;
++ struct mtd_info *mtd = NULL, *other;
++
++ mutex_lock(&mtd_table_mutex);
++
++ mtd_for_each_device(other) {
++ if (of_node == other->dev.of_node) {
++ mtd = other;
++ break;
++ }
++ }
++
++ if (!mtd)
++ goto out_unlock;
++
++ err = __get_mtd_device(mtd);
++ if (err)
++ goto out_unlock;
++
++ mutex_unlock(&mtd_table_mutex);
++ return mtd;
++
++out_unlock:
++ mutex_unlock(&mtd_table_mutex);
++ return ERR_PTR(err);
++}
++EXPORT_SYMBOL_GPL(get_mtd_device_by_node);
++
+ void put_mtd_device(struct mtd_info *mtd)
+ {
+ mutex_lock(&mtd_table_mutex);
+--- a/include/linux/mtd/mtd.h
++++ b/include/linux/mtd/mtd.h
+@@ -589,6 +589,8 @@ extern struct mtd_info *get_mtd_device(s
+ extern int __get_mtd_device(struct mtd_info *mtd);
+ extern void __put_mtd_device(struct mtd_info *mtd);
+ extern struct mtd_info *get_mtd_device_nm(const char *name);
++extern struct mtd_info *get_mtd_device_by_node(
++ const struct device_node *of_node);
+ extern void put_mtd_device(struct mtd_info *mtd);
+
+
diff --git a/target/linux/generic/pending-5.4/496-dt-bindings-add-bindings-for-mtd-concat-devices.patch b/target/linux/generic/pending-5.4/496-dt-bindings-add-bindings-for-mtd-concat-devices.patch
new file mode 100644
index 0000000000..01f3b9ec2d
--- /dev/null
+++ b/target/linux/generic/pending-5.4/496-dt-bindings-add-bindings-for-mtd-concat-devices.patch
@@ -0,0 +1,52 @@
+From 5734c6669fba7ddb5ef491ccff7159d15dba0b59 Mon Sep 17 00:00:00 2001
+From: Bernhard Frauendienst <kernel@nospam.obeliks.de>
+Date: Wed, 5 Sep 2018 01:32:51 +0200
+Subject: [PATCH 496/497] dt-bindings: add bindings for mtd-concat devices
+
+Document virtual mtd-concat device bindings.
+
+Signed-off-by: Bernhard Frauendienst <kernel@nospam.obeliks.de>
+---
+ .../devicetree/bindings/mtd/mtd-concat.txt | 36 +++++++++++++++++++
+ 1 file changed, 36 insertions(+)
+ create mode 100644 Documentation/devicetree/bindings/mtd/mtd-concat.txt
+
+--- /dev/null
++++ b/Documentation/devicetree/bindings/mtd/mtd-concat.txt
+@@ -0,0 +1,36 @@
++Virtual MTD concat device
++
++Requires properties:
++- devices: list of phandles to mtd nodes that should be concatenated
++
++Example:
++
++&spi {
++ flash0: flash@0 {
++ ...
++ };
++ flash1: flash@1 {
++ ...
++ };
++};
++
++flash {
++ compatible = "mtd-concat";
++
++ devices = <&flash0 &flash1>;
++
++ partitions {
++ compatible = "fixed-partitions";
++
++ partition@0 {
++ label = "boot";
++ reg = <0x0000000 0x0040000>;
++ read-only;
++ };
++
++ partition@40000 {
++ label = "firmware";
++ reg = <0x0040000 0x1fc0000>;
++ };
++ }
++}
diff --git a/target/linux/generic/pending-5.4/497-mtd-mtdconcat-add-dt-driver-for-concat-devices.patch b/target/linux/generic/pending-5.4/497-mtd-mtdconcat-add-dt-driver-for-concat-devices.patch
new file mode 100644
index 0000000000..6584e22882
--- /dev/null
+++ b/target/linux/generic/pending-5.4/497-mtd-mtdconcat-add-dt-driver-for-concat-devices.patch
@@ -0,0 +1,216 @@
+From e53f712d8eac71f54399b61038ccf87d2cee99d7 Mon Sep 17 00:00:00 2001
+From: Bernhard Frauendienst <kernel@nospam.obeliks.de>
+Date: Sat, 25 Aug 2018 12:35:22 +0200
+Subject: [PATCH 497/497] mtd: mtdconcat: add dt driver for concat devices
+
+Some mtd drivers like physmap variants have support for concatenating
+multiple mtd devices, but there is no generic way to define such a
+concat device from within the device tree.
+
+This is useful for some SoC boards that use multiple flash chips as
+memory banks of a single mtd device, with partitions spanning chip
+borders.
+
+This commit adds a driver for creating virtual mtd-concat devices. They
+must have a compatible = "mtd-concat" line, and define a list of devices
+to concat in the 'devices' property, for example:
+
+flash {
+ compatible = "mtd-concat";
+
+ devices = <&flash0 &flash1>;
+
+ partitions {
+ ...
+ };
+};
+
+The driver is added to the very end of the mtd Makefile to increase the
+likelihood of all child devices already being loaded at the time of
+probing, preventing unnecessary deferred probes.
+
+Signed-off-by: Bernhard Frauendienst <kernel@nospam.obeliks.de>
+---
+ drivers/mtd/Kconfig | 2 +
+ drivers/mtd/Makefile | 3 +
+ drivers/mtd/composite/Kconfig | 12 +++
+ drivers/mtd/composite/Makefile | 6 ++
+ drivers/mtd/composite/virt_concat.c | 128 ++++++++++++++++++++++++++++
+ 5 files changed, 151 insertions(+)
+ create mode 100644 drivers/mtd/composite/Kconfig
+ create mode 100644 drivers/mtd/composite/Makefile
+ create mode 100644 drivers/mtd/composite/virt_concat.c
+
+--- a/drivers/mtd/Kconfig
++++ b/drivers/mtd/Kconfig
+@@ -374,4 +374,6 @@ source "drivers/mtd/spi-nor/Kconfig"
+
+ source "drivers/mtd/ubi/Kconfig"
+
++source "drivers/mtd/composite/Kconfig"
++
+ endif # MTD
+--- a/drivers/mtd/Makefile
++++ b/drivers/mtd/Makefile
+@@ -39,3 +39,6 @@ obj-y += chips/ lpddr/ maps/ devices/ n
+
+ obj-$(CONFIG_MTD_SPI_NOR) += spi-nor/
+ obj-$(CONFIG_MTD_UBI) += ubi/
++
++# Composite drivers must be loaded last
++obj-y += composite/
+--- /dev/null
++++ b/drivers/mtd/composite/Kconfig
+@@ -0,0 +1,12 @@
++menu "Composite MTD device drivers"
++ depends on MTD!=n
++
++config MTD_VIRT_CONCAT
++ tristate "Virtual concat MTD device"
++ help
++ This driver allows creation of a virtual MTD concat device, which
++ concatenates multiple underlying MTD devices to a single device.
++ This is required by some SoC boards where multiple memory banks are
++ used as one device with partitions spanning across device boundaries.
++
++endmenu
+--- /dev/null
++++ b/drivers/mtd/composite/Makefile
+@@ -0,0 +1,6 @@
++# SPDX-License-Identifier: GPL-2.0
++#
++# linux/drivers/mtd/composite/Makefile
++#
++
++obj-$(CONFIG_MTD_VIRT_CONCAT) += virt_concat.o
+--- /dev/null
++++ b/drivers/mtd/composite/virt_concat.c
+@@ -0,0 +1,128 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Virtual concat MTD device driver
++ *
++ * Copyright (C) 2018 Bernhard Frauendienst
++ * Author: Bernhard Frauendienst, kernel@nospam.obeliks.de
++ */
++
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/mtd/concat.h>
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/partitions.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++#include <linux/slab.h>
++
++/*
++ * struct of_virt_concat - platform device driver data.
++ * @cmtd the final mtd_concat device
++ * @num_devices the number of devices in @devices
++ * @devices points to an array of devices already loaded
++ */
++struct of_virt_concat {
++ struct mtd_info *cmtd;
++ int num_devices;
++ struct mtd_info **devices;
++};
++
++static int virt_concat_remove(struct platform_device *pdev)
++{
++ struct of_virt_concat *info;
++ int i;
++
++ info = platform_get_drvdata(pdev);
++ if (!info)
++ return 0;
++
++ // unset data for when this is called after a probe error
++ platform_set_drvdata(pdev, NULL);
++
++ if (info->cmtd) {
++ mtd_device_unregister(info->cmtd);
++ mtd_concat_destroy(info->cmtd);
++ }
++
++ if (info->devices) {
++ for (i = 0; i < info->num_devices; i++)
++ put_mtd_device(info->devices[i]);
++ }
++
++ return 0;
++}
++
++static int virt_concat_probe(struct platform_device *pdev)
++{
++ struct device_node *node = pdev->dev.of_node;
++ struct of_phandle_iterator it;
++ struct of_virt_concat *info;
++ struct mtd_info *mtd;
++ int err = 0, count;
++
++ count = of_count_phandle_with_args(node, "devices", NULL);
++ if (count <= 0)
++ return -EINVAL;
++
++ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
++ if (!info)
++ return -ENOMEM;
++ info->devices = devm_kcalloc(&pdev->dev, count,
++ sizeof(*(info->devices)), GFP_KERNEL);
++ if (!info->devices) {
++ err = -ENOMEM;
++ goto err_remove;
++ }
++
++ platform_set_drvdata(pdev, info);
++
++ of_for_each_phandle(&it, err, node, "devices", NULL, 0) {
++ mtd = get_mtd_device_by_node(it.node);
++ if (IS_ERR(mtd)) {
++ of_node_put(it.node);
++ err = -EPROBE_DEFER;
++ goto err_remove;
++ }
++
++ info->devices[info->num_devices++] = mtd;
++ }
++
++ info->cmtd = mtd_concat_create(info->devices, info->num_devices,
++ dev_name(&pdev->dev));
++ if (!info->cmtd) {
++ err = -ENXIO;
++ goto err_remove;
++ }
++
++ info->cmtd->dev.parent = &pdev->dev;
++ mtd_set_of_node(info->cmtd, node);
++ mtd_device_register(info->cmtd, NULL, 0);
++
++ return 0;
++
++err_remove:
++ virt_concat_remove(pdev);
++
++ return err;
++}
++
++static const struct of_device_id virt_concat_of_match[] = {
++ { .compatible = "mtd-concat", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, virt_concat_of_match);
++
++static struct platform_driver virt_concat_driver = {
++ .probe = virt_concat_probe,
++ .remove = virt_concat_remove,
++ .driver = {
++ .name = "virt-mtdconcat",
++ .of_match_table = virt_concat_of_match,
++ },
++};
++
++module_platform_driver(virt_concat_driver);
++
++MODULE_LICENSE("GPL v2");
++MODULE_AUTHOR("Bernhard Frauendienst <kernel@nospam.obeliks.de>");
++MODULE_DESCRIPTION("Virtual concat MTD device driver");
diff --git a/target/linux/generic/pending-5.4/530-jffs2_make_lzma_available.patch b/target/linux/generic/pending-5.4/530-jffs2_make_lzma_available.patch
new file mode 100644
index 0000000000..f28186a40c
--- /dev/null
+++ b/target/linux/generic/pending-5.4/530-jffs2_make_lzma_available.patch
@@ -0,0 +1,5180 @@
+From: Alexandros C. Couloumbis <alex@ozo.com>
+Subject: fs: add jffs2/lzma support (not activated by default yet)
+
+lede-commit: c2c88d315fa0e881f8b19da07b62859b915b11b2
+Signed-off-by: Alexandros C. Couloumbis <alex@ozo.com>
+---
+ fs/jffs2/Kconfig | 9 +
+ fs/jffs2/Makefile | 3 +
+ fs/jffs2/compr.c | 6 +
+ fs/jffs2/compr.h | 10 +-
+ fs/jffs2/compr_lzma.c | 128 +++
+ fs/jffs2/super.c | 33 +-
+ include/linux/lzma.h | 62 ++
+ include/linux/lzma/LzFind.h | 115 +++
+ include/linux/lzma/LzHash.h | 54 +
+ include/linux/lzma/LzmaDec.h | 231 +++++
+ include/linux/lzma/LzmaEnc.h | 80 ++
+ include/linux/lzma/Types.h | 226 +++++
+ include/uapi/linux/jffs2.h | 1 +
+ lib/Kconfig | 6 +
+ lib/Makefile | 12 +
+ lib/lzma/LzFind.c | 761 ++++++++++++++
+ lib/lzma/LzmaDec.c | 999 +++++++++++++++++++
+ lib/lzma/LzmaEnc.c | 2271 ++++++++++++++++++++++++++++++++++++++++++
+ lib/lzma/Makefile | 7 +
+ 19 files changed, 5008 insertions(+), 6 deletions(-)
+ create mode 100644 fs/jffs2/compr_lzma.c
+ create mode 100644 include/linux/lzma.h
+ create mode 100644 include/linux/lzma/LzFind.h
+ create mode 100644 include/linux/lzma/LzHash.h
+ create mode 100644 include/linux/lzma/LzmaDec.h
+ create mode 100644 include/linux/lzma/LzmaEnc.h
+ create mode 100644 include/linux/lzma/Types.h
+ create mode 100644 lib/lzma/LzFind.c
+ create mode 100644 lib/lzma/LzmaDec.c
+ create mode 100644 lib/lzma/LzmaEnc.c
+ create mode 100644 lib/lzma/Makefile
+
+--- a/fs/jffs2/Kconfig
++++ b/fs/jffs2/Kconfig
+@@ -135,6 +135,15 @@ config JFFS2_LZO
+ This feature was added in July, 2007. Say 'N' if you need
+ compatibility with older bootloaders or kernels.
+
++config JFFS2_LZMA
++ bool "JFFS2 LZMA compression support" if JFFS2_COMPRESSION_OPTIONS
++ select LZMA_COMPRESS
++ select LZMA_DECOMPRESS
++ depends on JFFS2_FS
++ default n
++ help
++ JFFS2 wrapper to the LZMA C SDK
++
+ config JFFS2_RTIME
+ bool "JFFS2 RTIME compression support" if JFFS2_COMPRESSION_OPTIONS
+ depends on JFFS2_FS
+--- a/fs/jffs2/Makefile
++++ b/fs/jffs2/Makefile
+@@ -19,4 +19,7 @@ jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rub
+ jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o
+ jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o
+ jffs2-$(CONFIG_JFFS2_LZO) += compr_lzo.o
++jffs2-$(CONFIG_JFFS2_LZMA) += compr_lzma.o
+ jffs2-$(CONFIG_JFFS2_SUMMARY) += summary.o
++
++CFLAGS_compr_lzma.o += -Iinclude/linux -Ilib/lzma
+--- a/fs/jffs2/compr.c
++++ b/fs/jffs2/compr.c
+@@ -378,6 +378,9 @@ int __init jffs2_compressors_init(void)
+ #ifdef CONFIG_JFFS2_LZO
+ jffs2_lzo_init();
+ #endif
++#ifdef CONFIG_JFFS2_LZMA
++ jffs2_lzma_init();
++#endif
+ /* Setting default compression mode */
+ #ifdef CONFIG_JFFS2_CMODE_NONE
+ jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
+@@ -401,6 +404,9 @@ int __init jffs2_compressors_init(void)
+ int jffs2_compressors_exit(void)
+ {
+ /* Unregistering compressors */
++#ifdef CONFIG_JFFS2_LZMA
++ jffs2_lzma_exit();
++#endif
+ #ifdef CONFIG_JFFS2_LZO
+ jffs2_lzo_exit();
+ #endif
+--- a/fs/jffs2/compr.h
++++ b/fs/jffs2/compr.h
+@@ -29,9 +29,9 @@
+ #define JFFS2_DYNRUBIN_PRIORITY 20
+ #define JFFS2_LZARI_PRIORITY 30
+ #define JFFS2_RTIME_PRIORITY 50
+-#define JFFS2_ZLIB_PRIORITY 60
+-#define JFFS2_LZO_PRIORITY 80
+-
++#define JFFS2_LZMA_PRIORITY 70
++#define JFFS2_ZLIB_PRIORITY 80
++#define JFFS2_LZO_PRIORITY 90
+
+ #define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */
+ #define JFFS2_DYNRUBIN_DISABLED /* for decompression */
+@@ -101,5 +101,9 @@ void jffs2_zlib_exit(void);
+ int jffs2_lzo_init(void);
+ void jffs2_lzo_exit(void);
+ #endif
++#ifdef CONFIG_JFFS2_LZMA
++int jffs2_lzma_init(void);
++void jffs2_lzma_exit(void);
++#endif
+
+ #endif /* __JFFS2_COMPR_H__ */
+--- /dev/null
++++ b/fs/jffs2/compr_lzma.c
+@@ -0,0 +1,128 @@
++/*
++ * JFFS2 -- Journalling Flash File System, Version 2.
++ *
++ * For licensing information, see the file 'LICENCE' in this directory.
++ *
++ * JFFS2 wrapper to the LZMA C SDK
++ *
++ */
++
++#include <linux/lzma.h>
++#include "compr.h"
++
++#ifdef __KERNEL__
++ static DEFINE_MUTEX(deflate_mutex);
++#endif
++
++CLzmaEncHandle *p;
++Byte propsEncoded[LZMA_PROPS_SIZE];
++SizeT propsSize = sizeof(propsEncoded);
++
++STATIC void lzma_free_workspace(void)
++{
++ LzmaEnc_Destroy(p, &lzma_alloc, &lzma_alloc);
++}
++
++STATIC int INIT lzma_alloc_workspace(CLzmaEncProps *props)
++{
++ if ((p = (CLzmaEncHandle *)LzmaEnc_Create(&lzma_alloc)) == NULL)
++ {
++ PRINT_ERROR("Failed to allocate lzma deflate workspace\n");
++ return -ENOMEM;
++ }
++
++ if (LzmaEnc_SetProps(p, props) != SZ_OK)
++ {
++ lzma_free_workspace();
++ return -1;
++ }
++
++ if (LzmaEnc_WriteProperties(p, propsEncoded, &propsSize) != SZ_OK)
++ {
++ lzma_free_workspace();
++ return -1;
++ }
++
++ return 0;
++}
++
++STATIC int jffs2_lzma_compress(unsigned char *data_in, unsigned char *cpage_out,
++ uint32_t *sourcelen, uint32_t *dstlen)
++{
++ SizeT compress_size = (SizeT)(*dstlen);
++ int ret;
++
++ #ifdef __KERNEL__
++ mutex_lock(&deflate_mutex);
++ #endif
++
++ ret = LzmaEnc_MemEncode(p, cpage_out, &compress_size, data_in, *sourcelen,
++ 0, NULL, &lzma_alloc, &lzma_alloc);
++
++ #ifdef __KERNEL__
++ mutex_unlock(&deflate_mutex);
++ #endif
++
++ if (ret != SZ_OK)
++ return -1;
++
++ *dstlen = (uint32_t)compress_size;
++
++ return 0;
++}
++
++STATIC int jffs2_lzma_decompress(unsigned char *data_in, unsigned char *cpage_out,
++ uint32_t srclen, uint32_t destlen)
++{
++ int ret;
++ SizeT dl = (SizeT)destlen;
++ SizeT sl = (SizeT)srclen;
++ ELzmaStatus status;
++
++ ret = LzmaDecode(cpage_out, &dl, data_in, &sl, propsEncoded,
++ propsSize, LZMA_FINISH_ANY, &status, &lzma_alloc);
++
++ if (ret != SZ_OK || status == LZMA_STATUS_NOT_FINISHED || dl != (SizeT)destlen)
++ return -1;
++
++ return 0;
++}
++
++static struct jffs2_compressor jffs2_lzma_comp = {
++ .priority = JFFS2_LZMA_PRIORITY,
++ .name = "lzma",
++ .compr = JFFS2_COMPR_LZMA,
++ .compress = &jffs2_lzma_compress,
++ .decompress = &jffs2_lzma_decompress,
++ .disabled = 0,
++};
++
++int INIT jffs2_lzma_init(void)
++{
++ int ret;
++ CLzmaEncProps props;
++ LzmaEncProps_Init(&props);
++
++ props.dictSize = LZMA_BEST_DICT(0x2000);
++ props.level = LZMA_BEST_LEVEL;
++ props.lc = LZMA_BEST_LC;
++ props.lp = LZMA_BEST_LP;
++ props.pb = LZMA_BEST_PB;
++ props.fb = LZMA_BEST_FB;
++
++ ret = lzma_alloc_workspace(&props);
++ if (ret < 0)
++ return ret;
++
++ ret = jffs2_register_compressor(&jffs2_lzma_comp);
++ if (ret)
++ lzma_free_workspace();
++
++ return ret;
++}
++
++void jffs2_lzma_exit(void)
++{
++ jffs2_unregister_compressor(&jffs2_lzma_comp);
++ lzma_free_workspace();
++}
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -374,14 +374,41 @@ static int __init init_jffs2_fs(void)
+ BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
+ BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32);
+
+- pr_info("version 2.2."
++ pr_info("version 2.2"
+ #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ " (NAND)"
+ #endif
+ #ifdef CONFIG_JFFS2_SUMMARY
+- " (SUMMARY) "
++ " (SUMMARY)"
+ #endif
+- " © 2001-2006 Red Hat, Inc.\n");
++#ifdef CONFIG_JFFS2_ZLIB
++ " (ZLIB)"
++#endif
++#ifdef CONFIG_JFFS2_LZO
++ " (LZO)"
++#endif
++#ifdef CONFIG_JFFS2_LZMA
++ " (LZMA)"
++#endif
++#ifdef CONFIG_JFFS2_RTIME
++ " (RTIME)"
++#endif
++#ifdef CONFIG_JFFS2_RUBIN
++ " (RUBIN)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_NONE
++ " (CMODE_NONE)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_PRIORITY
++ " (CMODE_PRIORITY)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_SIZE
++ " (CMODE_SIZE)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_FAVOURLZO
++ " (CMODE_FAVOURLZO)"
++#endif
++ " (c) 2001-2006 Red Hat, Inc.\n");
+
+ jffs2_inode_cachep = kmem_cache_create("jffs2_i",
+ sizeof(struct jffs2_inode_info),
+--- /dev/null
++++ b/include/linux/lzma.h
+@@ -0,0 +1,62 @@
++#ifndef __LZMA_H__
++#define __LZMA_H__
++
++#ifdef __KERNEL__
++ #include <linux/kernel.h>
++ #include <linux/sched.h>
++ #include <linux/slab.h>
++ #include <linux/vmalloc.h>
++ #include <linux/init.h>
++ #define LZMA_MALLOC vmalloc
++ #define LZMA_FREE vfree
++ #define PRINT_ERROR(msg) printk(KERN_WARNING #msg)
++ #define INIT __init
++ #define STATIC static
++#else
++ #include <stdint.h>
++ #include <stdlib.h>
++ #include <stdio.h>
++ #include <unistd.h>
++ #include <string.h>
++ #include <asm/types.h>
++ #include <errno.h>
++ #include <linux/jffs2.h>
++ #ifndef PAGE_SIZE
++ extern int page_size;
++ #define PAGE_SIZE page_size
++ #endif
++ #define LZMA_MALLOC malloc
++ #define LZMA_FREE free
++ #define PRINT_ERROR(msg) fprintf(stderr, msg)
++ #define INIT
++ #define STATIC
++#endif
++
++#include "lzma/LzmaDec.h"
++#include "lzma/LzmaEnc.h"
++
++#define LZMA_BEST_LEVEL (9)
++#define LZMA_BEST_LC (0)
++#define LZMA_BEST_LP (0)
++#define LZMA_BEST_PB (0)
++#define LZMA_BEST_FB (273)
++
++#define LZMA_BEST_DICT(n) (((int)((n) / 2)) * 2)
++
++static void *p_lzma_malloc(void *p, size_t size)
++{
++ if (size == 0)
++ return NULL;
++
++ return LZMA_MALLOC(size);
++}
++
++static void p_lzma_free(void *p, void *address)
++{
++ if (address != NULL)
++ LZMA_FREE(address);
++}
++
++static ISzAlloc lzma_alloc = {p_lzma_malloc, p_lzma_free};
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzFind.h
+@@ -0,0 +1,115 @@
++/* LzFind.h -- Match finder for LZ algorithms
++2009-04-22 : Igor Pavlov : Public domain */
++
++#ifndef __LZ_FIND_H
++#define __LZ_FIND_H
++
++#include "Types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++typedef UInt32 CLzRef;
++
++typedef struct _CMatchFinder
++{
++ Byte *buffer;
++ UInt32 pos;
++ UInt32 posLimit;
++ UInt32 streamPos;
++ UInt32 lenLimit;
++
++ UInt32 cyclicBufferPos;
++ UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */
++
++ UInt32 matchMaxLen;
++ CLzRef *hash;
++ CLzRef *son;
++ UInt32 hashMask;
++ UInt32 cutValue;
++
++ Byte *bufferBase;
++ ISeqInStream *stream;
++ int streamEndWasReached;
++
++ UInt32 blockSize;
++ UInt32 keepSizeBefore;
++ UInt32 keepSizeAfter;
++
++ UInt32 numHashBytes;
++ int directInput;
++ size_t directInputRem;
++ int btMode;
++ int bigHash;
++ UInt32 historySize;
++ UInt32 fixedHashSize;
++ UInt32 hashSizeSum;
++ UInt32 numSons;
++ SRes result;
++ UInt32 crc[256];
++} CMatchFinder;
++
++#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((p)->buffer)
++#define Inline_MatchFinder_GetIndexByte(p, index) ((p)->buffer[(Int32)(index)])
++
++#define Inline_MatchFinder_GetNumAvailableBytes(p) ((p)->streamPos - (p)->pos)
++
++int MatchFinder_NeedMove(CMatchFinder *p);
++Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p);
++void MatchFinder_MoveBlock(CMatchFinder *p);
++void MatchFinder_ReadIfRequired(CMatchFinder *p);
++
++void MatchFinder_Construct(CMatchFinder *p);
++
++/* Conditions:
++ historySize <= 3 GB
++ keepAddBufferBefore + matchMaxLen + keepAddBufferAfter < 511MB
++*/
++int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
++ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
++ ISzAlloc *alloc);
++void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc);
++void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems);
++void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue);
++
++UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *buffer, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue,
++ UInt32 *distances, UInt32 maxLen);
++
++/*
++Conditions:
++ Mf_GetNumAvailableBytes_Func must be called before each Mf_GetMatchLen_Func.
++ Mf_GetPointerToCurrentPos_Func's result must be used only before any other function
++*/
++
++typedef void (*Mf_Init_Func)(void *object);
++typedef Byte (*Mf_GetIndexByte_Func)(void *object, Int32 index);
++typedef UInt32 (*Mf_GetNumAvailableBytes_Func)(void *object);
++typedef const Byte * (*Mf_GetPointerToCurrentPos_Func)(void *object);
++typedef UInt32 (*Mf_GetMatches_Func)(void *object, UInt32 *distances);
++typedef void (*Mf_Skip_Func)(void *object, UInt32);
++
++typedef struct _IMatchFinder
++{
++ Mf_Init_Func Init;
++ Mf_GetIndexByte_Func GetIndexByte;
++ Mf_GetNumAvailableBytes_Func GetNumAvailableBytes;
++ Mf_GetPointerToCurrentPos_Func GetPointerToCurrentPos;
++ Mf_GetMatches_Func GetMatches;
++ Mf_Skip_Func Skip;
++} IMatchFinder;
++
++void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable);
++
++void MatchFinder_Init(CMatchFinder *p);
++UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
++UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
++void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
++void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzHash.h
+@@ -0,0 +1,54 @@
++/* LzHash.h -- HASH functions for LZ algorithms
++2009-02-07 : Igor Pavlov : Public domain */
++
++#ifndef __LZ_HASH_H
++#define __LZ_HASH_H
++
++#define kHash2Size (1 << 10)
++#define kHash3Size (1 << 16)
++#define kHash4Size (1 << 20)
++
++#define kFix3HashSize (kHash2Size)
++#define kFix4HashSize (kHash2Size + kHash3Size)
++#define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size)
++
++#define HASH2_CALC hashValue = cur[0] | ((UInt32)cur[1] << 8);
++
++#define HASH3_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hashValue = (temp ^ ((UInt32)cur[2] << 8)) & p->hashMask; }
++
++#define HASH4_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
++ hashValue = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & p->hashMask; }
++
++#define HASH5_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
++ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)); \
++ hashValue = (hash4Value ^ (p->crc[cur[4]] << 3)) & p->hashMask; \
++ hash4Value &= (kHash4Size - 1); }
++
++/* #define HASH_ZIP_CALC hashValue = ((cur[0] | ((UInt32)cur[1] << 8)) ^ p->crc[cur[2]]) & 0xFFFF; */
++#define HASH_ZIP_CALC hashValue = ((cur[2] | ((UInt32)cur[0] << 8)) ^ p->crc[cur[1]]) & 0xFFFF;
++
++
++#define MT_HASH2_CALC \
++ hash2Value = (p->crc[cur[0]] ^ cur[1]) & (kHash2Size - 1);
++
++#define MT_HASH3_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); }
++
++#define MT_HASH4_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
++ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & (kHash4Size - 1); }
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzmaDec.h
+@@ -0,0 +1,231 @@
++/* LzmaDec.h -- LZMA Decoder
++2009-02-07 : Igor Pavlov : Public domain */
++
++#ifndef __LZMA_DEC_H
++#define __LZMA_DEC_H
++
++#include "Types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* #define _LZMA_PROB32 */
++/* _LZMA_PROB32 can increase the speed on some CPUs,
++ but memory usage for CLzmaDec::probs will be doubled in that case */
++
++#ifdef _LZMA_PROB32
++#define CLzmaProb UInt32
++#else
++#define CLzmaProb UInt16
++#endif
++
++
++/* ---------- LZMA Properties ---------- */
++
++#define LZMA_PROPS_SIZE 5
++
++typedef struct _CLzmaProps
++{
++ unsigned lc, lp, pb;
++ UInt32 dicSize;
++} CLzmaProps;
++
++/* LzmaProps_Decode - decodes properties
++Returns:
++ SZ_OK
++ SZ_ERROR_UNSUPPORTED - Unsupported properties
++*/
++
++SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size);
++
++
++/* ---------- LZMA Decoder state ---------- */
++
++/* LZMA_REQUIRED_INPUT_MAX = number of required input bytes for worst case.
++ Num bits = log2((2^11 / 31) ^ 22) + 26 < 134 + 26 = 160; */
++
++#define LZMA_REQUIRED_INPUT_MAX 20
++
++typedef struct
++{
++ CLzmaProps prop;
++ CLzmaProb *probs;
++ Byte *dic;
++ const Byte *buf;
++ UInt32 range, code;
++ SizeT dicPos;
++ SizeT dicBufSize;
++ UInt32 processedPos;
++ UInt32 checkDicSize;
++ unsigned state;
++ UInt32 reps[4];
++ unsigned remainLen;
++ int needFlush;
++ int needInitState;
++ UInt32 numProbs;
++ unsigned tempBufSize;
++ Byte tempBuf[LZMA_REQUIRED_INPUT_MAX];
++} CLzmaDec;
++
++#define LzmaDec_Construct(p) { (p)->dic = 0; (p)->probs = 0; }
++
++void LzmaDec_Init(CLzmaDec *p);
++
++/* There are two types of LZMA streams:
++ 0) Stream with end mark. That end mark adds about 6 bytes to compressed size.
++ 1) Stream without end mark. You must know exact uncompressed size to decompress such stream. */
++
++typedef enum
++{
++ LZMA_FINISH_ANY, /* finish at any point */
++ LZMA_FINISH_END /* block must be finished at the end */
++} ELzmaFinishMode;
++
++/* ELzmaFinishMode has meaning only if the decoding reaches output limit !!!
++
++ You must use LZMA_FINISH_END, when you know that current output buffer
++ covers last bytes of block. In other cases you must use LZMA_FINISH_ANY.
++
++ If LZMA decoder sees end marker before reaching output limit, it returns SZ_OK,
++ and output value of destLen will be less than output buffer size limit.
++ You can check status result also.
++
++ You can use multiple checks to test data integrity after full decompression:
++ 1) Check Result and "status" variable.
++ 2) Check that output(destLen) = uncompressedSize, if you know real uncompressedSize.
++ 3) Check that output(srcLen) = compressedSize, if you know real compressedSize.
++ You must use correct finish mode in that case. */
++
++typedef enum
++{
++ LZMA_STATUS_NOT_SPECIFIED, /* use main error code instead */
++ LZMA_STATUS_FINISHED_WITH_MARK, /* stream was finished with end mark. */
++ LZMA_STATUS_NOT_FINISHED, /* stream was not finished */
++ LZMA_STATUS_NEEDS_MORE_INPUT, /* you must provide more input bytes */
++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK /* there is probability that stream was finished without end mark */
++} ELzmaStatus;
++
++/* ELzmaStatus is used only as output value for function call */
++
++
++/* ---------- Interfaces ---------- */
++
++/* There are 3 levels of interfaces:
++ 1) Dictionary Interface
++ 2) Buffer Interface
++ 3) One Call Interface
++ You can select any of these interfaces, but don't mix functions from different
++ groups for same object. */
++
++
++/* There are two variants to allocate state for Dictionary Interface:
++ 1) LzmaDec_Allocate / LzmaDec_Free
++ 2) LzmaDec_AllocateProbs / LzmaDec_FreeProbs
++ You can use variant 2, if you set dictionary buffer manually.
++ For Buffer Interface you must always use variant 1.
++
++LzmaDec_Allocate* can return:
++ SZ_OK
++ SZ_ERROR_MEM - Memory allocation error
++ SZ_ERROR_UNSUPPORTED - Unsupported properties
++*/
++
++SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc);
++void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc);
++
++SRes LzmaDec_Allocate(CLzmaDec *state, const Byte *prop, unsigned propsSize, ISzAlloc *alloc);
++void LzmaDec_Free(CLzmaDec *state, ISzAlloc *alloc);
++
++/* ---------- Dictionary Interface ---------- */
++
++/* You can use it, if you want to eliminate the overhead for data copying from
++ dictionary to some other external buffer.
++ You must work with CLzmaDec variables directly in this interface.
++
++ STEPS:
++ LzmaDec_Constr()
++ LzmaDec_Allocate()
++ for (each new stream)
++ {
++ LzmaDec_Init()
++ while (it needs more decompression)
++ {
++ LzmaDec_DecodeToDic()
++ use data from CLzmaDec::dic and update CLzmaDec::dicPos
++ }
++ }
++ LzmaDec_Free()
++*/
++
++/* LzmaDec_DecodeToDic
++
++ The decoding to internal dictionary buffer (CLzmaDec::dic).
++ You must manually update CLzmaDec::dicPos, if it reaches CLzmaDec::dicBufSize !!!
++
++finishMode:
++ It has meaning only if the decoding reaches output limit (dicLimit).
++ LZMA_FINISH_ANY - Decode just dicLimit bytes.
++ LZMA_FINISH_END - Stream must be finished after dicLimit.
++
++Returns:
++ SZ_OK
++ status:
++ LZMA_STATUS_FINISHED_WITH_MARK
++ LZMA_STATUS_NOT_FINISHED
++ LZMA_STATUS_NEEDS_MORE_INPUT
++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
++ SZ_ERROR_DATA - Data error
++*/
++
++SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit,
++ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
++
++
++/* ---------- Buffer Interface ---------- */
++
++/* It's zlib-like interface.
++ See LzmaDec_DecodeToDic description for information about STEPS and return results,
++ but you must use LzmaDec_DecodeToBuf instead of LzmaDec_DecodeToDic and you don't need
++ to work with CLzmaDec variables manually.
++
++finishMode:
++ It has meaning only if the decoding reaches output limit (*destLen).
++ LZMA_FINISH_ANY - Decode just destLen bytes.
++ LZMA_FINISH_END - Stream must be finished after (*destLen).
++*/
++
++SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen,
++ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
++
++
++/* ---------- One Call Interface ---------- */
++
++/* LzmaDecode
++
++finishMode:
++ It has meaning only if the decoding reaches output limit (*destLen).
++ LZMA_FINISH_ANY - Decode just destLen bytes.
++ LZMA_FINISH_END - Stream must be finished after (*destLen).
++
++Returns:
++ SZ_OK
++ status:
++ LZMA_STATUS_FINISHED_WITH_MARK
++ LZMA_STATUS_NOT_FINISHED
++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
++ SZ_ERROR_DATA - Data error
++ SZ_ERROR_MEM - Memory allocation error
++ SZ_ERROR_UNSUPPORTED - Unsupported properties
++ SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src).
++*/
++
++SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
++ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
++ ELzmaStatus *status, ISzAlloc *alloc);
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzmaEnc.h
+@@ -0,0 +1,80 @@
++/* LzmaEnc.h -- LZMA Encoder
++2009-02-07 : Igor Pavlov : Public domain */
++
++#ifndef __LZMA_ENC_H
++#define __LZMA_ENC_H
++
++#include "Types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#define LZMA_PROPS_SIZE 5
++
++typedef struct _CLzmaEncProps
++{
++ int level; /* 0 <= level <= 9 */
++ UInt32 dictSize; /* (1 << 12) <= dictSize <= (1 << 27) for 32-bit version
++ (1 << 12) <= dictSize <= (1 << 30) for 64-bit version
++ default = (1 << 24) */
++ int lc; /* 0 <= lc <= 8, default = 3 */
++ int lp; /* 0 <= lp <= 4, default = 0 */
++ int pb; /* 0 <= pb <= 4, default = 2 */
++ int algo; /* 0 - fast, 1 - normal, default = 1 */
++ int fb; /* 5 <= fb <= 273, default = 32 */
++ int btMode; /* 0 - hashChain Mode, 1 - binTree mode - normal, default = 1 */
++ int numHashBytes; /* 2, 3 or 4, default = 4 */
++ UInt32 mc; /* 1 <= mc <= (1 << 30), default = 32 */
++ unsigned writeEndMark; /* 0 - do not write EOPM, 1 - write EOPM, default = 0 */
++ int numThreads; /* 1 or 2, default = 2 */
++} CLzmaEncProps;
++
++void LzmaEncProps_Init(CLzmaEncProps *p);
++void LzmaEncProps_Normalize(CLzmaEncProps *p);
++UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2);
++
++
++/* ---------- CLzmaEncHandle Interface ---------- */
++
++/* LzmaEnc_* functions can return the following exit codes:
++Returns:
++ SZ_OK - OK
++ SZ_ERROR_MEM - Memory allocation error
++ SZ_ERROR_PARAM - Incorrect parameter in props
++ SZ_ERROR_WRITE - Write callback error.
++ SZ_ERROR_PROGRESS - some break from progress callback
++ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
++*/
++
++typedef void * CLzmaEncHandle;
++
++CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc);
++void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig);
++SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props);
++SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size);
++SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStream *outStream, ISeqInStream *inStream,
++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
++SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
++
++/* ---------- One Call Interface ---------- */
++
++/* LzmaEncode
++Return code:
++ SZ_OK - OK
++ SZ_ERROR_MEM - Memory allocation error
++ SZ_ERROR_PARAM - Incorrect parameter
++ SZ_ERROR_OUTPUT_EOF - output buffer overflow
++ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
++*/
++
++SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/Types.h
+@@ -0,0 +1,226 @@
++/* Types.h -- Basic types
++2009-11-23 : Igor Pavlov : Public domain */
++
++#ifndef __7Z_TYPES_H
++#define __7Z_TYPES_H
++
++#include <stddef.h>
++
++#ifdef _WIN32
++#include <windows.h>
++#endif
++
++#ifndef EXTERN_C_BEGIN
++#ifdef __cplusplus
++#define EXTERN_C_BEGIN extern "C" {
++#define EXTERN_C_END }
++#else
++#define EXTERN_C_BEGIN
++#define EXTERN_C_END
++#endif
++#endif
++
++EXTERN_C_BEGIN
++
++#define SZ_OK 0
++
++#define SZ_ERROR_DATA 1
++#define SZ_ERROR_MEM 2
++#define SZ_ERROR_CRC 3
++#define SZ_ERROR_UNSUPPORTED 4
++#define SZ_ERROR_PARAM 5
++#define SZ_ERROR_INPUT_EOF 6
++#define SZ_ERROR_OUTPUT_EOF 7
++#define SZ_ERROR_READ 8
++#define SZ_ERROR_WRITE 9
++#define SZ_ERROR_PROGRESS 10
++#define SZ_ERROR_FAIL 11
++#define SZ_ERROR_THREAD 12
++
++#define SZ_ERROR_ARCHIVE 16
++#define SZ_ERROR_NO_ARCHIVE 17
++
++typedef int SRes;
++
++#ifdef _WIN32
++typedef DWORD WRes;
++#else
++typedef int WRes;
++#endif
++
++#ifndef RINOK
++#define RINOK(x) { int __result__ = (x); if (__result__ != 0) return __result__; }
++#endif
++
++typedef unsigned char Byte;
++typedef short Int16;
++typedef unsigned short UInt16;
++
++#ifdef _LZMA_UINT32_IS_ULONG
++typedef long Int32;
++typedef unsigned long UInt32;
++#else
++typedef int Int32;
++typedef unsigned int UInt32;
++#endif
++
++#ifdef _SZ_NO_INT_64
++
++/* define _SZ_NO_INT_64, if your compiler doesn't support 64-bit integers.
++ NOTES: Some code will work incorrectly in that case! */
++
++typedef long Int64;
++typedef unsigned long UInt64;
++
++#else
++
++#if defined(_MSC_VER) || defined(__BORLANDC__)
++typedef __int64 Int64;
++typedef unsigned __int64 UInt64;
++#else
++typedef long long int Int64;
++typedef unsigned long long int UInt64;
++#endif
++
++#endif
++
++#ifdef _LZMA_NO_SYSTEM_SIZE_T
++typedef UInt32 SizeT;
++#else
++typedef size_t SizeT;
++#endif
++
++typedef int Bool;
++#define True 1
++#define False 0
++
++
++#ifdef _WIN32
++#define MY_STD_CALL __stdcall
++#else
++#define MY_STD_CALL
++#endif
++
++#ifdef _MSC_VER
++
++#if _MSC_VER >= 1300
++#define MY_NO_INLINE __declspec(noinline)
++#else
++#define MY_NO_INLINE
++#endif
++
++#define MY_CDECL __cdecl
++#define MY_FAST_CALL __fastcall
++
++#else
++
++#define MY_CDECL
++#define MY_FAST_CALL
++
++#endif
++
++
++/* The following interfaces use first parameter as pointer to structure */
++
++typedef struct
++{
++ SRes (*Read)(void *p, void *buf, size_t *size);
++ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
++ (output(*size) < input(*size)) is allowed */
++} ISeqInStream;
++
++/* it can return SZ_ERROR_INPUT_EOF */
++SRes SeqInStream_Read(ISeqInStream *stream, void *buf, size_t size);
++SRes SeqInStream_Read2(ISeqInStream *stream, void *buf, size_t size, SRes errorType);
++SRes SeqInStream_ReadByte(ISeqInStream *stream, Byte *buf);
++
++typedef struct
++{
++ size_t (*Write)(void *p, const void *buf, size_t size);
++ /* Returns: result - the number of actually written bytes.
++ (result < size) means error */
++} ISeqOutStream;
++
++typedef enum
++{
++ SZ_SEEK_SET = 0,
++ SZ_SEEK_CUR = 1,
++ SZ_SEEK_END = 2
++} ESzSeek;
++
++typedef struct
++{
++ SRes (*Read)(void *p, void *buf, size_t *size); /* same as ISeqInStream::Read */
++ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);
++} ISeekInStream;
++
++typedef struct
++{
++ SRes (*Look)(void *p, void **buf, size_t *size);
++ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
++ (output(*size) > input(*size)) is not allowed
++ (output(*size) < input(*size)) is allowed */
++ SRes (*Skip)(void *p, size_t offset);
++ /* offset must be <= output(*size) of Look */
++
++ SRes (*Read)(void *p, void *buf, size_t *size);
++ /* reads directly (without buffer). It's same as ISeqInStream::Read */
++ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);
++} ILookInStream;
++
++SRes LookInStream_LookRead(ILookInStream *stream, void *buf, size_t *size);
++SRes LookInStream_SeekTo(ILookInStream *stream, UInt64 offset);
++
++/* reads via ILookInStream::Read */
++SRes LookInStream_Read2(ILookInStream *stream, void *buf, size_t size, SRes errorType);
++SRes LookInStream_Read(ILookInStream *stream, void *buf, size_t size);
++
++#define LookToRead_BUF_SIZE (1 << 14)
++
++typedef struct
++{
++ ILookInStream s;
++ ISeekInStream *realStream;
++ size_t pos;
++ size_t size;
++ Byte buf[LookToRead_BUF_SIZE];
++} CLookToRead;
++
++void LookToRead_CreateVTable(CLookToRead *p, int lookahead);
++void LookToRead_Init(CLookToRead *p);
++
++typedef struct
++{
++ ISeqInStream s;
++ ILookInStream *realStream;
++} CSecToLook;
++
++void SecToLook_CreateVTable(CSecToLook *p);
++
++typedef struct
++{
++ ISeqInStream s;
++ ILookInStream *realStream;
++} CSecToRead;
++
++void SecToRead_CreateVTable(CSecToRead *p);
++
++typedef struct
++{
++ SRes (*Progress)(void *p, UInt64 inSize, UInt64 outSize);
++ /* Returns: result. (result != SZ_OK) means break.
++ Value (UInt64)(Int64)-1 for size means unknown value. */
++} ICompressProgress;
++
++typedef struct
++{
++ void *(*Alloc)(void *p, size_t size);
++ void (*Free)(void *p, void *address); /* address can be 0 */
++} ISzAlloc;
++
++#define IAlloc_Alloc(p, size) (p)->Alloc((p), size)
++#define IAlloc_Free(p, a) (p)->Free((p), a)
++
++EXTERN_C_END
++
++#endif
+--- a/include/uapi/linux/jffs2.h
++++ b/include/uapi/linux/jffs2.h
+@@ -46,6 +46,7 @@
+ #define JFFS2_COMPR_DYNRUBIN 0x05
+ #define JFFS2_COMPR_ZLIB 0x06
+ #define JFFS2_COMPR_LZO 0x07
++#define JFFS2_COMPR_LZMA 0x08
+ /* Compatibility flags. */
+ #define JFFS2_COMPAT_MASK 0xc000 /* What do to if an unknown nodetype is found */
+ #define JFFS2_NODE_ACCURATE 0x2000
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -278,6 +278,12 @@ config ZSTD_DECOMPRESS
+
+ source "lib/xz/Kconfig"
+
++config LZMA_COMPRESS
++ tristate
++
++config LZMA_DECOMPRESS
++ tristate
++
+ #
+ # These all provide a common interface (hence the apparent duplication with
+ # ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.)
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -3,6 +3,16 @@
+ # Makefile for some libs needed in the kernel.
+ #
+
++ifdef CONFIG_JFFS2_ZLIB
++ CONFIG_ZLIB_INFLATE:=y
++ CONFIG_ZLIB_DEFLATE:=y
++endif
++
++ifdef CONFIG_JFFS2_LZMA
++ CONFIG_LZMA_DECOMPRESS:=y
++ CONFIG_LZMA_COMPRESS:=y
++endif
++
+ ifdef CONFIG_FUNCTION_TRACER
+ ORIG_CFLAGS := $(KBUILD_CFLAGS)
+ KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
+@@ -139,6 +149,8 @@ obj-$(CONFIG_ZSTD_COMPRESS) += zstd/
+ obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd/
+ obj-$(CONFIG_XZ_DEC) += xz/
+ obj-$(CONFIG_RAID6_PQ) += raid6/
++obj-$(CONFIG_LZMA_COMPRESS) += lzma/
++obj-$(CONFIG_LZMA_DECOMPRESS) += lzma/
+
+ lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
+ lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
+--- /dev/null
++++ b/lib/lzma/LzFind.c
+@@ -0,0 +1,761 @@
++/* LzFind.c -- Match finder for LZ algorithms
++2009-04-22 : Igor Pavlov : Public domain */
++
++#include <string.h>
++
++#include "LzFind.h"
++#include "LzHash.h"
++
++#define kEmptyHashValue 0
++#define kMaxValForNormalize ((UInt32)0xFFFFFFFF)
++#define kNormalizeStepMin (1 << 10) /* it must be power of 2 */
++#define kNormalizeMask (~(kNormalizeStepMin - 1))
++#define kMaxHistorySize ((UInt32)3 << 30)
++
++#define kStartMaxLen 3
++
++static void LzInWindow_Free(CMatchFinder *p, ISzAlloc *alloc)
++{
++ if (!p->directInput)
++ {
++ alloc->Free(alloc, p->bufferBase);
++ p->bufferBase = 0;
++ }
++}
++
++/* keepSizeBefore + keepSizeAfter + keepSizeReserv must be < 4G) */
++
++static int LzInWindow_Create(CMatchFinder *p, UInt32 keepSizeReserv, ISzAlloc *alloc)
++{
++ UInt32 blockSize = p->keepSizeBefore + p->keepSizeAfter + keepSizeReserv;
++ if (p->directInput)
++ {
++ p->blockSize = blockSize;
++ return 1;
++ }
++ if (p->bufferBase == 0 || p->blockSize != blockSize)
++ {
++ LzInWindow_Free(p, alloc);
++ p->blockSize = blockSize;
++ p->bufferBase = (Byte *)alloc->Alloc(alloc, (size_t)blockSize);
++ }
++ return (p->bufferBase != 0);
++}
++
++Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; }
++Byte MatchFinder_GetIndexByte(CMatchFinder *p, Int32 index) { return p->buffer[index]; }
++
++UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; }
++
++void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue)
++{
++ p->posLimit -= subValue;
++ p->pos -= subValue;
++ p->streamPos -= subValue;
++}
++
++static void MatchFinder_ReadBlock(CMatchFinder *p)
++{
++ if (p->streamEndWasReached || p->result != SZ_OK)
++ return;
++ if (p->directInput)
++ {
++ UInt32 curSize = 0xFFFFFFFF - p->streamPos;
++ if (curSize > p->directInputRem)
++ curSize = (UInt32)p->directInputRem;
++ p->directInputRem -= curSize;
++ p->streamPos += curSize;
++ if (p->directInputRem == 0)
++ p->streamEndWasReached = 1;
++ return;
++ }
++ for (;;)
++ {
++ Byte *dest = p->buffer + (p->streamPos - p->pos);
++ size_t size = (p->bufferBase + p->blockSize - dest);
++ if (size == 0)
++ return;
++ p->result = p->stream->Read(p->stream, dest, &size);
++ if (p->result != SZ_OK)
++ return;
++ if (size == 0)
++ {
++ p->streamEndWasReached = 1;
++ return;
++ }
++ p->streamPos += (UInt32)size;
++ if (p->streamPos - p->pos > p->keepSizeAfter)
++ return;
++ }
++}
++
++void MatchFinder_MoveBlock(CMatchFinder *p)
++{
++ memmove(p->bufferBase,
++ p->buffer - p->keepSizeBefore,
++ (size_t)(p->streamPos - p->pos + p->keepSizeBefore));
++ p->buffer = p->bufferBase + p->keepSizeBefore;
++}
++
++int MatchFinder_NeedMove(CMatchFinder *p)
++{
++ if (p->directInput)
++ return 0;
++ /* if (p->streamEndWasReached) return 0; */
++ return ((size_t)(p->bufferBase + p->blockSize - p->buffer) <= p->keepSizeAfter);
++}
++
++void MatchFinder_ReadIfRequired(CMatchFinder *p)
++{
++ if (p->streamEndWasReached)
++ return;
++ if (p->keepSizeAfter >= p->streamPos - p->pos)
++ MatchFinder_ReadBlock(p);
++}
++
++static void MatchFinder_CheckAndMoveAndRead(CMatchFinder *p)
++{
++ if (MatchFinder_NeedMove(p))
++ MatchFinder_MoveBlock(p);
++ MatchFinder_ReadBlock(p);
++}
++
++static void MatchFinder_SetDefaultSettings(CMatchFinder *p)
++{
++ p->cutValue = 32;
++ p->btMode = 1;
++ p->numHashBytes = 4;
++ p->bigHash = 0;
++}
++
++#define kCrcPoly 0xEDB88320
++
++void MatchFinder_Construct(CMatchFinder *p)
++{
++ UInt32 i;
++ p->bufferBase = 0;
++ p->directInput = 0;
++ p->hash = 0;
++ MatchFinder_SetDefaultSettings(p);
++
++ for (i = 0; i < 256; i++)
++ {
++ UInt32 r = i;
++ int j;
++ for (j = 0; j < 8; j++)
++ r = (r >> 1) ^ (kCrcPoly & ~((r & 1) - 1));
++ p->crc[i] = r;
++ }
++}
++
++static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->hash);
++ p->hash = 0;
++}
++
++void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc)
++{
++ MatchFinder_FreeThisClassMemory(p, alloc);
++ LzInWindow_Free(p, alloc);
++}
++
++static CLzRef* AllocRefs(UInt32 num, ISzAlloc *alloc)
++{
++ size_t sizeInBytes = (size_t)num * sizeof(CLzRef);
++ if (sizeInBytes / sizeof(CLzRef) != num)
++ return 0;
++ return (CLzRef *)alloc->Alloc(alloc, sizeInBytes);
++}
++
++int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
++ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
++ ISzAlloc *alloc)
++{
++ UInt32 sizeReserv;
++ if (historySize > kMaxHistorySize)
++ {
++ MatchFinder_Free(p, alloc);
++ return 0;
++ }
++ sizeReserv = historySize >> 1;
++ if (historySize > ((UInt32)2 << 30))
++ sizeReserv = historySize >> 2;
++ sizeReserv += (keepAddBufferBefore + matchMaxLen + keepAddBufferAfter) / 2 + (1 << 19);
++
++ p->keepSizeBefore = historySize + keepAddBufferBefore + 1;
++ p->keepSizeAfter = matchMaxLen + keepAddBufferAfter;
++ /* we need one additional byte, since we use MoveBlock after pos++ and before dictionary using */
++ if (LzInWindow_Create(p, sizeReserv, alloc))
++ {
++ UInt32 newCyclicBufferSize = historySize + 1;
++ UInt32 hs;
++ p->matchMaxLen = matchMaxLen;
++ {
++ p->fixedHashSize = 0;
++ if (p->numHashBytes == 2)
++ hs = (1 << 16) - 1;
++ else
++ {
++ hs = historySize - 1;
++ hs |= (hs >> 1);
++ hs |= (hs >> 2);
++ hs |= (hs >> 4);
++ hs |= (hs >> 8);
++ hs >>= 1;
++ hs |= 0xFFFF; /* don't change it! It's required for Deflate */
++ if (hs > (1 << 24))
++ {
++ if (p->numHashBytes == 3)
++ hs = (1 << 24) - 1;
++ else
++ hs >>= 1;
++ }
++ }
++ p->hashMask = hs;
++ hs++;
++ if (p->numHashBytes > 2) p->fixedHashSize += kHash2Size;
++ if (p->numHashBytes > 3) p->fixedHashSize += kHash3Size;
++ if (p->numHashBytes > 4) p->fixedHashSize += kHash4Size;
++ hs += p->fixedHashSize;
++ }
++
++ {
++ UInt32 prevSize = p->hashSizeSum + p->numSons;
++ UInt32 newSize;
++ p->historySize = historySize;
++ p->hashSizeSum = hs;
++ p->cyclicBufferSize = newCyclicBufferSize;
++ p->numSons = (p->btMode ? newCyclicBufferSize * 2 : newCyclicBufferSize);
++ newSize = p->hashSizeSum + p->numSons;
++ if (p->hash != 0 && prevSize == newSize)
++ return 1;
++ MatchFinder_FreeThisClassMemory(p, alloc);
++ p->hash = AllocRefs(newSize, alloc);
++ if (p->hash != 0)
++ {
++ p->son = p->hash + p->hashSizeSum;
++ return 1;
++ }
++ }
++ }
++ MatchFinder_Free(p, alloc);
++ return 0;
++}
++
++static void MatchFinder_SetLimits(CMatchFinder *p)
++{
++ UInt32 limit = kMaxValForNormalize - p->pos;
++ UInt32 limit2 = p->cyclicBufferSize - p->cyclicBufferPos;
++ if (limit2 < limit)
++ limit = limit2;
++ limit2 = p->streamPos - p->pos;
++ if (limit2 <= p->keepSizeAfter)
++ {
++ if (limit2 > 0)
++ limit2 = 1;
++ }
++ else
++ limit2 -= p->keepSizeAfter;
++ if (limit2 < limit)
++ limit = limit2;
++ {
++ UInt32 lenLimit = p->streamPos - p->pos;
++ if (lenLimit > p->matchMaxLen)
++ lenLimit = p->matchMaxLen;
++ p->lenLimit = lenLimit;
++ }
++ p->posLimit = p->pos + limit;
++}
++
++void MatchFinder_Init(CMatchFinder *p)
++{
++ UInt32 i;
++ for (i = 0; i < p->hashSizeSum; i++)
++ p->hash[i] = kEmptyHashValue;
++ p->cyclicBufferPos = 0;
++ p->buffer = p->bufferBase;
++ p->pos = p->streamPos = p->cyclicBufferSize;
++ p->result = SZ_OK;
++ p->streamEndWasReached = 0;
++ MatchFinder_ReadBlock(p);
++ MatchFinder_SetLimits(p);
++}
++
++static UInt32 MatchFinder_GetSubValue(CMatchFinder *p)
++{
++ return (p->pos - p->historySize - 1) & kNormalizeMask;
++}
++
++void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems)
++{
++ UInt32 i;
++ for (i = 0; i < numItems; i++)
++ {
++ UInt32 value = items[i];
++ if (value <= subValue)
++ value = kEmptyHashValue;
++ else
++ value -= subValue;
++ items[i] = value;
++ }
++}
++
++static void MatchFinder_Normalize(CMatchFinder *p)
++{
++ UInt32 subValue = MatchFinder_GetSubValue(p);
++ MatchFinder_Normalize3(subValue, p->hash, p->hashSizeSum + p->numSons);
++ MatchFinder_ReduceOffsets(p, subValue);
++}
++
++static void MatchFinder_CheckLimits(CMatchFinder *p)
++{
++ if (p->pos == kMaxValForNormalize)
++ MatchFinder_Normalize(p);
++ if (!p->streamEndWasReached && p->keepSizeAfter == p->streamPos - p->pos)
++ MatchFinder_CheckAndMoveAndRead(p);
++ if (p->cyclicBufferPos == p->cyclicBufferSize)
++ p->cyclicBufferPos = 0;
++ MatchFinder_SetLimits(p);
++}
++
++static UInt32 * Hc_GetMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
++ UInt32 *distances, UInt32 maxLen)
++{
++ son[_cyclicBufferPos] = curMatch;
++ for (;;)
++ {
++ UInt32 delta = pos - curMatch;
++ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
++ return distances;
++ {
++ const Byte *pb = cur - delta;
++ curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)];
++ if (pb[maxLen] == cur[maxLen] && *pb == *cur)
++ {
++ UInt32 len = 0;
++ while (++len != lenLimit)
++ if (pb[len] != cur[len])
++ break;
++ if (maxLen < len)
++ {
++ *distances++ = maxLen = len;
++ *distances++ = delta - 1;
++ if (len == lenLimit)
++ return distances;
++ }
++ }
++ }
++ }
++}
++
++UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
++ UInt32 *distances, UInt32 maxLen)
++{
++ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1;
++ CLzRef *ptr1 = son + (_cyclicBufferPos << 1);
++ UInt32 len0 = 0, len1 = 0;
++ for (;;)
++ {
++ UInt32 delta = pos - curMatch;
++ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
++ {
++ *ptr0 = *ptr1 = kEmptyHashValue;
++ return distances;
++ }
++ {
++ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
++ const Byte *pb = cur - delta;
++ UInt32 len = (len0 < len1 ? len0 : len1);
++ if (pb[len] == cur[len])
++ {
++ if (++len != lenLimit && pb[len] == cur[len])
++ while (++len != lenLimit)
++ if (pb[len] != cur[len])
++ break;
++ if (maxLen < len)
++ {
++ *distances++ = maxLen = len;
++ *distances++ = delta - 1;
++ if (len == lenLimit)
++ {
++ *ptr1 = pair[0];
++ *ptr0 = pair[1];
++ return distances;
++ }
++ }
++ }
++ if (pb[len] < cur[len])
++ {
++ *ptr1 = curMatch;
++ ptr1 = pair + 1;
++ curMatch = *ptr1;
++ len1 = len;
++ }
++ else
++ {
++ *ptr0 = curMatch;
++ ptr0 = pair;
++ curMatch = *ptr0;
++ len0 = len;
++ }
++ }
++ }
++}
++
++static void SkipMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue)
++{
++ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1;
++ CLzRef *ptr1 = son + (_cyclicBufferPos << 1);
++ UInt32 len0 = 0, len1 = 0;
++ for (;;)
++ {
++ UInt32 delta = pos - curMatch;
++ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
++ {
++ *ptr0 = *ptr1 = kEmptyHashValue;
++ return;
++ }
++ {
++ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
++ const Byte *pb = cur - delta;
++ UInt32 len = (len0 < len1 ? len0 : len1);
++ if (pb[len] == cur[len])
++ {
++ while (++len != lenLimit)
++ if (pb[len] != cur[len])
++ break;
++ {
++ if (len == lenLimit)
++ {
++ *ptr1 = pair[0];
++ *ptr0 = pair[1];
++ return;
++ }
++ }
++ }
++ if (pb[len] < cur[len])
++ {
++ *ptr1 = curMatch;
++ ptr1 = pair + 1;
++ curMatch = *ptr1;
++ len1 = len;
++ }
++ else
++ {
++ *ptr0 = curMatch;
++ ptr0 = pair;
++ curMatch = *ptr0;
++ len0 = len;
++ }
++ }
++ }
++}
++
++#define MOVE_POS \
++ ++p->cyclicBufferPos; \
++ p->buffer++; \
++ if (++p->pos == p->posLimit) MatchFinder_CheckLimits(p);
++
++#define MOVE_POS_RET MOVE_POS return offset;
++
++static void MatchFinder_MovePos(CMatchFinder *p) { MOVE_POS; }
++
++#define GET_MATCHES_HEADER2(minLen, ret_op) \
++ UInt32 lenLimit; UInt32 hashValue; const Byte *cur; UInt32 curMatch; \
++ lenLimit = p->lenLimit; { if (lenLimit < minLen) { MatchFinder_MovePos(p); ret_op; }} \
++ cur = p->buffer;
++
++#define GET_MATCHES_HEADER(minLen) GET_MATCHES_HEADER2(minLen, return 0)
++#define SKIP_HEADER(minLen) GET_MATCHES_HEADER2(minLen, continue)
++
++#define MF_PARAMS(p) p->pos, p->buffer, p->son, p->cyclicBufferPos, p->cyclicBufferSize, p->cutValue
++
++#define GET_MATCHES_FOOTER(offset, maxLen) \
++ offset = (UInt32)(GetMatchesSpec1(lenLimit, curMatch, MF_PARAMS(p), \
++ distances + offset, maxLen) - distances); MOVE_POS_RET;
++
++#define SKIP_FOOTER \
++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); MOVE_POS;
++
++static UInt32 Bt2_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 offset;
++ GET_MATCHES_HEADER(2)
++ HASH2_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ offset = 0;
++ GET_MATCHES_FOOTER(offset, 1)
++}
++
++UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 offset;
++ GET_MATCHES_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ offset = 0;
++ GET_MATCHES_FOOTER(offset, 2)
++}
++
++static UInt32 Bt3_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 hash2Value, delta2, maxLen, offset;
++ GET_MATCHES_HEADER(3)
++
++ HASH3_CALC;
++
++ delta2 = p->pos - p->hash[hash2Value];
++ curMatch = p->hash[kFix3HashSize + hashValue];
++
++ p->hash[hash2Value] =
++ p->hash[kFix3HashSize + hashValue] = p->pos;
++
++
++ maxLen = 2;
++ offset = 0;
++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
++ {
++ for (; maxLen != lenLimit; maxLen++)
++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
++ break;
++ distances[0] = maxLen;
++ distances[1] = delta2 - 1;
++ offset = 2;
++ if (maxLen == lenLimit)
++ {
++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));
++ MOVE_POS_RET;
++ }
++ }
++ GET_MATCHES_FOOTER(offset, maxLen)
++}
++
++static UInt32 Bt4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset;
++ GET_MATCHES_HEADER(4)
++
++ HASH4_CALC;
++
++ delta2 = p->pos - p->hash[ hash2Value];
++ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value];
++ curMatch = p->hash[kFix4HashSize + hashValue];
++
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] =
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++
++ maxLen = 1;
++ offset = 0;
++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
++ {
++ distances[0] = maxLen = 2;
++ distances[1] = delta2 - 1;
++ offset = 2;
++ }
++ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur)
++ {
++ maxLen = 3;
++ distances[offset + 1] = delta3 - 1;
++ offset += 2;
++ delta2 = delta3;
++ }
++ if (offset != 0)
++ {
++ for (; maxLen != lenLimit; maxLen++)
++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
++ break;
++ distances[offset - 2] = maxLen;
++ if (maxLen == lenLimit)
++ {
++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));
++ MOVE_POS_RET;
++ }
++ }
++ if (maxLen < 3)
++ maxLen = 3;
++ GET_MATCHES_FOOTER(offset, maxLen)
++}
++
++static UInt32 Hc4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset;
++ GET_MATCHES_HEADER(4)
++
++ HASH4_CALC;
++
++ delta2 = p->pos - p->hash[ hash2Value];
++ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value];
++ curMatch = p->hash[kFix4HashSize + hashValue];
++
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] =
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++
++ maxLen = 1;
++ offset = 0;
++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
++ {
++ distances[0] = maxLen = 2;
++ distances[1] = delta2 - 1;
++ offset = 2;
++ }
++ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur)
++ {
++ maxLen = 3;
++ distances[offset + 1] = delta3 - 1;
++ offset += 2;
++ delta2 = delta3;
++ }
++ if (offset != 0)
++ {
++ for (; maxLen != lenLimit; maxLen++)
++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
++ break;
++ distances[offset - 2] = maxLen;
++ if (maxLen == lenLimit)
++ {
++ p->son[p->cyclicBufferPos] = curMatch;
++ MOVE_POS_RET;
++ }
++ }
++ if (maxLen < 3)
++ maxLen = 3;
++ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p),
++ distances + offset, maxLen) - (distances));
++ MOVE_POS_RET
++}
++
++UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 offset;
++ GET_MATCHES_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p),
++ distances, 2) - (distances));
++ MOVE_POS_RET
++}
++
++static void Bt2_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ SKIP_HEADER(2)
++ HASH2_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ SKIP_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++static void Bt3_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ UInt32 hash2Value;
++ SKIP_HEADER(3)
++ HASH3_CALC;
++ curMatch = p->hash[kFix3HashSize + hashValue];
++ p->hash[hash2Value] =
++ p->hash[kFix3HashSize + hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++static void Bt4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ UInt32 hash2Value, hash3Value;
++ SKIP_HEADER(4)
++ HASH4_CALC;
++ curMatch = p->hash[kFix4HashSize + hashValue];
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] = p->pos;
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++static void Hc4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ UInt32 hash2Value, hash3Value;
++ SKIP_HEADER(4)
++ HASH4_CALC;
++ curMatch = p->hash[kFix4HashSize + hashValue];
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] =
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++ p->son[p->cyclicBufferPos] = curMatch;
++ MOVE_POS
++ }
++ while (--num != 0);
++}
++
++void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ SKIP_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ p->son[p->cyclicBufferPos] = curMatch;
++ MOVE_POS
++ }
++ while (--num != 0);
++}
++
++void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable)
++{
++ vTable->Init = (Mf_Init_Func)MatchFinder_Init;
++ vTable->GetIndexByte = (Mf_GetIndexByte_Func)MatchFinder_GetIndexByte;
++ vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinder_GetNumAvailableBytes;
++ vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinder_GetPointerToCurrentPos;
++ if (!p->btMode)
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Hc4_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Hc4_MatchFinder_Skip;
++ }
++ else if (p->numHashBytes == 2)
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Bt2_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Bt2_MatchFinder_Skip;
++ }
++ else if (p->numHashBytes == 3)
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Bt3_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Bt3_MatchFinder_Skip;
++ }
++ else
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Bt4_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Bt4_MatchFinder_Skip;
++ }
++}
+--- /dev/null
++++ b/lib/lzma/LzmaDec.c
+@@ -0,0 +1,999 @@
++/* LzmaDec.c -- LZMA Decoder
++2009-09-20 : Igor Pavlov : Public domain */
++
++#include "LzmaDec.h"
++
++#include <string.h>
++
++#define kNumTopBits 24
++#define kTopValue ((UInt32)1 << kNumTopBits)
++
++#define kNumBitModelTotalBits 11
++#define kBitModelTotal (1 << kNumBitModelTotalBits)
++#define kNumMoveBits 5
++
++#define RC_INIT_SIZE 5
++
++#define NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | (*buf++); }
++
++#define IF_BIT_0(p) ttt = *(p); NORMALIZE; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound)
++#define UPDATE_0(p) range = bound; *(p) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
++#define UPDATE_1(p) range -= bound; code -= bound; *(p) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits));
++#define GET_BIT2(p, i, A0, A1) IF_BIT_0(p) \
++ { UPDATE_0(p); i = (i + i); A0; } else \
++ { UPDATE_1(p); i = (i + i) + 1; A1; }
++#define GET_BIT(p, i) GET_BIT2(p, i, ; , ;)
++
++#define TREE_GET_BIT(probs, i) { GET_BIT((probs + i), i); }
++#define TREE_DECODE(probs, limit, i) \
++ { i = 1; do { TREE_GET_BIT(probs, i); } while (i < limit); i -= limit; }
++
++/* #define _LZMA_SIZE_OPT */
++
++#ifdef _LZMA_SIZE_OPT
++#define TREE_6_DECODE(probs, i) TREE_DECODE(probs, (1 << 6), i)
++#else
++#define TREE_6_DECODE(probs, i) \
++ { i = 1; \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ i -= 0x40; }
++#endif
++
++#define NORMALIZE_CHECK if (range < kTopValue) { if (buf >= bufLimit) return DUMMY_ERROR; range <<= 8; code = (code << 8) | (*buf++); }
++
++#define IF_BIT_0_CHECK(p) ttt = *(p); NORMALIZE_CHECK; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound)
++#define UPDATE_0_CHECK range = bound;
++#define UPDATE_1_CHECK range -= bound; code -= bound;
++#define GET_BIT2_CHECK(p, i, A0, A1) IF_BIT_0_CHECK(p) \
++ { UPDATE_0_CHECK; i = (i + i); A0; } else \
++ { UPDATE_1_CHECK; i = (i + i) + 1; A1; }
++#define GET_BIT_CHECK(p, i) GET_BIT2_CHECK(p, i, ; , ;)
++#define TREE_DECODE_CHECK(probs, limit, i) \
++ { i = 1; do { GET_BIT_CHECK(probs + i, i) } while (i < limit); i -= limit; }
++
++
++#define kNumPosBitsMax 4
++#define kNumPosStatesMax (1 << kNumPosBitsMax)
++
++#define kLenNumLowBits 3
++#define kLenNumLowSymbols (1 << kLenNumLowBits)
++#define kLenNumMidBits 3
++#define kLenNumMidSymbols (1 << kLenNumMidBits)
++#define kLenNumHighBits 8
++#define kLenNumHighSymbols (1 << kLenNumHighBits)
++
++#define LenChoice 0
++#define LenChoice2 (LenChoice + 1)
++#define LenLow (LenChoice2 + 1)
++#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
++#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
++#define kNumLenProbs (LenHigh + kLenNumHighSymbols)
++
++
++#define kNumStates 12
++#define kNumLitStates 7
++
++#define kStartPosModelIndex 4
++#define kEndPosModelIndex 14
++#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
++
++#define kNumPosSlotBits 6
++#define kNumLenToPosStates 4
++
++#define kNumAlignBits 4
++#define kAlignTableSize (1 << kNumAlignBits)
++
++#define kMatchMinLen 2
++#define kMatchSpecLenStart (kMatchMinLen + kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols)
++
++#define IsMatch 0
++#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
++#define IsRepG0 (IsRep + kNumStates)
++#define IsRepG1 (IsRepG0 + kNumStates)
++#define IsRepG2 (IsRepG1 + kNumStates)
++#define IsRep0Long (IsRepG2 + kNumStates)
++#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
++#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
++#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
++#define LenCoder (Align + kAlignTableSize)
++#define RepLenCoder (LenCoder + kNumLenProbs)
++#define Literal (RepLenCoder + kNumLenProbs)
++
++#define LZMA_BASE_SIZE 1846
++#define LZMA_LIT_SIZE 768
++
++#define LzmaProps_GetNumProbs(p) ((UInt32)LZMA_BASE_SIZE + (LZMA_LIT_SIZE << ((p)->lc + (p)->lp)))
++
++#if Literal != LZMA_BASE_SIZE
++StopCompilingDueBUG
++#endif
++
++#define LZMA_DIC_MIN (1 << 12)
++
++/* The first LZMA symbol is always decoded.
++Then it keeps decoding new LZMA symbols while (buf < bufLimit); note that "buf" excludes the last normalization.
++Out:
++ Result:
++ SZ_OK - OK
++ SZ_ERROR_DATA - Error
++ p->remainLen:
++ < kMatchSpecLenStart : normal remain
++ = kMatchSpecLenStart : finished
++ = kMatchSpecLenStart + 1 : Flush marker
++ = kMatchSpecLenStart + 2 : State Init Marker
++*/
++
++static int MY_FAST_CALL LzmaDec_DecodeReal(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
++{
++ CLzmaProb *probs = p->probs;
++
++ unsigned state = p->state;
++ UInt32 rep0 = p->reps[0], rep1 = p->reps[1], rep2 = p->reps[2], rep3 = p->reps[3];
++ unsigned pbMask = ((unsigned)1 << (p->prop.pb)) - 1;
++ unsigned lpMask = ((unsigned)1 << (p->prop.lp)) - 1;
++ unsigned lc = p->prop.lc;
++
++ Byte *dic = p->dic;
++ SizeT dicBufSize = p->dicBufSize;
++ SizeT dicPos = p->dicPos;
++
++ UInt32 processedPos = p->processedPos;
++ UInt32 checkDicSize = p->checkDicSize;
++ unsigned len = 0;
++
++ const Byte *buf = p->buf;
++ UInt32 range = p->range;
++ UInt32 code = p->code;
++
++ do
++ {
++ CLzmaProb *prob;
++ UInt32 bound;
++ unsigned ttt;
++ unsigned posState = processedPos & pbMask;
++
++ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0(prob)
++ {
++ unsigned symbol;
++ UPDATE_0(prob);
++ prob = probs + Literal;
++ if (checkDicSize != 0 || processedPos != 0)
++ prob += (LZMA_LIT_SIZE * (((processedPos & lpMask) << lc) +
++ (dic[(dicPos == 0 ? dicBufSize : dicPos) - 1] >> (8 - lc))));
++
++ if (state < kNumLitStates)
++ {
++ state -= (state < 4) ? state : 3;
++ symbol = 1;
++ do { GET_BIT(prob + symbol, symbol) } while (symbol < 0x100);
++ }
++ else
++ {
++ unsigned matchByte = p->dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
++ unsigned offs = 0x100;
++ state -= (state < 10) ? 3 : 6;
++ symbol = 1;
++ do
++ {
++ unsigned bit;
++ CLzmaProb *probLit;
++ matchByte <<= 1;
++ bit = (matchByte & offs);
++ probLit = prob + offs + bit + symbol;
++ GET_BIT2(probLit, symbol, offs &= ~bit, offs &= bit)
++ }
++ while (symbol < 0x100);
++ }
++ dic[dicPos++] = (Byte)symbol;
++ processedPos++;
++ continue;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ prob = probs + IsRep + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ state += kNumStates;
++ prob = probs + LenCoder;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ if (checkDicSize == 0 && processedPos == 0)
++ return SZ_ERROR_DATA;
++ prob = probs + IsRepG0 + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
++ dicPos++;
++ processedPos++;
++ state = state < kNumLitStates ? 9 : 11;
++ continue;
++ }
++ UPDATE_1(prob);
++ }
++ else
++ {
++ UInt32 distance;
++ UPDATE_1(prob);
++ prob = probs + IsRepG1 + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ distance = rep1;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ prob = probs + IsRepG2 + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ distance = rep2;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ distance = rep3;
++ rep3 = rep2;
++ }
++ rep2 = rep1;
++ }
++ rep1 = rep0;
++ rep0 = distance;
++ }
++ state = state < kNumLitStates ? 8 : 11;
++ prob = probs + RepLenCoder;
++ }
++ {
++ unsigned limit, offset;
++ CLzmaProb *probLen = prob + LenChoice;
++ IF_BIT_0(probLen)
++ {
++ UPDATE_0(probLen);
++ probLen = prob + LenLow + (posState << kLenNumLowBits);
++ offset = 0;
++ limit = (1 << kLenNumLowBits);
++ }
++ else
++ {
++ UPDATE_1(probLen);
++ probLen = prob + LenChoice2;
++ IF_BIT_0(probLen)
++ {
++ UPDATE_0(probLen);
++ probLen = prob + LenMid + (posState << kLenNumMidBits);
++ offset = kLenNumLowSymbols;
++ limit = (1 << kLenNumMidBits);
++ }
++ else
++ {
++ UPDATE_1(probLen);
++ probLen = prob + LenHigh;
++ offset = kLenNumLowSymbols + kLenNumMidSymbols;
++ limit = (1 << kLenNumHighBits);
++ }
++ }
++ TREE_DECODE(probLen, limit, len);
++ len += offset;
++ }
++
++ if (state >= kNumStates)
++ {
++ UInt32 distance;
++ prob = probs + PosSlot +
++ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << kNumPosSlotBits);
++ TREE_6_DECODE(prob, distance);
++ if (distance >= kStartPosModelIndex)
++ {
++ unsigned posSlot = (unsigned)distance;
++ int numDirectBits = (int)(((distance >> 1) - 1));
++ distance = (2 | (distance & 1));
++ if (posSlot < kEndPosModelIndex)
++ {
++ distance <<= numDirectBits;
++ prob = probs + SpecPos + distance - posSlot - 1;
++ {
++ UInt32 mask = 1;
++ unsigned i = 1;
++ do
++ {
++ GET_BIT2(prob + i, i, ; , distance |= mask);
++ mask <<= 1;
++ }
++ while (--numDirectBits != 0);
++ }
++ }
++ else
++ {
++ numDirectBits -= kNumAlignBits;
++ do
++ {
++ NORMALIZE
++ range >>= 1;
++
++ {
++ UInt32 t;
++ code -= range;
++ t = (0 - ((UInt32)code >> 31)); /* (UInt32)((Int32)code >> 31) */
++ distance = (distance << 1) + (t + 1);
++ code += range & t;
++ }
++ /*
++ distance <<= 1;
++ if (code >= range)
++ {
++ code -= range;
++ distance |= 1;
++ }
++ */
++ }
++ while (--numDirectBits != 0);
++ prob = probs + Align;
++ distance <<= kNumAlignBits;
++ {
++ unsigned i = 1;
++ GET_BIT2(prob + i, i, ; , distance |= 1);
++ GET_BIT2(prob + i, i, ; , distance |= 2);
++ GET_BIT2(prob + i, i, ; , distance |= 4);
++ GET_BIT2(prob + i, i, ; , distance |= 8);
++ }
++ if (distance == (UInt32)0xFFFFFFFF)
++ {
++ len += kMatchSpecLenStart;
++ state -= kNumStates;
++ break;
++ }
++ }
++ }
++ rep3 = rep2;
++ rep2 = rep1;
++ rep1 = rep0;
++ rep0 = distance + 1;
++ if (checkDicSize == 0)
++ {
++ if (distance >= processedPos)
++ return SZ_ERROR_DATA;
++ }
++ else if (distance >= checkDicSize)
++ return SZ_ERROR_DATA;
++ state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3;
++ }
++
++ len += kMatchMinLen;
++
++ if (limit == dicPos)
++ return SZ_ERROR_DATA;
++ {
++ SizeT rem = limit - dicPos;
++ unsigned curLen = ((rem < len) ? (unsigned)rem : len);
++ SizeT pos = (dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0);
++
++ processedPos += curLen;
++
++ len -= curLen;
++ if (pos + curLen <= dicBufSize)
++ {
++ Byte *dest = dic + dicPos;
++ ptrdiff_t src = (ptrdiff_t)pos - (ptrdiff_t)dicPos;
++ const Byte *lim = dest + curLen;
++ dicPos += curLen;
++ do
++ *(dest) = (Byte)*(dest + src);
++ while (++dest != lim);
++ }
++ else
++ {
++ do
++ {
++ dic[dicPos++] = dic[pos];
++ if (++pos == dicBufSize)
++ pos = 0;
++ }
++ while (--curLen != 0);
++ }
++ }
++ }
++ }
++ while (dicPos < limit && buf < bufLimit);
++ NORMALIZE;
++ p->buf = buf;
++ p->range = range;
++ p->code = code;
++ p->remainLen = len;
++ p->dicPos = dicPos;
++ p->processedPos = processedPos;
++ p->reps[0] = rep0;
++ p->reps[1] = rep1;
++ p->reps[2] = rep2;
++ p->reps[3] = rep3;
++ p->state = state;
++
++ return SZ_OK;
++}
++
++static void MY_FAST_CALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit)
++{
++ if (p->remainLen != 0 && p->remainLen < kMatchSpecLenStart)
++ {
++ Byte *dic = p->dic;
++ SizeT dicPos = p->dicPos;
++ SizeT dicBufSize = p->dicBufSize;
++ unsigned len = p->remainLen;
++ UInt32 rep0 = p->reps[0];
++ if (limit - dicPos < len)
++ len = (unsigned)(limit - dicPos);
++
++ if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= len)
++ p->checkDicSize = p->prop.dicSize;
++
++ p->processedPos += len;
++ p->remainLen -= len;
++ while (len-- != 0)
++ {
++ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
++ dicPos++;
++ }
++ p->dicPos = dicPos;
++ }
++}
++
++static int MY_FAST_CALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
++{
++ do
++ {
++ SizeT limit2 = limit;
++ if (p->checkDicSize == 0)
++ {
++ UInt32 rem = p->prop.dicSize - p->processedPos;
++ if (limit - p->dicPos > rem)
++ limit2 = p->dicPos + rem;
++ }
++ RINOK(LzmaDec_DecodeReal(p, limit2, bufLimit));
++ if (p->processedPos >= p->prop.dicSize)
++ p->checkDicSize = p->prop.dicSize;
++ LzmaDec_WriteRem(p, limit);
++ }
++ while (p->dicPos < limit && p->buf < bufLimit && p->remainLen < kMatchSpecLenStart);
++
++ if (p->remainLen > kMatchSpecLenStart)
++ {
++ p->remainLen = kMatchSpecLenStart;
++ }
++ return 0;
++}
++
++typedef enum
++{
++ DUMMY_ERROR, /* unexpected end of input stream */
++ DUMMY_LIT,
++ DUMMY_MATCH,
++ DUMMY_REP
++} ELzmaDummy;
++
++static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, SizeT inSize)
++{
++ UInt32 range = p->range;
++ UInt32 code = p->code;
++ const Byte *bufLimit = buf + inSize;
++ CLzmaProb *probs = p->probs;
++ unsigned state = p->state;
++ ELzmaDummy res;
++
++ {
++ CLzmaProb *prob;
++ UInt32 bound;
++ unsigned ttt;
++ unsigned posState = (p->processedPos) & ((1 << p->prop.pb) - 1);
++
++ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK
++
++ /* if (bufLimit - buf >= 7) return DUMMY_LIT; */
++
++ prob = probs + Literal;
++ if (p->checkDicSize != 0 || p->processedPos != 0)
++ prob += (LZMA_LIT_SIZE *
++ ((((p->processedPos) & ((1 << (p->prop.lp)) - 1)) << p->prop.lc) +
++ (p->dic[(p->dicPos == 0 ? p->dicBufSize : p->dicPos) - 1] >> (8 - p->prop.lc))));
++
++ if (state < kNumLitStates)
++ {
++ unsigned symbol = 1;
++ do { GET_BIT_CHECK(prob + symbol, symbol) } while (symbol < 0x100);
++ }
++ else
++ {
++ unsigned matchByte = p->dic[p->dicPos - p->reps[0] +
++ ((p->dicPos < p->reps[0]) ? p->dicBufSize : 0)];
++ unsigned offs = 0x100;
++ unsigned symbol = 1;
++ do
++ {
++ unsigned bit;
++ CLzmaProb *probLit;
++ matchByte <<= 1;
++ bit = (matchByte & offs);
++ probLit = prob + offs + bit + symbol;
++ GET_BIT2_CHECK(probLit, symbol, offs &= ~bit, offs &= bit)
++ }
++ while (symbol < 0x100);
++ }
++ res = DUMMY_LIT;
++ }
++ else
++ {
++ unsigned len;
++ UPDATE_1_CHECK;
++
++ prob = probs + IsRep + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ state = 0;
++ prob = probs + LenCoder;
++ res = DUMMY_MATCH;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ res = DUMMY_REP;
++ prob = probs + IsRepG0 + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ NORMALIZE_CHECK;
++ return DUMMY_REP;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ }
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ prob = probs + IsRepG1 + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ prob = probs + IsRepG2 + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ }
++ }
++ }
++ state = kNumStates;
++ prob = probs + RepLenCoder;
++ }
++ {
++ unsigned limit, offset;
++ CLzmaProb *probLen = prob + LenChoice;
++ IF_BIT_0_CHECK(probLen)
++ {
++ UPDATE_0_CHECK;
++ probLen = prob + LenLow + (posState << kLenNumLowBits);
++ offset = 0;
++ limit = 1 << kLenNumLowBits;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ probLen = prob + LenChoice2;
++ IF_BIT_0_CHECK(probLen)
++ {
++ UPDATE_0_CHECK;
++ probLen = prob + LenMid + (posState << kLenNumMidBits);
++ offset = kLenNumLowSymbols;
++ limit = 1 << kLenNumMidBits;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ probLen = prob + LenHigh;
++ offset = kLenNumLowSymbols + kLenNumMidSymbols;
++ limit = 1 << kLenNumHighBits;
++ }
++ }
++ TREE_DECODE_CHECK(probLen, limit, len);
++ len += offset;
++ }
++
++ if (state < 4)
++ {
++ unsigned posSlot;
++ prob = probs + PosSlot +
++ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) <<
++ kNumPosSlotBits);
++ TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot);
++ if (posSlot >= kStartPosModelIndex)
++ {
++ int numDirectBits = ((posSlot >> 1) - 1);
++
++ /* if (bufLimit - buf >= 8) return DUMMY_MATCH; */
++
++ if (posSlot < kEndPosModelIndex)
++ {
++ prob = probs + SpecPos + ((2 | (posSlot & 1)) << numDirectBits) - posSlot - 1;
++ }
++ else
++ {
++ numDirectBits -= kNumAlignBits;
++ do
++ {
++ NORMALIZE_CHECK
++ range >>= 1;
++ code -= range & (((code - range) >> 31) - 1);
++ /* if (code >= range) code -= range; */
++ }
++ while (--numDirectBits != 0);
++ prob = probs + Align;
++ numDirectBits = kNumAlignBits;
++ }
++ {
++ unsigned i = 1;
++ do
++ {
++ GET_BIT_CHECK(prob + i, i);
++ }
++ while (--numDirectBits != 0);
++ }
++ }
++ }
++ }
++ }
++ NORMALIZE_CHECK;
++ return res;
++}
++
++
++static void LzmaDec_InitRc(CLzmaDec *p, const Byte *data)
++{
++ p->code = ((UInt32)data[1] << 24) | ((UInt32)data[2] << 16) | ((UInt32)data[3] << 8) | ((UInt32)data[4]);
++ p->range = 0xFFFFFFFF;
++ p->needFlush = 0;
++}
++
++void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState)
++{
++ p->needFlush = 1;
++ p->remainLen = 0;
++ p->tempBufSize = 0;
++
++ if (initDic)
++ {
++ p->processedPos = 0;
++ p->checkDicSize = 0;
++ p->needInitState = 1;
++ }
++ if (initState)
++ p->needInitState = 1;
++}
++
++void LzmaDec_Init(CLzmaDec *p)
++{
++ p->dicPos = 0;
++ LzmaDec_InitDicAndState(p, True, True);
++}
++
++static void LzmaDec_InitStateReal(CLzmaDec *p)
++{
++ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (p->prop.lc + p->prop.lp));
++ UInt32 i;
++ CLzmaProb *probs = p->probs;
++ for (i = 0; i < numProbs; i++)
++ probs[i] = kBitModelTotal >> 1;
++ p->reps[0] = p->reps[1] = p->reps[2] = p->reps[3] = 1;
++ p->state = 0;
++ p->needInitState = 0;
++}
++
++SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen,
++ ELzmaFinishMode finishMode, ELzmaStatus *status)
++{
++ SizeT inSize = *srcLen;
++ (*srcLen) = 0;
++ LzmaDec_WriteRem(p, dicLimit);
++
++ *status = LZMA_STATUS_NOT_SPECIFIED;
++
++ while (p->remainLen != kMatchSpecLenStart)
++ {
++ int checkEndMarkNow;
++
++ if (p->needFlush != 0)
++ {
++ for (; inSize > 0 && p->tempBufSize < RC_INIT_SIZE; (*srcLen)++, inSize--)
++ p->tempBuf[p->tempBufSize++] = *src++;
++ if (p->tempBufSize < RC_INIT_SIZE)
++ {
++ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
++ return SZ_OK;
++ }
++ if (p->tempBuf[0] != 0)
++ return SZ_ERROR_DATA;
++
++ LzmaDec_InitRc(p, p->tempBuf);
++ p->tempBufSize = 0;
++ }
++
++ checkEndMarkNow = 0;
++ if (p->dicPos >= dicLimit)
++ {
++ if (p->remainLen == 0 && p->code == 0)
++ {
++ *status = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK;
++ return SZ_OK;
++ }
++ if (finishMode == LZMA_FINISH_ANY)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_OK;
++ }
++ if (p->remainLen != 0)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_ERROR_DATA;
++ }
++ checkEndMarkNow = 1;
++ }
++
++ if (p->needInitState)
++ LzmaDec_InitStateReal(p);
++
++ if (p->tempBufSize == 0)
++ {
++ SizeT processed;
++ const Byte *bufLimit;
++ if (inSize < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
++ {
++ int dummyRes = LzmaDec_TryDummy(p, src, inSize);
++ if (dummyRes == DUMMY_ERROR)
++ {
++ memcpy(p->tempBuf, src, inSize);
++ p->tempBufSize = (unsigned)inSize;
++ (*srcLen) += inSize;
++ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
++ return SZ_OK;
++ }
++ if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_ERROR_DATA;
++ }
++ bufLimit = src;
++ }
++ else
++ bufLimit = src + inSize - LZMA_REQUIRED_INPUT_MAX;
++ p->buf = src;
++ if (LzmaDec_DecodeReal2(p, dicLimit, bufLimit) != 0)
++ return SZ_ERROR_DATA;
++ processed = (SizeT)(p->buf - src);
++ (*srcLen) += processed;
++ src += processed;
++ inSize -= processed;
++ }
++ else
++ {
++ unsigned rem = p->tempBufSize, lookAhead = 0;
++ while (rem < LZMA_REQUIRED_INPUT_MAX && lookAhead < inSize)
++ p->tempBuf[rem++] = src[lookAhead++];
++ p->tempBufSize = rem;
++ if (rem < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
++ {
++ int dummyRes = LzmaDec_TryDummy(p, p->tempBuf, rem);
++ if (dummyRes == DUMMY_ERROR)
++ {
++ (*srcLen) += lookAhead;
++ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
++ return SZ_OK;
++ }
++ if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_ERROR_DATA;
++ }
++ }
++ p->buf = p->tempBuf;
++ if (LzmaDec_DecodeReal2(p, dicLimit, p->buf) != 0)
++ return SZ_ERROR_DATA;
++ lookAhead -= (rem - (unsigned)(p->buf - p->tempBuf));
++ (*srcLen) += lookAhead;
++ src += lookAhead;
++ inSize -= lookAhead;
++ p->tempBufSize = 0;
++ }
++ }
++ if (p->code == 0)
++ *status = LZMA_STATUS_FINISHED_WITH_MARK;
++ return (p->code == 0) ? SZ_OK : SZ_ERROR_DATA;
++}
++
++SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
++{
++ SizeT outSize = *destLen;
++ SizeT inSize = *srcLen;
++ *srcLen = *destLen = 0;
++ for (;;)
++ {
++ SizeT inSizeCur = inSize, outSizeCur, dicPos;
++ ELzmaFinishMode curFinishMode;
++ SRes res;
++ if (p->dicPos == p->dicBufSize)
++ p->dicPos = 0;
++ dicPos = p->dicPos;
++ if (outSize > p->dicBufSize - dicPos)
++ {
++ outSizeCur = p->dicBufSize;
++ curFinishMode = LZMA_FINISH_ANY;
++ }
++ else
++ {
++ outSizeCur = dicPos + outSize;
++ curFinishMode = finishMode;
++ }
++
++ res = LzmaDec_DecodeToDic(p, outSizeCur, src, &inSizeCur, curFinishMode, status);
++ src += inSizeCur;
++ inSize -= inSizeCur;
++ *srcLen += inSizeCur;
++ outSizeCur = p->dicPos - dicPos;
++ memcpy(dest, p->dic + dicPos, outSizeCur);
++ dest += outSizeCur;
++ outSize -= outSizeCur;
++ *destLen += outSizeCur;
++ if (res != 0)
++ return res;
++ if (outSizeCur == 0 || outSize == 0)
++ return SZ_OK;
++ }
++}
++
++void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->probs);
++ p->probs = 0;
++}
++
++static void LzmaDec_FreeDict(CLzmaDec *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->dic);
++ p->dic = 0;
++}
++
++void LzmaDec_Free(CLzmaDec *p, ISzAlloc *alloc)
++{
++ LzmaDec_FreeProbs(p, alloc);
++ LzmaDec_FreeDict(p, alloc);
++}
++
++SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size)
++{
++ UInt32 dicSize;
++ Byte d;
++
++ if (size < LZMA_PROPS_SIZE)
++ return SZ_ERROR_UNSUPPORTED;
++ else
++ dicSize = data[1] | ((UInt32)data[2] << 8) | ((UInt32)data[3] << 16) | ((UInt32)data[4] << 24);
++
++ if (dicSize < LZMA_DIC_MIN)
++ dicSize = LZMA_DIC_MIN;
++ p->dicSize = dicSize;
++
++ d = data[0];
++ if (d >= (9 * 5 * 5))
++ return SZ_ERROR_UNSUPPORTED;
++
++ p->lc = d % 9;
++ d /= 9;
++ p->pb = d / 5;
++ p->lp = d % 5;
++
++ return SZ_OK;
++}
++
++static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAlloc *alloc)
++{
++ UInt32 numProbs = LzmaProps_GetNumProbs(propNew);
++ if (p->probs == 0 || numProbs != p->numProbs)
++ {
++ LzmaDec_FreeProbs(p, alloc);
++ p->probs = (CLzmaProb *)alloc->Alloc(alloc, numProbs * sizeof(CLzmaProb));
++ p->numProbs = numProbs;
++ if (p->probs == 0)
++ return SZ_ERROR_MEM;
++ }
++ return SZ_OK;
++}
++
++SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
++{
++ CLzmaProps propNew;
++ RINOK(LzmaProps_Decode(&propNew, props, propsSize));
++ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
++ p->prop = propNew;
++ return SZ_OK;
++}
++
++SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
++{
++ CLzmaProps propNew;
++ SizeT dicBufSize;
++ RINOK(LzmaProps_Decode(&propNew, props, propsSize));
++ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
++ dicBufSize = propNew.dicSize;
++ if (p->dic == 0 || dicBufSize != p->dicBufSize)
++ {
++ LzmaDec_FreeDict(p, alloc);
++ p->dic = (Byte *)alloc->Alloc(alloc, dicBufSize);
++ if (p->dic == 0)
++ {
++ LzmaDec_FreeProbs(p, alloc);
++ return SZ_ERROR_MEM;
++ }
++ }
++ p->dicBufSize = dicBufSize;
++ p->prop = propNew;
++ return SZ_OK;
++}
++
++SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
++ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
++ ELzmaStatus *status, ISzAlloc *alloc)
++{
++ CLzmaDec p;
++ SRes res;
++ SizeT inSize = *srcLen;
++ SizeT outSize = *destLen;
++ *srcLen = *destLen = 0;
++ if (inSize < RC_INIT_SIZE)
++ return SZ_ERROR_INPUT_EOF;
++
++ LzmaDec_Construct(&p);
++ res = LzmaDec_AllocateProbs(&p, propData, propSize, alloc);
++ if (res != 0)
++ return res;
++ p.dic = dest;
++ p.dicBufSize = outSize;
++
++ LzmaDec_Init(&p);
++
++ *srcLen = inSize;
++ res = LzmaDec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status);
++
++ if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT)
++ res = SZ_ERROR_INPUT_EOF;
++
++ (*destLen) = p.dicPos;
++ LzmaDec_FreeProbs(&p, alloc);
++ return res;
++}
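A minimal usage sketch for the decoder's one-call entry point, LzmaDecode(), as declared above. It assumes the caller already holds the LZMA_PROPS_SIZE property bytes and the compressed payload in separate buffers, and that ISzAlloc exposes the Alloc/Free members used throughout this file (their exact signatures come from the SDK headers, which this hunk does not show). The helper name lzma_decompress and the malloc-based allocator are illustrative only, not part of the patched sources.

#include <stdlib.h>
#include "LzmaDec.h"

/* Trivial allocator wrapping malloc/free in the ISzAlloc interface that
   LzmaDecode()/LzmaDec_AllocateProbs() expect. */
static void *SzAlloc(void *p, size_t size) { (void)p; return malloc(size); }
static void SzFree(void *p, void *address) { (void)p; free(address); }
static ISzAlloc g_Alloc = { SzAlloc, SzFree };

/* Decompress src[0..srcSize) into dest[0..destSize); props points at the
   LZMA_PROPS_SIZE property bytes (lc/lp/pb byte plus 4-byte dictionary size,
   as parsed by LzmaProps_Decode() above). Returns the number of bytes
   written, or -1 on error. */
static int lzma_decompress(Byte *dest, SizeT destSize,
                           const Byte *props,
                           const Byte *src, SizeT srcSize)
{
  SizeT destLen = destSize;
  SizeT srcLen = srcSize;
  ELzmaStatus status;
  SRes res = LzmaDecode(dest, &destLen, src, &srcLen,
                        props, LZMA_PROPS_SIZE,
                        LZMA_FINISH_ANY, &status, &g_Alloc);
  if (res != SZ_OK)
    return -1;
  /* With LZMA_FINISH_ANY the stream may legitimately end either with an end
     mark or once destLen bytes have been produced; inspect status if a
     stricter guarantee is needed. */
  return (int)destLen;
}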
+--- /dev/null
++++ b/lib/lzma/LzmaEnc.c
+@@ -0,0 +1,2271 @@
++/* LzmaEnc.c -- LZMA Encoder
++2009-11-24 : Igor Pavlov : Public domain */
++
++#include <string.h>
++
++/* #define SHOW_STAT */
++/* #define SHOW_STAT2 */
++
++#if defined(SHOW_STAT) || defined(SHOW_STAT2)
++#include <stdio.h>
++#endif
++
++#include "LzmaEnc.h"
++
++/* disable MT */
++#define _7ZIP_ST
++
++#include "LzFind.h"
++#ifndef _7ZIP_ST
++#include "LzFindMt.h"
++#endif
++
++#ifdef SHOW_STAT
++static int ttt = 0;
++#endif
++
++#define kBlockSizeMax ((1 << LZMA_NUM_BLOCK_SIZE_BITS) - 1)
++
++#define kBlockSize (9 << 10)
++#define kUnpackBlockSize (1 << 18)
++#define kMatchArraySize (1 << 21)
++#define kMatchRecordMaxSize ((LZMA_MATCH_LEN_MAX * 2 + 3) * LZMA_MATCH_LEN_MAX)
++
++#define kNumMaxDirectBits (31)
++
++#define kNumTopBits 24
++#define kTopValue ((UInt32)1 << kNumTopBits)
++
++#define kNumBitModelTotalBits 11
++#define kBitModelTotal (1 << kNumBitModelTotalBits)
++#define kNumMoveBits 5
++#define kProbInitValue (kBitModelTotal >> 1)
++
++#define kNumMoveReducingBits 4
++#define kNumBitPriceShiftBits 4
++#define kBitPrice (1 << kNumBitPriceShiftBits)
++
++void LzmaEncProps_Init(CLzmaEncProps *p)
++{
++ p->level = 5;
++ p->dictSize = p->mc = 0;
++ p->lc = p->lp = p->pb = p->algo = p->fb = p->btMode = p->numHashBytes = p->numThreads = -1;
++ p->writeEndMark = 0;
++}
++
++void LzmaEncProps_Normalize(CLzmaEncProps *p)
++{
++ int level = p->level;
++ if (level < 0) level = 5;
++ p->level = level;
++ if (p->dictSize == 0) p->dictSize = (level <= 5 ? (1 << (level * 2 + 14)) : (level == 6 ? (1 << 25) : (1 << 26)));
++ if (p->lc < 0) p->lc = 3;
++ if (p->lp < 0) p->lp = 0;
++ if (p->pb < 0) p->pb = 2;
++ if (p->algo < 0) p->algo = (level < 5 ? 0 : 1);
++ if (p->fb < 0) p->fb = (level < 7 ? 32 : 64);
++ if (p->btMode < 0) p->btMode = (p->algo == 0 ? 0 : 1);
++ if (p->numHashBytes < 0) p->numHashBytes = 4;
++ if (p->mc == 0) p->mc = (16 + (p->fb >> 1)) >> (p->btMode ? 0 : 1);
++ if (p->numThreads < 0)
++ p->numThreads =
++ #ifndef _7ZIP_ST
++ ((p->btMode && p->algo) ? 2 : 1);
++ #else
++ 1;
++ #endif
++}
++
++UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2)
++{
++ CLzmaEncProps props = *props2;
++ LzmaEncProps_Normalize(&props);
++ return props.dictSize;
++}
++
++/* #define LZMA_LOG_BSR */
++/* Define it for Intel's CPU */
++
++
++#ifdef LZMA_LOG_BSR
++
++#define kDicLogSizeMaxCompress 30
++
++#define BSR2_RET(pos, res) { unsigned long i; _BitScanReverse(&i, (pos)); res = (i + i) + ((pos >> (i - 1)) & 1); }
++
++UInt32 GetPosSlot1(UInt32 pos)
++{
++ UInt32 res;
++ BSR2_RET(pos, res);
++ return res;
++}
++#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
++#define GetPosSlot(pos, res) { if (pos < 2) res = pos; else BSR2_RET(pos, res); }
++
++#else
++
++#define kNumLogBits (9 + (int)sizeof(size_t) / 2)
++#define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7)
++
++void LzmaEnc_FastPosInit(Byte *g_FastPos)
++{
++ int c = 2, slotFast;
++ g_FastPos[0] = 0;
++ g_FastPos[1] = 1;
++
++ for (slotFast = 2; slotFast < kNumLogBits * 2; slotFast++)
++ {
++ UInt32 k = (1 << ((slotFast >> 1) - 1));
++ UInt32 j;
++ for (j = 0; j < k; j++, c++)
++ g_FastPos[c] = (Byte)slotFast;
++ }
++}
++
++#define BSR2_RET(pos, res) { UInt32 i = 6 + ((kNumLogBits - 1) & \
++ (0 - (((((UInt32)1 << (kNumLogBits + 6)) - 1) - pos) >> 31))); \
++ res = p->g_FastPos[pos >> i] + (i * 2); }
++/*
++#define BSR2_RET(pos, res) { res = (pos < (1 << (kNumLogBits + 6))) ? \
++ p->g_FastPos[pos >> 6] + 12 : \
++ p->g_FastPos[pos >> (6 + kNumLogBits - 1)] + (6 + (kNumLogBits - 1)) * 2; }
++*/
++
++#define GetPosSlot1(pos) p->g_FastPos[pos]
++#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
++#define GetPosSlot(pos, res) { if (pos < kNumFullDistances) res = p->g_FastPos[pos]; else BSR2_RET(pos, res); }
++
++#endif
++
++
++#define LZMA_NUM_REPS 4
++
++typedef unsigned CState;
++
++typedef struct
++{
++ UInt32 price;
++
++ CState state;
++ int prev1IsChar;
++ int prev2;
++
++ UInt32 posPrev2;
++ UInt32 backPrev2;
++
++ UInt32 posPrev;
++ UInt32 backPrev;
++ UInt32 backs[LZMA_NUM_REPS];
++} COptimal;
++
++#define kNumOpts (1 << 12)
++
++#define kNumLenToPosStates 4
++#define kNumPosSlotBits 6
++#define kDicLogSizeMin 0
++#define kDicLogSizeMax 32
++#define kDistTableSizeMax (kDicLogSizeMax * 2)
++
++
++#define kNumAlignBits 4
++#define kAlignTableSize (1 << kNumAlignBits)
++#define kAlignMask (kAlignTableSize - 1)
++
++#define kStartPosModelIndex 4
++#define kEndPosModelIndex 14
++#define kNumPosModels (kEndPosModelIndex - kStartPosModelIndex)
++
++#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
++
++#ifdef _LZMA_PROB32
++#define CLzmaProb UInt32
++#else
++#define CLzmaProb UInt16
++#endif
++
++#define LZMA_PB_MAX 4
++#define LZMA_LC_MAX 8
++#define LZMA_LP_MAX 4
++
++#define LZMA_NUM_PB_STATES_MAX (1 << LZMA_PB_MAX)
++
++
++#define kLenNumLowBits 3
++#define kLenNumLowSymbols (1 << kLenNumLowBits)
++#define kLenNumMidBits 3
++#define kLenNumMidSymbols (1 << kLenNumMidBits)
++#define kLenNumHighBits 8
++#define kLenNumHighSymbols (1 << kLenNumHighBits)
++
++#define kLenNumSymbolsTotal (kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols)
++
++#define LZMA_MATCH_LEN_MIN 2
++#define LZMA_MATCH_LEN_MAX (LZMA_MATCH_LEN_MIN + kLenNumSymbolsTotal - 1)
++
++#define kNumStates 12
++
++typedef struct
++{
++ CLzmaProb choice;
++ CLzmaProb choice2;
++ CLzmaProb low[LZMA_NUM_PB_STATES_MAX << kLenNumLowBits];
++ CLzmaProb mid[LZMA_NUM_PB_STATES_MAX << kLenNumMidBits];
++ CLzmaProb high[kLenNumHighSymbols];
++} CLenEnc;
++
++typedef struct
++{
++ CLenEnc p;
++ UInt32 prices[LZMA_NUM_PB_STATES_MAX][kLenNumSymbolsTotal];
++ UInt32 tableSize;
++ UInt32 counters[LZMA_NUM_PB_STATES_MAX];
++} CLenPriceEnc;
++
++typedef struct
++{
++ UInt32 range;
++ Byte cache;
++ UInt64 low;
++ UInt64 cacheSize;
++ Byte *buf;
++ Byte *bufLim;
++ Byte *bufBase;
++ ISeqOutStream *outStream;
++ UInt64 processed;
++ SRes res;
++} CRangeEnc;
++
++typedef struct
++{
++ CLzmaProb *litProbs;
++
++ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
++ CLzmaProb isRep[kNumStates];
++ CLzmaProb isRepG0[kNumStates];
++ CLzmaProb isRepG1[kNumStates];
++ CLzmaProb isRepG2[kNumStates];
++ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
++
++ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
++ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex];
++ CLzmaProb posAlignEncoder[1 << kNumAlignBits];
++
++ CLenPriceEnc lenEnc;
++ CLenPriceEnc repLenEnc;
++
++ UInt32 reps[LZMA_NUM_REPS];
++ UInt32 state;
++} CSaveState;
++
++typedef struct
++{
++ IMatchFinder matchFinder;
++ void *matchFinderObj;
++
++ #ifndef _7ZIP_ST
++ Bool mtMode;
++ CMatchFinderMt matchFinderMt;
++ #endif
++
++ CMatchFinder matchFinderBase;
++
++ #ifndef _7ZIP_ST
++ Byte pad[128];
++ #endif
++
++ UInt32 optimumEndIndex;
++ UInt32 optimumCurrentIndex;
++
++ UInt32 longestMatchLength;
++ UInt32 numPairs;
++ UInt32 numAvail;
++ COptimal opt[kNumOpts];
++
++ #ifndef LZMA_LOG_BSR
++ Byte g_FastPos[1 << kNumLogBits];
++ #endif
++
++ UInt32 ProbPrices[kBitModelTotal >> kNumMoveReducingBits];
++ UInt32 matches[LZMA_MATCH_LEN_MAX * 2 + 2 + 1];
++ UInt32 numFastBytes;
++ UInt32 additionalOffset;
++ UInt32 reps[LZMA_NUM_REPS];
++ UInt32 state;
++
++ UInt32 posSlotPrices[kNumLenToPosStates][kDistTableSizeMax];
++ UInt32 distancesPrices[kNumLenToPosStates][kNumFullDistances];
++ UInt32 alignPrices[kAlignTableSize];
++ UInt32 alignPriceCount;
++
++ UInt32 distTableSize;
++
++ unsigned lc, lp, pb;
++ unsigned lpMask, pbMask;
++
++ CLzmaProb *litProbs;
++
++ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
++ CLzmaProb isRep[kNumStates];
++ CLzmaProb isRepG0[kNumStates];
++ CLzmaProb isRepG1[kNumStates];
++ CLzmaProb isRepG2[kNumStates];
++ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
++
++ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
++ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex];
++ CLzmaProb posAlignEncoder[1 << kNumAlignBits];
++
++ CLenPriceEnc lenEnc;
++ CLenPriceEnc repLenEnc;
++
++ unsigned lclp;
++
++ Bool fastMode;
++
++ CRangeEnc rc;
++
++ Bool writeEndMark;
++ UInt64 nowPos64;
++ UInt32 matchPriceCount;
++ Bool finished;
++ Bool multiThread;
++
++ SRes result;
++ UInt32 dictSize;
++ UInt32 matchFinderCycles;
++
++ int needInit;
++
++ CSaveState saveState;
++} CLzmaEnc;
++
++void LzmaEnc_SaveState(CLzmaEncHandle pp)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ CSaveState *dest = &p->saveState;
++ int i;
++ dest->lenEnc = p->lenEnc;
++ dest->repLenEnc = p->repLenEnc;
++ dest->state = p->state;
++
++ for (i = 0; i < kNumStates; i++)
++ {
++ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i]));
++ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i]));
++ }
++ for (i = 0; i < kNumLenToPosStates; i++)
++ memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i]));
++ memcpy(dest->isRep, p->isRep, sizeof(p->isRep));
++ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0));
++ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1));
++ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2));
++ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders));
++ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder));
++ memcpy(dest->reps, p->reps, sizeof(p->reps));
++ memcpy(dest->litProbs, p->litProbs, (0x300 << p->lclp) * sizeof(CLzmaProb));
++}
++
++void LzmaEnc_RestoreState(CLzmaEncHandle pp)
++{
++ CLzmaEnc *dest = (CLzmaEnc *)pp;
++ const CSaveState *p = &dest->saveState;
++ int i;
++ dest->lenEnc = p->lenEnc;
++ dest->repLenEnc = p->repLenEnc;
++ dest->state = p->state;
++
++ for (i = 0; i < kNumStates; i++)
++ {
++ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i]));
++ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i]));
++ }
++ for (i = 0; i < kNumLenToPosStates; i++)
++ memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i]));
++ memcpy(dest->isRep, p->isRep, sizeof(p->isRep));
++ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0));
++ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1));
++ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2));
++ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders));
++ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder));
++ memcpy(dest->reps, p->reps, sizeof(p->reps));
++ memcpy(dest->litProbs, p->litProbs, (0x300 << dest->lclp) * sizeof(CLzmaProb));
++}
++
++SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ CLzmaEncProps props = *props2;
++ LzmaEncProps_Normalize(&props);
++
++ if (props.lc > LZMA_LC_MAX || props.lp > LZMA_LP_MAX || props.pb > LZMA_PB_MAX ||
++ props.dictSize > (1 << kDicLogSizeMaxCompress) || props.dictSize > (1 << 30))
++ return SZ_ERROR_PARAM;
++ p->dictSize = props.dictSize;
++ p->matchFinderCycles = props.mc;
++ {
++ unsigned fb = props.fb;
++ if (fb < 5)
++ fb = 5;
++ if (fb > LZMA_MATCH_LEN_MAX)
++ fb = LZMA_MATCH_LEN_MAX;
++ p->numFastBytes = fb;
++ }
++ p->lc = props.lc;
++ p->lp = props.lp;
++ p->pb = props.pb;
++ p->fastMode = (props.algo == 0);
++ p->matchFinderBase.btMode = props.btMode;
++ {
++ UInt32 numHashBytes = 4;
++ if (props.btMode)
++ {
++ if (props.numHashBytes < 2)
++ numHashBytes = 2;
++ else if (props.numHashBytes < 4)
++ numHashBytes = props.numHashBytes;
++ }
++ p->matchFinderBase.numHashBytes = numHashBytes;
++ }
++
++ p->matchFinderBase.cutValue = props.mc;
++
++ p->writeEndMark = props.writeEndMark;
++
++ #ifndef _7ZIP_ST
++ /*
++ if (newMultiThread != _multiThread)
++ {
++ ReleaseMatchFinder();
++ _multiThread = newMultiThread;
++ }
++ */
++ p->multiThread = (props.numThreads > 1);
++ #endif
++
++ return SZ_OK;
++}
++
++static const int kLiteralNextStates[kNumStates] = {0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5};
++static const int kMatchNextStates[kNumStates] = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10};
++static const int kRepNextStates[kNumStates] = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11};
++static const int kShortRepNextStates[kNumStates]= {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11};
++
++#define IsCharState(s) ((s) < 7)
++
++#define GetLenToPosState(len) (((len) < kNumLenToPosStates + 1) ? (len) - 2 : kNumLenToPosStates - 1)
++
++#define kInfinityPrice (1 << 30)
++
++static void RangeEnc_Construct(CRangeEnc *p)
++{
++ p->outStream = 0;
++ p->bufBase = 0;
++}
++
++#define RangeEnc_GetProcessed(p) ((p)->processed + ((p)->buf - (p)->bufBase) + (p)->cacheSize)
++
++#define RC_BUF_SIZE (1 << 16)
++static int RangeEnc_Alloc(CRangeEnc *p, ISzAlloc *alloc)
++{
++ if (p->bufBase == 0)
++ {
++ p->bufBase = (Byte *)alloc->Alloc(alloc, RC_BUF_SIZE);
++ if (p->bufBase == 0)
++ return 0;
++ p->bufLim = p->bufBase + RC_BUF_SIZE;
++ }
++ return 1;
++}
++
++static void RangeEnc_Free(CRangeEnc *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->bufBase);
++ p->bufBase = 0;
++}
++
++static void RangeEnc_Init(CRangeEnc *p)
++{
++ /* Stream.Init(); */
++ p->low = 0;
++ p->range = 0xFFFFFFFF;
++ p->cacheSize = 1;
++ p->cache = 0;
++
++ p->buf = p->bufBase;
++
++ p->processed = 0;
++ p->res = SZ_OK;
++}
++
++static void RangeEnc_FlushStream(CRangeEnc *p)
++{
++ size_t num;
++ if (p->res != SZ_OK)
++ return;
++ num = p->buf - p->bufBase;
++ if (num != p->outStream->Write(p->outStream, p->bufBase, num))
++ p->res = SZ_ERROR_WRITE;
++ p->processed += num;
++ p->buf = p->bufBase;
++}
++
++static void MY_FAST_CALL RangeEnc_ShiftLow(CRangeEnc *p)
++{
++ if ((UInt32)p->low < (UInt32)0xFF000000 || (int)(p->low >> 32) != 0)
++ {
++ Byte temp = p->cache;
++ do
++ {
++ Byte *buf = p->buf;
++ *buf++ = (Byte)(temp + (Byte)(p->low >> 32));
++ p->buf = buf;
++ if (buf == p->bufLim)
++ RangeEnc_FlushStream(p);
++ temp = 0xFF;
++ }
++ while (--p->cacheSize != 0);
++ p->cache = (Byte)((UInt32)p->low >> 24);
++ }
++ p->cacheSize++;
++ p->low = (UInt32)p->low << 8;
++}
++
++static void RangeEnc_FlushData(CRangeEnc *p)
++{
++ int i;
++ for (i = 0; i < 5; i++)
++ RangeEnc_ShiftLow(p);
++}
++
++static void RangeEnc_EncodeDirectBits(CRangeEnc *p, UInt32 value, int numBits)
++{
++ do
++ {
++ p->range >>= 1;
++ p->low += p->range & (0 - ((value >> --numBits) & 1));
++ if (p->range < kTopValue)
++ {
++ p->range <<= 8;
++ RangeEnc_ShiftLow(p);
++ }
++ }
++ while (numBits != 0);
++}
++
++static void RangeEnc_EncodeBit(CRangeEnc *p, CLzmaProb *prob, UInt32 symbol)
++{
++ UInt32 ttt = *prob;
++ UInt32 newBound = (p->range >> kNumBitModelTotalBits) * ttt;
++ if (symbol == 0)
++ {
++ p->range = newBound;
++ ttt += (kBitModelTotal - ttt) >> kNumMoveBits;
++ }
++ else
++ {
++ p->low += newBound;
++ p->range -= newBound;
++ ttt -= ttt >> kNumMoveBits;
++ }
++ *prob = (CLzmaProb)ttt;
++ if (p->range < kTopValue)
++ {
++ p->range <<= 8;
++ RangeEnc_ShiftLow(p);
++ }
++}
++
++static void LitEnc_Encode(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol)
++{
++ symbol |= 0x100;
++ do
++ {
++ RangeEnc_EncodeBit(p, probs + (symbol >> 8), (symbol >> 7) & 1);
++ symbol <<= 1;
++ }
++ while (symbol < 0x10000);
++}
++
++static void LitEnc_EncodeMatched(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol, UInt32 matchByte)
++{
++ UInt32 offs = 0x100;
++ symbol |= 0x100;
++ do
++ {
++ matchByte <<= 1;
++ RangeEnc_EncodeBit(p, probs + (offs + (matchByte & offs) + (symbol >> 8)), (symbol >> 7) & 1);
++ symbol <<= 1;
++ offs &= ~(matchByte ^ symbol);
++ }
++ while (symbol < 0x10000);
++}
++
++void LzmaEnc_InitPriceTables(UInt32 *ProbPrices)
++{
++ UInt32 i;
++ for (i = (1 << kNumMoveReducingBits) / 2; i < kBitModelTotal; i += (1 << kNumMoveReducingBits))
++ {
++ const int kCyclesBits = kNumBitPriceShiftBits;
++ UInt32 w = i;
++ UInt32 bitCount = 0;
++ int j;
++ for (j = 0; j < kCyclesBits; j++)
++ {
++ w = w * w;
++ bitCount <<= 1;
++ while (w >= ((UInt32)1 << 16))
++ {
++ w >>= 1;
++ bitCount++;
++ }
++ }
++ ProbPrices[i >> kNumMoveReducingBits] = ((kNumBitModelTotalBits << kCyclesBits) - 15 - bitCount);
++ }
++}
++
++
++#define GET_PRICE(prob, symbol) \
++ p->ProbPrices[((prob) ^ (((-(int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
++
++#define GET_PRICEa(prob, symbol) \
++ ProbPrices[((prob) ^ ((-((int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
++
++#define GET_PRICE_0(prob) p->ProbPrices[(prob) >> kNumMoveReducingBits]
++#define GET_PRICE_1(prob) p->ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
++
++#define GET_PRICE_0a(prob) ProbPrices[(prob) >> kNumMoveReducingBits]
++#define GET_PRICE_1a(prob) ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
++
++static UInt32 LitEnc_GetPrice(const CLzmaProb *probs, UInt32 symbol, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ symbol |= 0x100;
++ do
++ {
++ price += GET_PRICEa(probs[symbol >> 8], (symbol >> 7) & 1);
++ symbol <<= 1;
++ }
++ while (symbol < 0x10000);
++ return price;
++}
++
++static UInt32 LitEnc_GetPriceMatched(const CLzmaProb *probs, UInt32 symbol, UInt32 matchByte, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ UInt32 offs = 0x100;
++ symbol |= 0x100;
++ do
++ {
++ matchByte <<= 1;
++ price += GET_PRICEa(probs[offs + (matchByte & offs) + (symbol >> 8)], (symbol >> 7) & 1);
++ symbol <<= 1;
++ offs &= ~(matchByte ^ symbol);
++ }
++ while (symbol < 0x10000);
++ return price;
++}
++
++
++static void RcTree_Encode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol)
++{
++ UInt32 m = 1;
++ int i;
++ for (i = numBitLevels; i != 0;)
++ {
++ UInt32 bit;
++ i--;
++ bit = (symbol >> i) & 1;
++ RangeEnc_EncodeBit(rc, probs + m, bit);
++ m = (m << 1) | bit;
++ }
++}
++
++static void RcTree_ReverseEncode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol)
++{
++ UInt32 m = 1;
++ int i;
++ for (i = 0; i < numBitLevels; i++)
++ {
++ UInt32 bit = symbol & 1;
++ RangeEnc_EncodeBit(rc, probs + m, bit);
++ m = (m << 1) | bit;
++ symbol >>= 1;
++ }
++}
++
++static UInt32 RcTree_GetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ symbol |= (1 << numBitLevels);
++ while (symbol != 1)
++ {
++ price += GET_PRICEa(probs[symbol >> 1], symbol & 1);
++ symbol >>= 1;
++ }
++ return price;
++}
++
++static UInt32 RcTree_ReverseGetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ UInt32 m = 1;
++ int i;
++ for (i = numBitLevels; i != 0; i--)
++ {
++ UInt32 bit = symbol & 1;
++ symbol >>= 1;
++ price += GET_PRICEa(probs[m], bit);
++ m = (m << 1) | bit;
++ }
++ return price;
++}
++
++
++static void LenEnc_Init(CLenEnc *p)
++{
++ unsigned i;
++ p->choice = p->choice2 = kProbInitValue;
++ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumLowBits); i++)
++ p->low[i] = kProbInitValue;
++ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumMidBits); i++)
++ p->mid[i] = kProbInitValue;
++ for (i = 0; i < kLenNumHighSymbols; i++)
++ p->high[i] = kProbInitValue;
++}
++
++static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState)
++{
++ if (symbol < kLenNumLowSymbols)
++ {
++ RangeEnc_EncodeBit(rc, &p->choice, 0);
++ RcTree_Encode(rc, p->low + (posState << kLenNumLowBits), kLenNumLowBits, symbol);
++ }
++ else
++ {
++ RangeEnc_EncodeBit(rc, &p->choice, 1);
++ if (symbol < kLenNumLowSymbols + kLenNumMidSymbols)
++ {
++ RangeEnc_EncodeBit(rc, &p->choice2, 0);
++ RcTree_Encode(rc, p->mid + (posState << kLenNumMidBits), kLenNumMidBits, symbol - kLenNumLowSymbols);
++ }
++ else
++ {
++ RangeEnc_EncodeBit(rc, &p->choice2, 1);
++ RcTree_Encode(rc, p->high, kLenNumHighBits, symbol - kLenNumLowSymbols - kLenNumMidSymbols);
++ }
++ }
++}
++
++static void LenEnc_SetPrices(CLenEnc *p, UInt32 posState, UInt32 numSymbols, UInt32 *prices, UInt32 *ProbPrices)
++{
++ UInt32 a0 = GET_PRICE_0a(p->choice);
++ UInt32 a1 = GET_PRICE_1a(p->choice);
++ UInt32 b0 = a1 + GET_PRICE_0a(p->choice2);
++ UInt32 b1 = a1 + GET_PRICE_1a(p->choice2);
++ UInt32 i = 0;
++ for (i = 0; i < kLenNumLowSymbols; i++)
++ {
++ if (i >= numSymbols)
++ return;
++ prices[i] = a0 + RcTree_GetPrice(p->low + (posState << kLenNumLowBits), kLenNumLowBits, i, ProbPrices);
++ }
++ for (; i < kLenNumLowSymbols + kLenNumMidSymbols; i++)
++ {
++ if (i >= numSymbols)
++ return;
++ prices[i] = b0 + RcTree_GetPrice(p->mid + (posState << kLenNumMidBits), kLenNumMidBits, i - kLenNumLowSymbols, ProbPrices);
++ }
++ for (; i < numSymbols; i++)
++ prices[i] = b1 + RcTree_GetPrice(p->high, kLenNumHighBits, i - kLenNumLowSymbols - kLenNumMidSymbols, ProbPrices);
++}
++
++static void MY_FAST_CALL LenPriceEnc_UpdateTable(CLenPriceEnc *p, UInt32 posState, UInt32 *ProbPrices)
++{
++ LenEnc_SetPrices(&p->p, posState, p->tableSize, p->prices[posState], ProbPrices);
++ p->counters[posState] = p->tableSize;
++}
++
++static void LenPriceEnc_UpdateTables(CLenPriceEnc *p, UInt32 numPosStates, UInt32 *ProbPrices)
++{
++ UInt32 posState;
++ for (posState = 0; posState < numPosStates; posState++)
++ LenPriceEnc_UpdateTable(p, posState, ProbPrices);
++}
++
++static void LenEnc_Encode2(CLenPriceEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState, Bool updatePrice, UInt32 *ProbPrices)
++{
++ LenEnc_Encode(&p->p, rc, symbol, posState);
++ if (updatePrice)
++ if (--p->counters[posState] == 0)
++ LenPriceEnc_UpdateTable(p, posState, ProbPrices);
++}
++
++
++
++
++static void MovePos(CLzmaEnc *p, UInt32 num)
++{
++ #ifdef SHOW_STAT
++ ttt += num;
++ printf("\n MovePos %d", num);
++ #endif
++ if (num != 0)
++ {
++ p->additionalOffset += num;
++ p->matchFinder.Skip(p->matchFinderObj, num);
++ }
++}
++
++static UInt32 ReadMatchDistances(CLzmaEnc *p, UInt32 *numDistancePairsRes)
++{
++ UInt32 lenRes = 0, numPairs;
++ p->numAvail = p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
++ numPairs = p->matchFinder.GetMatches(p->matchFinderObj, p->matches);
++ #ifdef SHOW_STAT
++ printf("\n i = %d numPairs = %d ", ttt, numPairs / 2);
++ ttt++;
++ {
++ UInt32 i;
++ for (i = 0; i < numPairs; i += 2)
++ printf("%2d %6d | ", p->matches[i], p->matches[i + 1]);
++ }
++ #endif
++ if (numPairs > 0)
++ {
++ lenRes = p->matches[numPairs - 2];
++ if (lenRes == p->numFastBytes)
++ {
++ const Byte *pby = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ UInt32 distance = p->matches[numPairs - 1] + 1;
++ UInt32 numAvail = p->numAvail;
++ if (numAvail > LZMA_MATCH_LEN_MAX)
++ numAvail = LZMA_MATCH_LEN_MAX;
++ {
++ const Byte *pby2 = pby - distance;
++ for (; lenRes < numAvail && pby[lenRes] == pby2[lenRes]; lenRes++);
++ }
++ }
++ }
++ p->additionalOffset++;
++ *numDistancePairsRes = numPairs;
++ return lenRes;
++}
++
++
++#define MakeAsChar(p) (p)->backPrev = (UInt32)(-1); (p)->prev1IsChar = False;
++#define MakeAsShortRep(p) (p)->backPrev = 0; (p)->prev1IsChar = False;
++#define IsShortRep(p) ((p)->backPrev == 0)
++
++static UInt32 GetRepLen1Price(CLzmaEnc *p, UInt32 state, UInt32 posState)
++{
++ return
++ GET_PRICE_0(p->isRepG0[state]) +
++ GET_PRICE_0(p->isRep0Long[state][posState]);
++}
++
++static UInt32 GetPureRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 state, UInt32 posState)
++{
++ UInt32 price;
++ if (repIndex == 0)
++ {
++ price = GET_PRICE_0(p->isRepG0[state]);
++ price += GET_PRICE_1(p->isRep0Long[state][posState]);
++ }
++ else
++ {
++ price = GET_PRICE_1(p->isRepG0[state]);
++ if (repIndex == 1)
++ price += GET_PRICE_0(p->isRepG1[state]);
++ else
++ {
++ price += GET_PRICE_1(p->isRepG1[state]);
++ price += GET_PRICE(p->isRepG2[state], repIndex - 2);
++ }
++ }
++ return price;
++}
++
++static UInt32 GetRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 len, UInt32 state, UInt32 posState)
++{
++ return p->repLenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN] +
++ GetPureRepPrice(p, repIndex, state, posState);
++}
++
++static UInt32 Backward(CLzmaEnc *p, UInt32 *backRes, UInt32 cur)
++{
++ UInt32 posMem = p->opt[cur].posPrev;
++ UInt32 backMem = p->opt[cur].backPrev;
++ p->optimumEndIndex = cur;
++ do
++ {
++ if (p->opt[cur].prev1IsChar)
++ {
++ MakeAsChar(&p->opt[posMem])
++ p->opt[posMem].posPrev = posMem - 1;
++ if (p->opt[cur].prev2)
++ {
++ p->opt[posMem - 1].prev1IsChar = False;
++ p->opt[posMem - 1].posPrev = p->opt[cur].posPrev2;
++ p->opt[posMem - 1].backPrev = p->opt[cur].backPrev2;
++ }
++ }
++ {
++ UInt32 posPrev = posMem;
++ UInt32 backCur = backMem;
++
++ backMem = p->opt[posPrev].backPrev;
++ posMem = p->opt[posPrev].posPrev;
++
++ p->opt[posPrev].backPrev = backCur;
++ p->opt[posPrev].posPrev = cur;
++ cur = posPrev;
++ }
++ }
++ while (cur != 0);
++ *backRes = p->opt[0].backPrev;
++ p->optimumCurrentIndex = p->opt[0].posPrev;
++ return p->optimumCurrentIndex;
++}
++
++#define LIT_PROBS(pos, prevByte) (p->litProbs + ((((pos) & p->lpMask) << p->lc) + ((prevByte) >> (8 - p->lc))) * 0x300)
++
++static UInt32 GetOptimum(CLzmaEnc *p, UInt32 position, UInt32 *backRes)
++{
++ UInt32 numAvail, mainLen, numPairs, repMaxIndex, i, posState, lenEnd, len, cur;
++ UInt32 matchPrice, repMatchPrice, normalMatchPrice;
++ UInt32 reps[LZMA_NUM_REPS], repLens[LZMA_NUM_REPS];
++ UInt32 *matches;
++ const Byte *data;
++ Byte curByte, matchByte;
++ if (p->optimumEndIndex != p->optimumCurrentIndex)
++ {
++ const COptimal *opt = &p->opt[p->optimumCurrentIndex];
++ UInt32 lenRes = opt->posPrev - p->optimumCurrentIndex;
++ *backRes = opt->backPrev;
++ p->optimumCurrentIndex = opt->posPrev;
++ return lenRes;
++ }
++ p->optimumCurrentIndex = p->optimumEndIndex = 0;
++
++ if (p->additionalOffset == 0)
++ mainLen = ReadMatchDistances(p, &numPairs);
++ else
++ {
++ mainLen = p->longestMatchLength;
++ numPairs = p->numPairs;
++ }
++
++ numAvail = p->numAvail;
++ if (numAvail < 2)
++ {
++ *backRes = (UInt32)(-1);
++ return 1;
++ }
++ if (numAvail > LZMA_MATCH_LEN_MAX)
++ numAvail = LZMA_MATCH_LEN_MAX;
++
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ repMaxIndex = 0;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 lenTest;
++ const Byte *data2;
++ reps[i] = p->reps[i];
++ data2 = data - (reps[i] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ {
++ repLens[i] = 0;
++ continue;
++ }
++ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++);
++ repLens[i] = lenTest;
++ if (lenTest > repLens[repMaxIndex])
++ repMaxIndex = i;
++ }
++ if (repLens[repMaxIndex] >= p->numFastBytes)
++ {
++ UInt32 lenRes;
++ *backRes = repMaxIndex;
++ lenRes = repLens[repMaxIndex];
++ MovePos(p, lenRes - 1);
++ return lenRes;
++ }
++
++ matches = p->matches;
++ if (mainLen >= p->numFastBytes)
++ {
++ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS;
++ MovePos(p, mainLen - 1);
++ return mainLen;
++ }
++ curByte = *data;
++ matchByte = *(data - (reps[0] + 1));
++
++ if (mainLen < 2 && curByte != matchByte && repLens[repMaxIndex] < 2)
++ {
++ *backRes = (UInt32)-1;
++ return 1;
++ }
++
++ p->opt[0].state = (CState)p->state;
++
++ posState = (position & p->pbMask);
++
++ {
++ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
++ p->opt[1].price = GET_PRICE_0(p->isMatch[p->state][posState]) +
++ (!IsCharState(p->state) ?
++ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) :
++ LitEnc_GetPrice(probs, curByte, p->ProbPrices));
++ }
++
++ MakeAsChar(&p->opt[1]);
++
++ matchPrice = GET_PRICE_1(p->isMatch[p->state][posState]);
++ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[p->state]);
++
++ if (matchByte == curByte)
++ {
++ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, p->state, posState);
++ if (shortRepPrice < p->opt[1].price)
++ {
++ p->opt[1].price = shortRepPrice;
++ MakeAsShortRep(&p->opt[1]);
++ }
++ }
++ lenEnd = ((mainLen >= repLens[repMaxIndex]) ? mainLen : repLens[repMaxIndex]);
++
++ if (lenEnd < 2)
++ {
++ *backRes = p->opt[1].backPrev;
++ return 1;
++ }
++
++ p->opt[1].posPrev = 0;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ p->opt[0].backs[i] = reps[i];
++
++ len = lenEnd;
++ do
++ p->opt[len--].price = kInfinityPrice;
++ while (len >= 2);
++
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 repLen = repLens[i];
++ UInt32 price;
++ if (repLen < 2)
++ continue;
++ price = repMatchPrice + GetPureRepPrice(p, i, p->state, posState);
++ do
++ {
++ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][repLen - 2];
++ COptimal *opt = &p->opt[repLen];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = 0;
++ opt->backPrev = i;
++ opt->prev1IsChar = False;
++ }
++ }
++ while (--repLen >= 2);
++ }
++
++ normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[p->state]);
++
++ len = ((repLens[0] >= 2) ? repLens[0] + 1 : 2);
++ if (len <= mainLen)
++ {
++ UInt32 offs = 0;
++ while (len > matches[offs])
++ offs += 2;
++ for (; ; len++)
++ {
++ COptimal *opt;
++ UInt32 distance = matches[offs + 1];
++
++ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN];
++ UInt32 lenToPosState = GetLenToPosState(len);
++ if (distance < kNumFullDistances)
++ curAndLenPrice += p->distancesPrices[lenToPosState][distance];
++ else
++ {
++ UInt32 slot;
++ GetPosSlot2(distance, slot);
++ curAndLenPrice += p->alignPrices[distance & kAlignMask] + p->posSlotPrices[lenToPosState][slot];
++ }
++ opt = &p->opt[len];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = 0;
++ opt->backPrev = distance + LZMA_NUM_REPS;
++ opt->prev1IsChar = False;
++ }
++ if (len == matches[offs])
++ {
++ offs += 2;
++ if (offs == numPairs)
++ break;
++ }
++ }
++ }
++
++ cur = 0;
++
++ #ifdef SHOW_STAT2
++ if (position >= 0)
++ {
++ unsigned i;
++ printf("\n pos = %4X", position);
++ for (i = cur; i <= lenEnd; i++)
++ printf("\nprice[%4X] = %d", position - cur + i, p->opt[i].price);
++ }
++ #endif
++
++ for (;;)
++ {
++ UInt32 numAvailFull, newLen, numPairs, posPrev, state, posState, startLen;
++ UInt32 curPrice, curAnd1Price, matchPrice, repMatchPrice;
++ Bool nextIsChar;
++ Byte curByte, matchByte;
++ const Byte *data;
++ COptimal *curOpt;
++ COptimal *nextOpt;
++
++ cur++;
++ if (cur == lenEnd)
++ return Backward(p, backRes, cur);
++
++ newLen = ReadMatchDistances(p, &numPairs);
++ if (newLen >= p->numFastBytes)
++ {
++ p->numPairs = numPairs;
++ p->longestMatchLength = newLen;
++ return Backward(p, backRes, cur);
++ }
++ position++;
++ curOpt = &p->opt[cur];
++ posPrev = curOpt->posPrev;
++ if (curOpt->prev1IsChar)
++ {
++ posPrev--;
++ if (curOpt->prev2)
++ {
++ state = p->opt[curOpt->posPrev2].state;
++ if (curOpt->backPrev2 < LZMA_NUM_REPS)
++ state = kRepNextStates[state];
++ else
++ state = kMatchNextStates[state];
++ }
++ else
++ state = p->opt[posPrev].state;
++ state = kLiteralNextStates[state];
++ }
++ else
++ state = p->opt[posPrev].state;
++ if (posPrev == cur - 1)
++ {
++ if (IsShortRep(curOpt))
++ state = kShortRepNextStates[state];
++ else
++ state = kLiteralNextStates[state];
++ }
++ else
++ {
++ UInt32 pos;
++ const COptimal *prevOpt;
++ if (curOpt->prev1IsChar && curOpt->prev2)
++ {
++ posPrev = curOpt->posPrev2;
++ pos = curOpt->backPrev2;
++ state = kRepNextStates[state];
++ }
++ else
++ {
++ pos = curOpt->backPrev;
++ if (pos < LZMA_NUM_REPS)
++ state = kRepNextStates[state];
++ else
++ state = kMatchNextStates[state];
++ }
++ prevOpt = &p->opt[posPrev];
++ if (pos < LZMA_NUM_REPS)
++ {
++ UInt32 i;
++ reps[0] = prevOpt->backs[pos];
++ for (i = 1; i <= pos; i++)
++ reps[i] = prevOpt->backs[i - 1];
++ for (; i < LZMA_NUM_REPS; i++)
++ reps[i] = prevOpt->backs[i];
++ }
++ else
++ {
++ UInt32 i;
++ reps[0] = (pos - LZMA_NUM_REPS);
++ for (i = 1; i < LZMA_NUM_REPS; i++)
++ reps[i] = prevOpt->backs[i - 1];
++ }
++ }
++ curOpt->state = (CState)state;
++
++ curOpt->backs[0] = reps[0];
++ curOpt->backs[1] = reps[1];
++ curOpt->backs[2] = reps[2];
++ curOpt->backs[3] = reps[3];
++
++ curPrice = curOpt->price;
++ nextIsChar = False;
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ curByte = *data;
++ matchByte = *(data - (reps[0] + 1));
++
++ posState = (position & p->pbMask);
++
++ curAnd1Price = curPrice + GET_PRICE_0(p->isMatch[state][posState]);
++ {
++ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
++ curAnd1Price +=
++ (!IsCharState(state) ?
++ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) :
++ LitEnc_GetPrice(probs, curByte, p->ProbPrices));
++ }
++
++ nextOpt = &p->opt[cur + 1];
++
++ if (curAnd1Price < nextOpt->price)
++ {
++ nextOpt->price = curAnd1Price;
++ nextOpt->posPrev = cur;
++ MakeAsChar(nextOpt);
++ nextIsChar = True;
++ }
++
++ matchPrice = curPrice + GET_PRICE_1(p->isMatch[state][posState]);
++ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[state]);
++
++ if (matchByte == curByte && !(nextOpt->posPrev < cur && nextOpt->backPrev == 0))
++ {
++ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, state, posState);
++ if (shortRepPrice <= nextOpt->price)
++ {
++ nextOpt->price = shortRepPrice;
++ nextOpt->posPrev = cur;
++ MakeAsShortRep(nextOpt);
++ nextIsChar = True;
++ }
++ }
++ numAvailFull = p->numAvail;
++ {
++ UInt32 temp = kNumOpts - 1 - cur;
++ if (temp < numAvailFull)
++ numAvailFull = temp;
++ }
++
++ if (numAvailFull < 2)
++ continue;
++ numAvail = (numAvailFull <= p->numFastBytes ? numAvailFull : p->numFastBytes);
++
++ if (!nextIsChar && matchByte != curByte) /* speed optimization */
++ {
++ /* try Literal + rep0 */
++ UInt32 temp;
++ UInt32 lenTest2;
++ const Byte *data2 = data - (reps[0] + 1);
++ UInt32 limit = p->numFastBytes + 1;
++ if (limit > numAvailFull)
++ limit = numAvailFull;
++
++ for (temp = 1; temp < limit && data[temp] == data2[temp]; temp++);
++ lenTest2 = temp - 1;
++ if (lenTest2 >= 2)
++ {
++ UInt32 state2 = kLiteralNextStates[state];
++ UInt32 posStateNext = (position + 1) & p->pbMask;
++ UInt32 nextRepMatchPrice = curAnd1Price +
++ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
++ GET_PRICE_1(p->isRep[state2]);
++ /* for (; lenTest2 >= 2; lenTest2--) */
++ {
++ UInt32 curAndLenPrice;
++ COptimal *opt;
++ UInt32 offset = cur + 1 + lenTest2;
++ while (lenEnd < offset)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
++ opt = &p->opt[offset];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur + 1;
++ opt->backPrev = 0;
++ opt->prev1IsChar = True;
++ opt->prev2 = False;
++ }
++ }
++ }
++ }
++
++ startLen = 2; /* speed optimization */
++ {
++ UInt32 repIndex;
++ for (repIndex = 0; repIndex < LZMA_NUM_REPS; repIndex++)
++ {
++ UInt32 lenTest;
++ UInt32 lenTestTemp;
++ UInt32 price;
++ const Byte *data2 = data - (reps[repIndex] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ continue;
++ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++);
++ while (lenEnd < cur + lenTest)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ lenTestTemp = lenTest;
++ price = repMatchPrice + GetPureRepPrice(p, repIndex, state, posState);
++ do
++ {
++ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][lenTest - 2];
++ COptimal *opt = &p->opt[cur + lenTest];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur;
++ opt->backPrev = repIndex;
++ opt->prev1IsChar = False;
++ }
++ }
++ while (--lenTest >= 2);
++ lenTest = lenTestTemp;
++
++ if (repIndex == 0)
++ startLen = lenTest + 1;
++
++ /* if (_maxMode) */
++ {
++ UInt32 lenTest2 = lenTest + 1;
++ UInt32 limit = lenTest2 + p->numFastBytes;
++ UInt32 nextRepMatchPrice;
++ if (limit > numAvailFull)
++ limit = numAvailFull;
++ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++);
++ lenTest2 -= lenTest + 1;
++ if (lenTest2 >= 2)
++ {
++ UInt32 state2 = kRepNextStates[state];
++ UInt32 posStateNext = (position + lenTest) & p->pbMask;
++ UInt32 curAndLenCharPrice =
++ price + p->repLenEnc.prices[posState][lenTest - 2] +
++ GET_PRICE_0(p->isMatch[state2][posStateNext]) +
++ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]),
++ data[lenTest], data2[lenTest], p->ProbPrices);
++ state2 = kLiteralNextStates[state2];
++ posStateNext = (position + lenTest + 1) & p->pbMask;
++ nextRepMatchPrice = curAndLenCharPrice +
++ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
++ GET_PRICE_1(p->isRep[state2]);
++
++ /* for (; lenTest2 >= 2; lenTest2--) */
++ {
++ UInt32 curAndLenPrice;
++ COptimal *opt;
++ UInt32 offset = cur + lenTest + 1 + lenTest2;
++ while (lenEnd < offset)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
++ opt = &p->opt[offset];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur + lenTest + 1;
++ opt->backPrev = 0;
++ opt->prev1IsChar = True;
++ opt->prev2 = True;
++ opt->posPrev2 = cur;
++ opt->backPrev2 = repIndex;
++ }
++ }
++ }
++ }
++ }
++ }
++ /* for (UInt32 lenTest = 2; lenTest <= newLen; lenTest++) */
++ if (newLen > numAvail)
++ {
++ newLen = numAvail;
++ for (numPairs = 0; newLen > matches[numPairs]; numPairs += 2);
++ matches[numPairs] = newLen;
++ numPairs += 2;
++ }
++ if (newLen >= startLen)
++ {
++ UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[state]);
++ UInt32 offs, curBack, posSlot;
++ UInt32 lenTest;
++ while (lenEnd < cur + newLen)
++ p->opt[++lenEnd].price = kInfinityPrice;
++
++ offs = 0;
++ while (startLen > matches[offs])
++ offs += 2;
++ curBack = matches[offs + 1];
++ GetPosSlot2(curBack, posSlot);
++ for (lenTest = /*2*/ startLen; ; lenTest++)
++ {
++ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][lenTest - LZMA_MATCH_LEN_MIN];
++ UInt32 lenToPosState = GetLenToPosState(lenTest);
++ COptimal *opt;
++ if (curBack < kNumFullDistances)
++ curAndLenPrice += p->distancesPrices[lenToPosState][curBack];
++ else
++ curAndLenPrice += p->posSlotPrices[lenToPosState][posSlot] + p->alignPrices[curBack & kAlignMask];
++
++ opt = &p->opt[cur + lenTest];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur;
++ opt->backPrev = curBack + LZMA_NUM_REPS;
++ opt->prev1IsChar = False;
++ }
++
++ if (/*_maxMode && */lenTest == matches[offs])
++ {
++ /* Try Match + Literal + Rep0 */
++ const Byte *data2 = data - (curBack + 1);
++ UInt32 lenTest2 = lenTest + 1;
++ UInt32 limit = lenTest2 + p->numFastBytes;
++ UInt32 nextRepMatchPrice;
++ if (limit > numAvailFull)
++ limit = numAvailFull;
++ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++);
++ lenTest2 -= lenTest + 1;
++ if (lenTest2 >= 2)
++ {
++ UInt32 state2 = kMatchNextStates[state];
++ UInt32 posStateNext = (position + lenTest) & p->pbMask;
++ UInt32 curAndLenCharPrice = curAndLenPrice +
++ GET_PRICE_0(p->isMatch[state2][posStateNext]) +
++ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]),
++ data[lenTest], data2[lenTest], p->ProbPrices);
++ state2 = kLiteralNextStates[state2];
++ posStateNext = (posStateNext + 1) & p->pbMask;
++ nextRepMatchPrice = curAndLenCharPrice +
++ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
++ GET_PRICE_1(p->isRep[state2]);
++
++ /* for (; lenTest2 >= 2; lenTest2--) */
++ {
++ UInt32 offset = cur + lenTest + 1 + lenTest2;
++ UInt32 curAndLenPrice;
++ COptimal *opt;
++ while (lenEnd < offset)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
++ opt = &p->opt[offset];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur + lenTest + 1;
++ opt->backPrev = 0;
++ opt->prev1IsChar = True;
++ opt->prev2 = True;
++ opt->posPrev2 = cur;
++ opt->backPrev2 = curBack + LZMA_NUM_REPS;
++ }
++ }
++ }
++ offs += 2;
++ if (offs == numPairs)
++ break;
++ curBack = matches[offs + 1];
++ if (curBack >= kNumFullDistances)
++ GetPosSlot2(curBack, posSlot);
++ }
++ }
++ }
++ }
++}
++
++#define ChangePair(smallDist, bigDist) (((bigDist) >> 7) > (smallDist))
++
++static UInt32 GetOptimumFast(CLzmaEnc *p, UInt32 *backRes)
++{
++ UInt32 numAvail, mainLen, mainDist, numPairs, repIndex, repLen, i;
++ const Byte *data;
++ const UInt32 *matches;
++
++ if (p->additionalOffset == 0)
++ mainLen = ReadMatchDistances(p, &numPairs);
++ else
++ {
++ mainLen = p->longestMatchLength;
++ numPairs = p->numPairs;
++ }
++
++ numAvail = p->numAvail;
++ *backRes = (UInt32)-1;
++ if (numAvail < 2)
++ return 1;
++ if (numAvail > LZMA_MATCH_LEN_MAX)
++ numAvail = LZMA_MATCH_LEN_MAX;
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++
++ repLen = repIndex = 0;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 len;
++ const Byte *data2 = data - (p->reps[i] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ continue;
++ for (len = 2; len < numAvail && data[len] == data2[len]; len++);
++ if (len >= p->numFastBytes)
++ {
++ *backRes = i;
++ MovePos(p, len - 1);
++ return len;
++ }
++ if (len > repLen)
++ {
++ repIndex = i;
++ repLen = len;
++ }
++ }
++
++ matches = p->matches;
++ if (mainLen >= p->numFastBytes)
++ {
++ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS;
++ MovePos(p, mainLen - 1);
++ return mainLen;
++ }
++
++ mainDist = 0; /* for GCC */
++ if (mainLen >= 2)
++ {
++ mainDist = matches[numPairs - 1];
++ while (numPairs > 2 && mainLen == matches[numPairs - 4] + 1)
++ {
++ if (!ChangePair(matches[numPairs - 3], mainDist))
++ break;
++ numPairs -= 2;
++ mainLen = matches[numPairs - 2];
++ mainDist = matches[numPairs - 1];
++ }
++ if (mainLen == 2 && mainDist >= 0x80)
++ mainLen = 1;
++ }
++
++ if (repLen >= 2 && (
++ (repLen + 1 >= mainLen) ||
++ (repLen + 2 >= mainLen && mainDist >= (1 << 9)) ||
++ (repLen + 3 >= mainLen && mainDist >= (1 << 15))))
++ {
++ *backRes = repIndex;
++ MovePos(p, repLen - 1);
++ return repLen;
++ }
++
++ if (mainLen < 2 || numAvail <= 2)
++ return 1;
++
++ p->longestMatchLength = ReadMatchDistances(p, &p->numPairs);
++ if (p->longestMatchLength >= 2)
++ {
++ UInt32 newDistance = matches[p->numPairs - 1];
++ if ((p->longestMatchLength >= mainLen && newDistance < mainDist) ||
++ (p->longestMatchLength == mainLen + 1 && !ChangePair(mainDist, newDistance)) ||
++ (p->longestMatchLength > mainLen + 1) ||
++ (p->longestMatchLength + 1 >= mainLen && mainLen >= 3 && ChangePair(newDistance, mainDist)))
++ return 1;
++ }
++
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 len, limit;
++ const Byte *data2 = data - (p->reps[i] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ continue;
++ limit = mainLen - 1;
++ for (len = 2; len < limit && data[len] == data2[len]; len++);
++ if (len >= limit)
++ return 1;
++ }
++ *backRes = mainDist + LZMA_NUM_REPS;
++ MovePos(p, mainLen - 2);
++ return mainLen;
++}
++
++static void WriteEndMarker(CLzmaEnc *p, UInt32 posState)
++{
++ UInt32 len;
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1);
++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0);
++ p->state = kMatchNextStates[p->state];
++ len = LZMA_MATCH_LEN_MIN;
++ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
++ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, (1 << kNumPosSlotBits) - 1);
++ RangeEnc_EncodeDirectBits(&p->rc, (((UInt32)1 << 30) - 1) >> kNumAlignBits, 30 - kNumAlignBits);
++ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, kAlignMask);
++}
++
++static SRes CheckErrors(CLzmaEnc *p)
++{
++ if (p->result != SZ_OK)
++ return p->result;
++ if (p->rc.res != SZ_OK)
++ p->result = SZ_ERROR_WRITE;
++ if (p->matchFinderBase.result != SZ_OK)
++ p->result = SZ_ERROR_READ;
++ if (p->result != SZ_OK)
++ p->finished = True;
++ return p->result;
++}
++
++static SRes Flush(CLzmaEnc *p, UInt32 nowPos)
++{
++ /* ReleaseMFStream(); */
++ p->finished = True;
++ if (p->writeEndMark)
++ WriteEndMarker(p, nowPos & p->pbMask);
++ RangeEnc_FlushData(&p->rc);
++ RangeEnc_FlushStream(&p->rc);
++ return CheckErrors(p);
++}
++
++static void FillAlignPrices(CLzmaEnc *p)
++{
++ UInt32 i;
++ for (i = 0; i < kAlignTableSize; i++)
++ p->alignPrices[i] = RcTree_ReverseGetPrice(p->posAlignEncoder, kNumAlignBits, i, p->ProbPrices);
++ p->alignPriceCount = 0;
++}
++
++static void FillDistancesPrices(CLzmaEnc *p)
++{
++ UInt32 tempPrices[kNumFullDistances];
++ UInt32 i, lenToPosState;
++ for (i = kStartPosModelIndex; i < kNumFullDistances; i++)
++ {
++ UInt32 posSlot = GetPosSlot1(i);
++ UInt32 footerBits = ((posSlot >> 1) - 1);
++ UInt32 base = ((2 | (posSlot & 1)) << footerBits);
++ tempPrices[i] = RcTree_ReverseGetPrice(p->posEncoders + base - posSlot - 1, footerBits, i - base, p->ProbPrices);
++ }
++
++ for (lenToPosState = 0; lenToPosState < kNumLenToPosStates; lenToPosState++)
++ {
++ UInt32 posSlot;
++ const CLzmaProb *encoder = p->posSlotEncoder[lenToPosState];
++ UInt32 *posSlotPrices = p->posSlotPrices[lenToPosState];
++ for (posSlot = 0; posSlot < p->distTableSize; posSlot++)
++ posSlotPrices[posSlot] = RcTree_GetPrice(encoder, kNumPosSlotBits, posSlot, p->ProbPrices);
++ for (posSlot = kEndPosModelIndex; posSlot < p->distTableSize; posSlot++)
++ posSlotPrices[posSlot] += ((((posSlot >> 1) - 1) - kNumAlignBits) << kNumBitPriceShiftBits);
++
++ {
++ UInt32 *distancesPrices = p->distancesPrices[lenToPosState];
++ UInt32 i;
++ for (i = 0; i < kStartPosModelIndex; i++)
++ distancesPrices[i] = posSlotPrices[i];
++ for (; i < kNumFullDistances; i++)
++ distancesPrices[i] = posSlotPrices[GetPosSlot1(i)] + tempPrices[i];
++ }
++ }
++ p->matchPriceCount = 0;
++}
++
++void LzmaEnc_Construct(CLzmaEnc *p)
++{
++ RangeEnc_Construct(&p->rc);
++ MatchFinder_Construct(&p->matchFinderBase);
++ #ifndef _7ZIP_ST
++ MatchFinderMt_Construct(&p->matchFinderMt);
++ p->matchFinderMt.MatchFinder = &p->matchFinderBase;
++ #endif
++
++ {
++ CLzmaEncProps props;
++ LzmaEncProps_Init(&props);
++ LzmaEnc_SetProps(p, &props);
++ }
++
++ #ifndef LZMA_LOG_BSR
++ LzmaEnc_FastPosInit(p->g_FastPos);
++ #endif
++
++ LzmaEnc_InitPriceTables(p->ProbPrices);
++ p->litProbs = 0;
++ p->saveState.litProbs = 0;
++}
++
++CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc)
++{
++ void *p;
++ p = alloc->Alloc(alloc, sizeof(CLzmaEnc));
++ if (p != 0)
++ LzmaEnc_Construct((CLzmaEnc *)p);
++ return p;
++}
++
++void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->litProbs);
++ alloc->Free(alloc, p->saveState.litProbs);
++ p->litProbs = 0;
++ p->saveState.litProbs = 0;
++}
++
++void LzmaEnc_Destruct(CLzmaEnc *p, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ #ifndef _7ZIP_ST
++ MatchFinderMt_Destruct(&p->matchFinderMt, allocBig);
++ #endif
++ MatchFinder_Free(&p->matchFinderBase, allocBig);
++ LzmaEnc_FreeLits(p, alloc);
++ RangeEnc_Free(&p->rc, alloc);
++}
++
++void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ LzmaEnc_Destruct((CLzmaEnc *)p, alloc, allocBig);
++ alloc->Free(alloc, p);
++}
++
++static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, Bool useLimits, UInt32 maxPackSize, UInt32 maxUnpackSize)
++{
++ UInt32 nowPos32, startPos32;
++ if (p->needInit)
++ {
++ p->matchFinder.Init(p->matchFinderObj);
++ p->needInit = 0;
++ }
++
++ if (p->finished)
++ return p->result;
++ RINOK(CheckErrors(p));
++
++ nowPos32 = (UInt32)p->nowPos64;
++ startPos32 = nowPos32;
++
++ if (p->nowPos64 == 0)
++ {
++ UInt32 numPairs;
++ Byte curByte;
++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
++ return Flush(p, nowPos32);
++ ReadMatchDistances(p, &numPairs);
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][0], 0);
++ p->state = kLiteralNextStates[p->state];
++ curByte = p->matchFinder.GetIndexByte(p->matchFinderObj, 0 - p->additionalOffset);
++ LitEnc_Encode(&p->rc, p->litProbs, curByte);
++ p->additionalOffset--;
++ nowPos32++;
++ }
++
++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) != 0)
++ for (;;)
++ {
++ UInt32 pos, len, posState;
++
++ if (p->fastMode)
++ len = GetOptimumFast(p, &pos);
++ else
++ len = GetOptimum(p, nowPos32, &pos);
++
++ #ifdef SHOW_STAT2
++ printf("\n pos = %4X, len = %d pos = %d", nowPos32, len, pos);
++ #endif
++
++ posState = nowPos32 & p->pbMask;
++ if (len == 1 && pos == (UInt32)-1)
++ {
++ Byte curByte;
++ CLzmaProb *probs;
++ const Byte *data;
++
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 0);
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
++ curByte = *data;
++ probs = LIT_PROBS(nowPos32, *(data - 1));
++ if (IsCharState(p->state))
++ LitEnc_Encode(&p->rc, probs, curByte);
++ else
++ LitEnc_EncodeMatched(&p->rc, probs, curByte, *(data - p->reps[0] - 1));
++ p->state = kLiteralNextStates[p->state];
++ }
++ else
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1);
++ if (pos < LZMA_NUM_REPS)
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 1);
++ if (pos == 0)
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 0);
++ RangeEnc_EncodeBit(&p->rc, &p->isRep0Long[p->state][posState], ((len == 1) ? 0 : 1));
++ }
++ else
++ {
++ UInt32 distance = p->reps[pos];
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 1);
++ if (pos == 1)
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 0);
++ else
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 1);
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG2[p->state], pos - 2);
++ if (pos == 3)
++ p->reps[3] = p->reps[2];
++ p->reps[2] = p->reps[1];
++ }
++ p->reps[1] = p->reps[0];
++ p->reps[0] = distance;
++ }
++ if (len == 1)
++ p->state = kShortRepNextStates[p->state];
++ else
++ {
++ LenEnc_Encode2(&p->repLenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
++ p->state = kRepNextStates[p->state];
++ }
++ }
++ else
++ {
++ UInt32 posSlot;
++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0);
++ p->state = kMatchNextStates[p->state];
++ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
++ pos -= LZMA_NUM_REPS;
++ GetPosSlot(pos, posSlot);
++ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, posSlot);
++
++ if (posSlot >= kStartPosModelIndex)
++ {
++ UInt32 footerBits = ((posSlot >> 1) - 1);
++ UInt32 base = ((2 | (posSlot & 1)) << footerBits);
++ UInt32 posReduced = pos - base;
++
++ if (posSlot < kEndPosModelIndex)
++ RcTree_ReverseEncode(&p->rc, p->posEncoders + base - posSlot - 1, footerBits, posReduced);
++ else
++ {
++ RangeEnc_EncodeDirectBits(&p->rc, posReduced >> kNumAlignBits, footerBits - kNumAlignBits);
++ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, posReduced & kAlignMask);
++ p->alignPriceCount++;
++ }
++ }
++ p->reps[3] = p->reps[2];
++ p->reps[2] = p->reps[1];
++ p->reps[1] = p->reps[0];
++ p->reps[0] = pos;
++ p->matchPriceCount++;
++ }
++ }
++ p->additionalOffset -= len;
++ nowPos32 += len;
++ if (p->additionalOffset == 0)
++ {
++ UInt32 processed;
++ if (!p->fastMode)
++ {
++ if (p->matchPriceCount >= (1 << 7))
++ FillDistancesPrices(p);
++ if (p->alignPriceCount >= kAlignTableSize)
++ FillAlignPrices(p);
++ }
++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
++ break;
++ processed = nowPos32 - startPos32;
++ if (useLimits)
++ {
++ if (processed + kNumOpts + 300 >= maxUnpackSize ||
++ RangeEnc_GetProcessed(&p->rc) + kNumOpts * 2 >= maxPackSize)
++ break;
++ }
++ else if (processed >= (1 << 15))
++ {
++ p->nowPos64 += nowPos32 - startPos32;
++ return CheckErrors(p);
++ }
++ }
++ }
++ p->nowPos64 += nowPos32 - startPos32;
++ return Flush(p, nowPos32);
++}
++
++#define kBigHashDicLimit ((UInt32)1 << 24)
++
++static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ UInt32 beforeSize = kNumOpts;
++ Bool btMode;
++ if (!RangeEnc_Alloc(&p->rc, alloc))
++ return SZ_ERROR_MEM;
++ btMode = (p->matchFinderBase.btMode != 0);
++ #ifndef _7ZIP_ST
++ p->mtMode = (p->multiThread && !p->fastMode && btMode);
++ #endif
++
++ {
++ unsigned lclp = p->lc + p->lp;
++ if (p->litProbs == 0 || p->saveState.litProbs == 0 || p->lclp != lclp)
++ {
++ LzmaEnc_FreeLits(p, alloc);
++ p->litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb));
++ p->saveState.litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb));
++ if (p->litProbs == 0 || p->saveState.litProbs == 0)
++ {
++ LzmaEnc_FreeLits(p, alloc);
++ return SZ_ERROR_MEM;
++ }
++ p->lclp = lclp;
++ }
++ }
++
++ p->matchFinderBase.bigHash = (p->dictSize > kBigHashDicLimit);
++
++ if (beforeSize + p->dictSize < keepWindowSize)
++ beforeSize = keepWindowSize - p->dictSize;
++
++ #ifndef _7ZIP_ST
++ if (p->mtMode)
++ {
++ RINOK(MatchFinderMt_Create(&p->matchFinderMt, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig));
++ p->matchFinderObj = &p->matchFinderMt;
++ MatchFinderMt_CreateVTable(&p->matchFinderMt, &p->matchFinder);
++ }
++ else
++ #endif
++ {
++ if (!MatchFinder_Create(&p->matchFinderBase, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig))
++ return SZ_ERROR_MEM;
++ p->matchFinderObj = &p->matchFinderBase;
++ MatchFinder_CreateVTable(&p->matchFinderBase, &p->matchFinder);
++ }
++ return SZ_OK;
++}
++
++void LzmaEnc_Init(CLzmaEnc *p)
++{
++ UInt32 i;
++ p->state = 0;
++ for (i = 0 ; i < LZMA_NUM_REPS; i++)
++ p->reps[i] = 0;
++
++ RangeEnc_Init(&p->rc);
++
++
++ for (i = 0; i < kNumStates; i++)
++ {
++ UInt32 j;
++ for (j = 0; j < LZMA_NUM_PB_STATES_MAX; j++)
++ {
++ p->isMatch[i][j] = kProbInitValue;
++ p->isRep0Long[i][j] = kProbInitValue;
++ }
++ p->isRep[i] = kProbInitValue;
++ p->isRepG0[i] = kProbInitValue;
++ p->isRepG1[i] = kProbInitValue;
++ p->isRepG2[i] = kProbInitValue;
++ }
++
++ {
++ UInt32 num = 0x300 << (p->lp + p->lc);
++ for (i = 0; i < num; i++)
++ p->litProbs[i] = kProbInitValue;
++ }
++
++ {
++ for (i = 0; i < kNumLenToPosStates; i++)
++ {
++ CLzmaProb *probs = p->posSlotEncoder[i];
++ UInt32 j;
++ for (j = 0; j < (1 << kNumPosSlotBits); j++)
++ probs[j] = kProbInitValue;
++ }
++ }
++ {
++ for (i = 0; i < kNumFullDistances - kEndPosModelIndex; i++)
++ p->posEncoders[i] = kProbInitValue;
++ }
++
++ LenEnc_Init(&p->lenEnc.p);
++ LenEnc_Init(&p->repLenEnc.p);
++
++ for (i = 0; i < (1 << kNumAlignBits); i++)
++ p->posAlignEncoder[i] = kProbInitValue;
++
++ p->optimumEndIndex = 0;
++ p->optimumCurrentIndex = 0;
++ p->additionalOffset = 0;
++
++ p->pbMask = (1 << p->pb) - 1;
++ p->lpMask = (1 << p->lp) - 1;
++}
++
++void LzmaEnc_InitPrices(CLzmaEnc *p)
++{
++ if (!p->fastMode)
++ {
++ FillDistancesPrices(p);
++ FillAlignPrices(p);
++ }
++
++ p->lenEnc.tableSize =
++ p->repLenEnc.tableSize =
++ p->numFastBytes + 1 - LZMA_MATCH_LEN_MIN;
++ LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, p->ProbPrices);
++ LenPriceEnc_UpdateTables(&p->repLenEnc, 1 << p->pb, p->ProbPrices);
++}
++
++static SRes LzmaEnc_AllocAndInit(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ UInt32 i;
++ for (i = 0; i < (UInt32)kDicLogSizeMaxCompress; i++)
++ if (p->dictSize <= ((UInt32)1 << i))
++ break;
++ p->distTableSize = i * 2;
++
++ p->finished = False;
++ p->result = SZ_OK;
++ RINOK(LzmaEnc_Alloc(p, keepWindowSize, alloc, allocBig));
++ LzmaEnc_Init(p);
++ LzmaEnc_InitPrices(p);
++ p->nowPos64 = 0;
++ return SZ_OK;
++}
++
++static SRes LzmaEnc_Prepare(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream,
++ ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ p->matchFinderBase.stream = inStream;
++ p->needInit = 1;
++ p->rc.outStream = outStream;
++ return LzmaEnc_AllocAndInit(p, 0, alloc, allocBig);
++}
++
++SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp,
++ ISeqInStream *inStream, UInt32 keepWindowSize,
++ ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ p->matchFinderBase.stream = inStream;
++ p->needInit = 1;
++ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
++}
++
++static void LzmaEnc_SetInputBuf(CLzmaEnc *p, const Byte *src, SizeT srcLen)
++{
++ p->matchFinderBase.directInput = 1;
++ p->matchFinderBase.bufferBase = (Byte *)src;
++ p->matchFinderBase.directInputRem = srcLen;
++}
++
++SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen,
++ UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ LzmaEnc_SetInputBuf(p, src, srcLen);
++ p->needInit = 1;
++
++ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
++}
++
++void LzmaEnc_Finish(CLzmaEncHandle pp)
++{
++ #ifndef _7ZIP_ST
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ if (p->mtMode)
++ MatchFinderMt_ReleaseStream(&p->matchFinderMt);
++ #else
++ pp = pp;
++ #endif
++}
++
++typedef struct
++{
++ ISeqOutStream funcTable;
++ Byte *data;
++ SizeT rem;
++ Bool overflow;
++} CSeqOutStreamBuf;
++
++static size_t MyWrite(void *pp, const void *data, size_t size)
++{
++ CSeqOutStreamBuf *p = (CSeqOutStreamBuf *)pp;
++ if (p->rem < size)
++ {
++ size = p->rem;
++ p->overflow = True;
++ }
++ memcpy(p->data, data, size);
++ p->rem -= size;
++ p->data += size;
++ return size;
++}
++
++
++UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp)
++{
++ const CLzmaEnc *p = (CLzmaEnc *)pp;
++ return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
++}
++
++const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp)
++{
++ const CLzmaEnc *p = (CLzmaEnc *)pp;
++ return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
++}
++
++SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit,
++ Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ UInt64 nowPos64;
++ SRes res;
++ CSeqOutStreamBuf outStream;
++
++ outStream.funcTable.Write = MyWrite;
++ outStream.data = dest;
++ outStream.rem = *destLen;
++ outStream.overflow = False;
++
++ p->writeEndMark = False;
++ p->finished = False;
++ p->result = SZ_OK;
++
++ if (reInit)
++ LzmaEnc_Init(p);
++ LzmaEnc_InitPrices(p);
++ nowPos64 = p->nowPos64;
++ RangeEnc_Init(&p->rc);
++ p->rc.outStream = &outStream.funcTable;
++
++ res = LzmaEnc_CodeOneBlock(p, True, desiredPackSize, *unpackSize);
++
++ *unpackSize = (UInt32)(p->nowPos64 - nowPos64);
++ *destLen -= outStream.rem;
++ if (outStream.overflow)
++ return SZ_ERROR_OUTPUT_EOF;
++
++ return res;
++}
++
++static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress)
++{
++ SRes res = SZ_OK;
++
++ #ifndef _7ZIP_ST
++ Byte allocaDummy[0x300];
++ int i = 0;
++ for (i = 0; i < 16; i++)
++ allocaDummy[i] = (Byte)i;
++ #endif
++
++ for (;;)
++ {
++ res = LzmaEnc_CodeOneBlock(p, False, 0, 0);
++ if (res != SZ_OK || p->finished != 0)
++ break;
++ if (progress != 0)
++ {
++ res = progress->Progress(progress, p->nowPos64, RangeEnc_GetProcessed(&p->rc));
++ if (res != SZ_OK)
++ {
++ res = SZ_ERROR_PROGRESS;
++ break;
++ }
++ }
++ }
++ LzmaEnc_Finish(p);
++ return res;
++}
++
++SRes LzmaEnc_Encode(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress,
++ ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ RINOK(LzmaEnc_Prepare(pp, outStream, inStream, alloc, allocBig));
++ return LzmaEnc_Encode2((CLzmaEnc *)pp, progress);
++}
++
++SRes LzmaEnc_WriteProperties(CLzmaEncHandle pp, Byte *props, SizeT *size)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ int i;
++ UInt32 dictSize = p->dictSize;
++ if (*size < LZMA_PROPS_SIZE)
++ return SZ_ERROR_PARAM;
++ *size = LZMA_PROPS_SIZE;
++ props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc);
++
++ for (i = 11; i <= 30; i++)
++ {
++ if (dictSize <= ((UInt32)2 << i))
++ {
++ dictSize = (2 << i);
++ break;
++ }
++ if (dictSize <= ((UInt32)3 << i))
++ {
++ dictSize = (3 << i);
++ break;
++ }
++ }
++
++ for (i = 0; i < 4; i++)
++ props[1 + i] = (Byte)(dictSize >> (8 * i));
++ return SZ_OK;
++}
++
++SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ SRes res;
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++
++ CSeqOutStreamBuf outStream;
++
++ LzmaEnc_SetInputBuf(p, src, srcLen);
++
++ outStream.funcTable.Write = MyWrite;
++ outStream.data = dest;
++ outStream.rem = *destLen;
++ outStream.overflow = False;
++
++ p->writeEndMark = writeEndMark;
++
++ p->rc.outStream = &outStream.funcTable;
++ res = LzmaEnc_MemPrepare(pp, src, srcLen, 0, alloc, allocBig);
++ if (res == SZ_OK)
++ res = LzmaEnc_Encode2(p, progress);
++
++ *destLen -= outStream.rem;
++ if (outStream.overflow)
++ return SZ_ERROR_OUTPUT_EOF;
++ return res;
++}
++
++SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)LzmaEnc_Create(alloc);
++ SRes res;
++ if (p == 0)
++ return SZ_ERROR_MEM;
++
++ res = LzmaEnc_SetProps(p, props);
++ if (res == SZ_OK)
++ {
++ res = LzmaEnc_WriteProperties(p, propsEncoded, propsSize);
++ if (res == SZ_OK)
++ res = LzmaEnc_MemEncode(p, dest, destLen, src, srcLen,
++ writeEndMark, progress, alloc, allocBig);
++ }
++
++ LzmaEnc_Destroy(p, alloc, allocBig);
++ return res;
++}
+--- /dev/null
++++ b/lib/lzma/Makefile
+@@ -0,0 +1,7 @@
++lzma_compress-objs := LzFind.o LzmaEnc.o
++lzma_decompress-objs := LzmaDec.o
++
++obj-$(CONFIG_LZMA_COMPRESS) += lzma_compress.o
++obj-$(CONFIG_LZMA_DECOMPRESS) += lzma_decompress.o
++
++EXTRA_CFLAGS += -Iinclude/linux -Iinclude/linux/lzma -include types.h
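
For orientation, a hedged sketch of how ordinary C code could drive the LzmaEncode() entry point defined above; it is not part of the patch, and the allocator callbacks, header name and dictionary size are illustrative assumptions only.

/* Purely illustrative sketch of calling the LzmaEncode() API added above.
 * Allocator plumbing and parameter choices are assumptions, not part of
 * the patch; in-kernel users wrap this behind their own helpers. */
#include <stdlib.h>
#include "LzmaEnc.h"

static void *SzAlloc(void *p, size_t size) { (void)p; return malloc(size); }
static void SzFree(void *p, void *address) { (void)p; free(address); }
static ISzAlloc g_Alloc = { SzAlloc, SzFree };

int compress_buf(const Byte *src, SizeT srcLen,
                 Byte *dest, SizeT *destLen,
                 Byte props[LZMA_PROPS_SIZE])
{
	CLzmaEncProps encProps;
	SizeT propsSize = LZMA_PROPS_SIZE;

	LzmaEncProps_Init(&encProps);
	encProps.dictSize = 1 << 20;	/* assumption: 1 MiB dictionary */

	/* Returns SZ_OK on success; *destLen is updated to the packed size
	 * and props[] receives the 5-byte LZMA property header. */
	return LzmaEncode(dest, destLen, src, srcLen, &encProps,
			  props, &propsSize, 1 /* write end marker */,
			  NULL /* no progress callback */, &g_Alloc, &g_Alloc);
}
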
diff --git a/target/linux/generic/pending-5.4/532-jffs2_eofdetect.patch b/target/linux/generic/pending-5.4/532-jffs2_eofdetect.patch
new file mode 100644
index 0000000000..e9952c6d84
--- /dev/null
+++ b/target/linux/generic/pending-5.4/532-jffs2_eofdetect.patch
@@ -0,0 +1,65 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: fs: jffs2: EOF marker
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ fs/jffs2/build.c | 10 ++++++++++
+ fs/jffs2/scan.c | 21 +++++++++++++++++++--
+ 2 files changed, 29 insertions(+), 2 deletions(-)
+
+--- a/fs/jffs2/build.c
++++ b/fs/jffs2/build.c
+@@ -117,6 +117,16 @@ static int jffs2_build_filesystem(struct
+ dbg_fsbuild("scanned flash completely\n");
+ jffs2_dbg_dump_block_lists_nolock(c);
+
++ if (c->flags & (1 << 7)) {
++ printk("%s(): unlocking the mtd device... ", __func__);
++ mtd_unlock(c->mtd, 0, c->mtd->size);
++ printk("done.\n");
++
++ printk("%s(): erasing all blocks after the end marker... ", __func__);
++ jffs2_erase_pending_blocks(c, -1);
++ printk("done.\n");
++ }
++
+ dbg_fsbuild("pass 1 starting\n");
+ c->flags |= JFFS2_SB_FLAG_BUILDING;
+ /* Now scan the directory tree, increasing nlink according to every dirent found. */
+--- a/fs/jffs2/scan.c
++++ b/fs/jffs2/scan.c
+@@ -148,8 +148,14 @@ int jffs2_scan_medium(struct jffs2_sb_in
+ /* reset summary info for next eraseblock scan */
+ jffs2_sum_reset_collected(s);
+
+- ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
+- buf_size, s);
++ if (c->flags & (1 << 7)) {
++ if (mtd_block_isbad(c->mtd, jeb->offset))
++ ret = BLK_STATE_BADBLOCK;
++ else
++ ret = BLK_STATE_ALLFF;
++ } else
++ ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
++ buf_size, s);
+
+ if (ret < 0)
+ goto out;
+@@ -561,6 +567,17 @@ full_scan:
+ return err;
+ }
+
++ if ((buf[0] == 0xde) &&
++ (buf[1] == 0xad) &&
++ (buf[2] == 0xc0) &&
++ (buf[3] == 0xde)) {
++ /* end of filesystem. erase everything after this point */
++ printk("%s(): End of filesystem marker found at 0x%x\n", __func__, jeb->offset);
++ c->flags |= (1 << 7);
++
++ return BLK_STATE_ALLFF;
++ }
++
+ /* We temporarily use 'ofs' as a pointer into the buffer/jeb */
+ ofs = 0;
+ max_ofs = EMPTY_SCAN_SIZE(c->sector_size);
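
The scan hunk above treats the byte sequence 0xde 0xad 0xc0 0xde at the start of an eraseblock as an end-of-filesystem marker and erases every block after it. A hedged sketch of how an image tool might append that marker follows; the file handling and invocation are assumptions for illustration, not part of the patch.

/* Illustrative only: append the 0xdeadc0de end-of-filesystem marker that
 * the scan code above looks for. The marker must start on an eraseblock
 * boundary, so real build scripts pad the image accordingly first. */
#include <stdio.h>

int main(int argc, char **argv)
{
	static const unsigned char marker[4] = { 0xde, 0xad, 0xc0, 0xde };
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <padded-jffs2-image>\n", argv[0]);
		return 1;
	}

	f = fopen(argv[1], "ab");	/* append at the current end of the image */
	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fwrite(marker, 1, sizeof(marker), f) != sizeof(marker)) {
		perror("fwrite");
		fclose(f);
		return 1;
	}
	fclose(f);
	return 0;
}
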
diff --git a/target/linux/generic/pending-5.4/553-ubifs-Add-option-to-create-UBI-FS-version-4-on-empty.patch b/target/linux/generic/pending-5.4/553-ubifs-Add-option-to-create-UBI-FS-version-4-on-empty.patch
new file mode 100644
index 0000000000..02c9b4c0fa
--- /dev/null
+++ b/target/linux/generic/pending-5.4/553-ubifs-Add-option-to-create-UBI-FS-version-4-on-empty.patch
@@ -0,0 +1,63 @@
+From 93c33e6a7f3b0aef99d02252e6232a3d8b80f2d5 Mon Sep 17 00:00:00 2001
+From: Hauke Mehrtens <hauke@hauke-m.de>
+Date: Sun, 21 Jan 2018 15:47:50 +0100
+Subject: ubifs: Add option to create UBI FS version 4 on empty UBI volume
+
+Instead of creating UBIFS file systems with format version 5 by default
+on empty UBI volumes, add a compile option to create them with the older
+file system format version 4 instead. This allows the volume to be
+mounted on kernel versions < 4.10, which do not support format
+version 5.
+We saw that some people could not access their older data after
+downgrading from kernel 4.14 to kernel 4.9; this option prevents that
+problem.
+
+Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
+---
+ fs/ubifs/Kconfig | 13 +++++++++++++
+ fs/ubifs/sb.c | 6 ++++++
+ 2 files changed, 19 insertions(+)
+
+--- a/fs/ubifs/Kconfig
++++ b/fs/ubifs/Kconfig
+@@ -85,3 +85,16 @@ config UBIFS_FS_SECURITY
+ the extended attribute support in advance.
+
+ If you are not using a security module, say N.
++
++config UBIFS_FS_FORMAT4
++ bool "Use file system format version 4 for new file systems"
++ depends on UBIFS_FS
++ help
++	  Instead of creating new file systems with the new UBIFS file
++	  system format version 5, use the older format version 4 for the
++	  file systems the driver implicitly creates on an empty UBI
++	  volume. This makes it possible to also mount these file systems
++	  with kernel versions before 4.10. The driver still supports
++	  format version 5 for file systems that were created with it.
++
++ If you are unsure, say N.
+--- a/fs/ubifs/sb.c
++++ b/fs/ubifs/sb.c
+@@ -176,7 +176,9 @@ static int create_default_filesystem(str
+ tmp64 = (long long)max_buds * c->leb_size;
+ if (big_lpt)
+ sup_flags |= UBIFS_FLG_BIGLPT;
++#ifndef CONFIG_UBIFS_FS_FORMAT4
+ sup_flags |= UBIFS_FLG_DOUBLE_HASH;
++#endif
+
+ sup->ch.node_type = UBIFS_SB_NODE;
+ sup->key_hash = UBIFS_KEY_HASH_R5;
+@@ -192,7 +194,11 @@ static int create_default_filesystem(str
+ sup->jhead_cnt = cpu_to_le32(DEFAULT_JHEADS_CNT);
+ sup->fanout = cpu_to_le32(DEFAULT_FANOUT);
+ sup->lsave_cnt = cpu_to_le32(c->lsave_cnt);
++#ifdef CONFIG_UBIFS_FS_FORMAT4
++ sup->fmt_version = cpu_to_le32(4);
++#else
+ sup->fmt_version = cpu_to_le32(UBIFS_FORMAT_VERSION);
++#endif
+ sup->time_gran = cpu_to_le32(DEFAULT_TIME_GRAN);
+ if (c->mount_opts.override_compr)
+ sup->default_compr = cpu_to_le16(c->mount_opts.compr_type);
diff --git a/target/linux/generic/pending-5.4/600-netfilter_conntrack_flush.patch b/target/linux/generic/pending-5.4/600-netfilter_conntrack_flush.patch
new file mode 100644
index 0000000000..fa32f88e17
--- /dev/null
+++ b/target/linux/generic/pending-5.4/600-netfilter_conntrack_flush.patch
@@ -0,0 +1,88 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: netfilter: add support for flushing conntrack via /proc
+
+lede-commit 8193bbe59a74d34d6a26d4a8cb857b1952905314
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ net/netfilter/nf_conntrack_standalone.c | 59 ++++++++++++++++++++++++++++++++-
+ 1 file changed, 58 insertions(+), 1 deletion(-)
+
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -9,6 +9,7 @@
+ #include <linux/percpu.h>
+ #include <linux/netdevice.h>
+ #include <linux/security.h>
++#include <linux/inet.h>
+ #include <net/net_namespace.h>
+ #ifdef CONFIG_SYSCTL
+ #include <linux/sysctl.h>
+@@ -433,6 +434,56 @@ static int ct_cpu_seq_show(struct seq_fi
+ return 0;
+ }
+
++struct kill_request {
++ u16 family;
++ union nf_inet_addr addr;
++};
++
++static int kill_matching(struct nf_conn *i, void *data)
++{
++ struct kill_request *kr = data;
++ struct nf_conntrack_tuple *t1 = &i->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
++ struct nf_conntrack_tuple *t2 = &i->tuplehash[IP_CT_DIR_REPLY].tuple;
++
++ if (!kr->family)
++ return 1;
++
++ if (t1->src.l3num != kr->family)
++ return 0;
++
++ return (nf_inet_addr_cmp(&kr->addr, &t1->src.u3) ||
++ nf_inet_addr_cmp(&kr->addr, &t1->dst.u3) ||
++ nf_inet_addr_cmp(&kr->addr, &t2->src.u3) ||
++ nf_inet_addr_cmp(&kr->addr, &t2->dst.u3));
++}
++
++static int ct_file_write(struct file *file, char *buf, size_t count)
++{
++ struct seq_file *seq = file->private_data;
++ struct net *net = seq_file_net(seq);
++ struct kill_request kr = { };
++
++ if (count == 0)
++ return 0;
++
++ if (count >= INET6_ADDRSTRLEN)
++ count = INET6_ADDRSTRLEN - 1;
++
++ if (strnchr(buf, count, ':')) {
++ kr.family = AF_INET6;
++ if (!in6_pton(buf, count, (void *)&kr.addr, '\n', NULL))
++ return -EINVAL;
++ } else if (strnchr(buf, count, '.')) {
++ kr.family = AF_INET;
++ if (!in4_pton(buf, count, (void *)&kr.addr, '\n', NULL))
++ return -EINVAL;
++ }
++
++ nf_ct_iterate_cleanup_net(net, kill_matching, &kr, 0, 0);
++
++ return 0;
++}
++
+ static const struct seq_operations ct_cpu_seq_ops = {
+ .start = ct_cpu_seq_start,
+ .next = ct_cpu_seq_next,
+@@ -446,8 +497,9 @@ static int nf_conntrack_standalone_init_
+ kuid_t root_uid;
+ kgid_t root_gid;
+
+- pde = proc_create_net("nf_conntrack", 0440, net->proc_net, &ct_seq_ops,
+- sizeof(struct ct_iter_state));
++ pde = proc_create_net_data_write("nf_conntrack", 0440, net->proc_net,
++ &ct_seq_ops, &ct_file_write,
++ sizeof(struct ct_iter_state), NULL);
+ if (!pde)
+ goto out_nf_conntrack;
+
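
To illustrate the interface this patch adds, a minimal usage sketch (an assumption for illustration, not part of the patch): writing an IPv4 or IPv6 address to /proc/net/nf_conntrack deletes the entries whose tuples contain that address, and any other non-empty string clears the whole table.

/* Minimal sketch of the new /proc interface; assumes root privileges and
 * a kernel with the patch above applied. */
#include <stdio.h>

static int conntrack_flush(const char *what)
{
	FILE *f = fopen("/proc/net/nf_conntrack", "w");

	if (!f) {
		perror("fopen");
		return -1;
	}
	fprintf(f, "%s\n", what);	/* e.g. "192.168.1.20", "fe80::1" or "flush" */
	fclose(f);
	return 0;
}

int main(void)
{
	return conntrack_flush("192.168.1.20") ? 1 : 0;	/* address is only an example */
}
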
diff --git a/target/linux/generic/pending-5.4/610-netfilter_match_bypass_default_checks.patch b/target/linux/generic/pending-5.4/610-netfilter_match_bypass_default_checks.patch
new file mode 100644
index 0000000000..1691146825
--- /dev/null
+++ b/target/linux/generic/pending-5.4/610-netfilter_match_bypass_default_checks.patch
@@ -0,0 +1,110 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: add a new version of my netfilter speedup patches for linux 2.6.39 and 3.0
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ include/uapi/linux/netfilter_ipv4/ip_tables.h | 1 +
+ net/ipv4/netfilter/ip_tables.c | 37 +++++++++++++++++++++++++++
+ 2 files changed, 38 insertions(+)
+
+--- a/include/uapi/linux/netfilter_ipv4/ip_tables.h
++++ b/include/uapi/linux/netfilter_ipv4/ip_tables.h
+@@ -89,6 +89,7 @@ struct ipt_ip {
+ #define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */
+ #define IPT_F_GOTO 0x02 /* Set if jump is a goto */
+ #define IPT_F_MASK 0x03 /* All possible flag bits mask. */
++#define IPT_F_NO_DEF_MATCH 0x80 /* Internal: no default match rules present */
+
+ /* Values for "inv" field in struct ipt_ip. */
+ #define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -53,6 +53,9 @@ ip_packet_match(const struct iphdr *ip,
+ {
+ unsigned long ret;
+
++ if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
++ return true;
++
+ if (NF_INVF(ipinfo, IPT_INV_SRCIP,
+ (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
+ NF_INVF(ipinfo, IPT_INV_DSTIP,
+@@ -83,6 +86,29 @@ ip_packet_match(const struct iphdr *ip,
+ return true;
+ }
+
++static void
++ip_checkdefault(struct ipt_ip *ip)
++{
++ static const char iface_mask[IFNAMSIZ] = {};
++
++ if (ip->invflags || ip->flags & IPT_F_FRAG)
++ return;
++
++ if (memcmp(ip->iniface_mask, iface_mask, IFNAMSIZ) != 0)
++ return;
++
++ if (memcmp(ip->outiface_mask, iface_mask, IFNAMSIZ) != 0)
++ return;
++
++ if (ip->smsk.s_addr || ip->dmsk.s_addr)
++ return;
++
++ if (ip->proto)
++ return;
++
++ ip->flags |= IPT_F_NO_DEF_MATCH;
++}
++
+ static bool
+ ip_checkentry(const struct ipt_ip *ip)
+ {
+@@ -527,6 +553,8 @@ find_check_entry(struct ipt_entry *e, st
+ struct xt_mtchk_param mtpar;
+ struct xt_entry_match *ematch;
+
++ ip_checkdefault(&e->ip);
++
+ if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
+ return -ENOMEM;
+
+@@ -821,6 +849,7 @@ copy_entries_to_user(unsigned int total_
+ const struct xt_table_info *private = table->private;
+ int ret = 0;
+ const void *loc_cpu_entry;
++ u8 flags;
+
+ counters = alloc_counters(table);
+ if (IS_ERR(counters))
+@@ -848,6 +877,14 @@ copy_entries_to_user(unsigned int total_
+ goto free_counters;
+ }
+
++ flags = e->ip.flags & IPT_F_MASK;
++ if (copy_to_user(userptr + off
++ + offsetof(struct ipt_entry, ip.flags),
++ &flags, sizeof(flags)) != 0) {
++ ret = -EFAULT;
++ goto free_counters;
++ }
++
+ for (i = sizeof(struct ipt_entry);
+ i < e->target_offset;
+ i += m->u.match_size) {
+@@ -1228,12 +1265,15 @@ compat_copy_entry_to_user(struct ipt_ent
+ compat_uint_t origsize;
+ const struct xt_entry_match *ematch;
+ int ret = 0;
++ u8 flags = e->ip.flags & IPT_F_MASK;
+
+ origsize = *size;
+ ce = *dstptr;
+ if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
+ copy_to_user(&ce->counters, &counters[i],
+- sizeof(counters[i])) != 0)
++ sizeof(counters[i])) != 0 ||
++ copy_to_user(&ce->ip.flags, &flags,
++ sizeof(flags)) != 0)
+ return -EFAULT;
+
+ *dstptr += sizeof(struct compat_ipt_entry);
diff --git a/target/linux/generic/pending-5.4/611-netfilter_match_bypass_default_table.patch b/target/linux/generic/pending-5.4/611-netfilter_match_bypass_default_table.patch
new file mode 100644
index 0000000000..ba976b0751
--- /dev/null
+++ b/target/linux/generic/pending-5.4/611-netfilter_match_bypass_default_table.patch
@@ -0,0 +1,106 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: netfilter: match bypass default table
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ net/ipv4/netfilter/ip_tables.c | 79 +++++++++++++++++++++++++++++++-----------
+ 1 file changed, 58 insertions(+), 21 deletions(-)
+
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -249,6 +249,33 @@ struct ipt_entry *ipt_next_entry(const s
+ return (void *)entry + entry->next_offset;
+ }
+
++static bool
++ipt_handle_default_rule(struct ipt_entry *e, unsigned int *verdict)
++{
++ struct xt_entry_target *t;
++ struct xt_standard_target *st;
++
++ if (e->target_offset != sizeof(struct ipt_entry))
++ return false;
++
++ if (!(e->ip.flags & IPT_F_NO_DEF_MATCH))
++ return false;
++
++ t = ipt_get_target(e);
++ if (t->u.kernel.target->target)
++ return false;
++
++ st = (struct xt_standard_target *) t;
++ if (st->verdict == XT_RETURN)
++ return false;
++
++ if (st->verdict >= 0)
++ return false;
++
++ *verdict = (unsigned)(-st->verdict) - 1;
++ return true;
++}
++
+ /* Returns one of the generic firewall policies, like NF_ACCEPT. */
+ unsigned int
+ ipt_do_table(struct sk_buff *skb,
+@@ -269,27 +296,28 @@ ipt_do_table(struct sk_buff *skb,
+ unsigned int addend;
+
+ /* Initialization */
++ WARN_ON(!(table->valid_hooks & (1 << hook)));
++ local_bh_disable();
++ private = READ_ONCE(table->private); /* Address dependency. */
++ cpu = smp_processor_id();
++ table_base = private->entries;
++
++ e = get_entry(table_base, private->hook_entry[hook]);
++ if (ipt_handle_default_rule(e, &verdict)) {
++ struct xt_counters *counter;
++
++ counter = xt_get_this_cpu_counter(&e->counters);
++ ADD_COUNTER(*counter, skb->len, 1);
++ local_bh_enable();
++ return verdict;
++ }
++
+ stackidx = 0;
+ ip = ip_hdr(skb);
+ indev = state->in ? state->in->name : nulldevname;
+ outdev = state->out ? state->out->name : nulldevname;
+- /* We handle fragments by dealing with the first fragment as
+- * if it was a normal packet. All other fragments are treated
+- * normally, except that they will NEVER match rules that ask
+- * things we don't know, ie. tcp syn flag or ports). If the
+- * rule is also a fragment-specific rule, non-fragments won't
+- * match it. */
+- acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
+- acpar.thoff = ip_hdrlen(skb);
+- acpar.hotdrop = false;
+- acpar.state = state;
+
+- WARN_ON(!(table->valid_hooks & (1 << hook)));
+- local_bh_disable();
+ addend = xt_write_recseq_begin();
+- private = READ_ONCE(table->private); /* Address dependency. */
+- cpu = smp_processor_id();
+- table_base = private->entries;
+ jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
+
+ /* Switch to alternate jumpstack if we're being invoked via TEE.
+@@ -302,7 +330,16 @@ ipt_do_table(struct sk_buff *skb,
+ if (static_key_false(&xt_tee_enabled))
+ jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
+
+- e = get_entry(table_base, private->hook_entry[hook]);
++ /* We handle fragments by dealing with the first fragment as
++ * if it was a normal packet. All other fragments are treated
++ * normally, except that they will NEVER match rules that ask
++ * things we don't know, ie. tcp syn flag or ports). If the
++ * rule is also a fragment-specific rule, non-fragments won't
++ * match it. */
++ acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
++ acpar.thoff = ip_hdrlen(skb);
++ acpar.hotdrop = false;
++ acpar.state = state;
+
+ do {
+ const struct xt_entry_target *t;
diff --git a/target/linux/generic/pending-5.4/612-netfilter_match_reduce_memory_access.patch b/target/linux/generic/pending-5.4/612-netfilter_match_reduce_memory_access.patch
new file mode 100644
index 0000000000..cd6fcf8e74
--- /dev/null
+++ b/target/linux/generic/pending-5.4/612-netfilter_match_reduce_memory_access.patch
@@ -0,0 +1,22 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: netfilter: reduce match memory access
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ net/ipv4/netfilter/ip_tables.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -56,9 +56,9 @@ ip_packet_match(const struct iphdr *ip,
+ if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
+ return true;
+
+- if (NF_INVF(ipinfo, IPT_INV_SRCIP,
++ if (NF_INVF(ipinfo, IPT_INV_SRCIP, ipinfo->smsk.s_addr &&
+ (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
+- NF_INVF(ipinfo, IPT_INV_DSTIP,
++ NF_INVF(ipinfo, IPT_INV_DSTIP, ipinfo->dmsk.s_addr &&
+ (ip->daddr & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
+ return false;
+
diff --git a/target/linux/generic/pending-5.4/613-netfilter_optional_tcp_window_check.patch b/target/linux/generic/pending-5.4/613-netfilter_optional_tcp_window_check.patch
new file mode 100644
index 0000000000..634218dfc3
--- /dev/null
+++ b/target/linux/generic/pending-5.4/613-netfilter_optional_tcp_window_check.patch
@@ -0,0 +1,53 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: netfilter: optional tcp window check
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ net/netfilter/nf_conntrack_proto_tcp.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -34,6 +34,9 @@
+ #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+ #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+
++/* Do not check the TCP window for incoming packets */
++static int nf_ct_tcp_no_window_check __read_mostly = 1;
++
+ /* "Be conservative in what you do,
+ be liberal in what you accept from others."
+ If it's non-zero, we mark only out of window RST segments as INVALID. */
+@@ -484,6 +487,9 @@ static bool tcp_in_window(const struct n
+ s32 receiver_offset;
+ bool res, in_recv_win;
+
++ if (nf_ct_tcp_no_window_check)
++ return true;
++
+ /*
+ * Get the required data from the packet.
+ */
+@@ -1059,7 +1065,7 @@ static int tcp_packet(struct nf_conn *ct
+ IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
+ timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
+ timeout = timeouts[TCP_CONNTRACK_UNACK];
+- else if (ct->proto.tcp.last_win == 0 &&
++ else if (!nf_ct_tcp_no_window_check && ct->proto.tcp.last_win == 0 &&
+ timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
+ timeout = timeouts[TCP_CONNTRACK_RETRANS];
+ else
+@@ -1508,6 +1514,13 @@ static struct ctl_table tcp_sysctl_table
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
++ {
++ .procname = "nf_conntrack_tcp_no_window_check",
++ .data = &nf_ct_tcp_no_window_check,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
+ { }
+ };
+ #endif /* CONFIG_SYSCTL */
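
The new sysctl defaults to 1, i.e. the window check is skipped. A small sketch of toggling it at runtime; the /proc path and privilege requirements are the usual assumptions and are not spelled out in the patch itself.

/* Sketch: re-enable the conntrack TCP window check at runtime. Assumes
 * the sysctl appears at the usual nf_conntrack path and the caller may
 * write it. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/netfilter/nf_conntrack_tcp_no_window_check", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("0\n", f);	/* 0 = check windows again, 1 = skip the check (default) */
	fclose(f);
	return 0;
}
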
diff --git a/target/linux/generic/pending-5.4/616-net_optimize_xfrm_calls.patch b/target/linux/generic/pending-5.4/616-net_optimize_xfrm_calls.patch
new file mode 100644
index 0000000000..6a5801027c
--- /dev/null
+++ b/target/linux/generic/pending-5.4/616-net_optimize_xfrm_calls.patch
@@ -0,0 +1,20 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: add a small xfrm related performance optimization
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ net/netfilter/nf_nat_core.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/netfilter/nf_nat_core.c
++++ b/net/netfilter/nf_nat_core.c
+@@ -110,6 +110,9 @@ int nf_xfrm_me_harder(struct net *net, s
+ struct sock *sk = skb->sk;
+ int err;
+
++ if (skb->dev && !dev_net(skb->dev)->xfrm.policy_count[XFRM_POLICY_OUT])
++ return 0;
++
+ err = xfrm_decode_session(skb, &fl, family);
+ if (err < 0)
+ return err;
diff --git a/target/linux/generic/pending-5.4/620-net_sched-codel-do-not-defer-queue-length-update.patch b/target/linux/generic/pending-5.4/620-net_sched-codel-do-not-defer-queue-length-update.patch
new file mode 100644
index 0000000000..457c812841
--- /dev/null
+++ b/target/linux/generic/pending-5.4/620-net_sched-codel-do-not-defer-queue-length-update.patch
@@ -0,0 +1,86 @@
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Date: Mon, 21 Aug 2017 11:14:14 +0300
+Subject: [PATCH] net_sched/codel: do not defer queue length update
+
+When codel wants to drop the last packet in ->dequeue() it cannot call
+qdisc_tree_reduce_backlog() right away - it would notify the parent qdisc
+about a zero qlen and HTB/HFSC would deactivate the class. The same class
+would then be deactivated a second time by the caller of ->dequeue().
+Currently codel and fq_codel defer the update. This triggers a warning in
+HFSC when its qlen != 0 but there are no active classes.
+
+This patch updates the parent queue length immediately: it temporarily
+increases qlen around qdisc_tree_reduce_backlog() to prevent the first
+class deactivation when we have an skb to return.
+
+This might open another problem in HFSC - now the peek operation could
+fail and deactivate the parent class.
+
+Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=109581
+---
+
+--- a/net/sched/sch_codel.c
++++ b/net/sched/sch_codel.c
+@@ -95,11 +95,17 @@ static struct sk_buff *codel_qdisc_deque
+ &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
+ drop_func, dequeue_func);
+
+- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
+- * or HTB crashes. Defer it for next round.
++	/* If our qlen is 0, qdisc_tree_reduce_backlog() will deactivate the
++	 * parent class; dequeue in the parent qdisc will do the same if we
++	 * return an skb. Temporarily increment qlen if we have an skb.
+ */
+- if (q->stats.drop_count && sch->q.qlen) {
+- qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
++ if (q->stats.drop_count) {
++ if (skb)
++ sch->q.qlen++;
++ qdisc_tree_reduce_backlog(sch, q->stats.drop_count,
++ q->stats.drop_len);
++ if (skb)
++ sch->q.qlen--;
+ q->stats.drop_count = 0;
+ q->stats.drop_len = 0;
+ }
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -317,6 +317,21 @@ begin:
+ flow->dropped += q->cstats.drop_count - prev_drop_count;
+ flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
+
++	/* If our qlen is 0, qdisc_tree_reduce_backlog() will deactivate the
++	 * parent class; dequeue in the parent qdisc will do the same if we
++	 * return an skb. Temporarily increment qlen if we have an skb.
++ */
++ if (q->cstats.drop_count) {
++ if (skb)
++ sch->q.qlen++;
++ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
++ q->cstats.drop_len);
++ if (skb)
++ sch->q.qlen--;
++ q->cstats.drop_count = 0;
++ q->cstats.drop_len = 0;
++ }
++
+ if (!skb) {
+ /* force a pass through old_flows to prevent starvation */
+ if ((head == &q->new_flows) && !list_empty(&q->old_flows))
+@@ -327,15 +342,6 @@ begin:
+ }
+ qdisc_bstats_update(sch, skb);
+ flow->deficit -= qdisc_pkt_len(skb);
+- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
+- * or HTB crashes. Defer it for next round.
+- */
+- if (q->cstats.drop_count && sch->q.qlen) {
+- qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
+- q->cstats.drop_len);
+- q->cstats.drop_count = 0;
+- q->cstats.drop_len = 0;
+- }
+ return skb;
+ }
+
diff --git a/target/linux/generic/pending-5.4/630-packet_socket_type.patch b/target/linux/generic/pending-5.4/630-packet_socket_type.patch
new file mode 100644
index 0000000000..25f44b466a
--- /dev/null
+++ b/target/linux/generic/pending-5.4/630-packet_socket_type.patch
@@ -0,0 +1,138 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: net: add an optimization for dealing with raw sockets
+
+lede-commit: 4898039703d7315f0f3431c860123338ec3be0f6
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ include/uapi/linux/if_packet.h | 3 +++
+ net/packet/af_packet.c | 34 +++++++++++++++++++++++++++-------
+ net/packet/internal.h | 1 +
+ 3 files changed, 31 insertions(+), 7 deletions(-)
+
+--- a/include/uapi/linux/if_packet.h
++++ b/include/uapi/linux/if_packet.h
+@@ -32,6 +32,8 @@ struct sockaddr_ll {
+ #define PACKET_KERNEL 7 /* To kernel space */
+ /* Unused, PACKET_FASTROUTE and PACKET_LOOPBACK are invisible to user space */
+ #define PACKET_FASTROUTE 6 /* Fastrouted frame */
++#define PACKET_MASK_ANY 0xffffffff /* mask for packet type bits */
++
+
+ /* Packet socket options */
+
+@@ -57,6 +59,7 @@ struct sockaddr_ll {
+ #define PACKET_QDISC_BYPASS 20
+ #define PACKET_ROLLOVER_STATS 21
+ #define PACKET_FANOUT_DATA 22
++#define PACKET_RECV_TYPE 23
+
+ #define PACKET_FANOUT_HASH 0
+ #define PACKET_FANOUT_LB 1
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1796,6 +1796,7 @@ static int packet_rcv_spkt(struct sk_buf
+ {
+ struct sock *sk;
+ struct sockaddr_pkt *spkt;
++ struct packet_sock *po;
+
+ /*
+ * When we registered the protocol we saved the socket in the data
+@@ -1803,6 +1804,7 @@ static int packet_rcv_spkt(struct sk_buf
+ */
+
+ sk = pt->af_packet_priv;
++ po = pkt_sk(sk);
+
+ /*
+ * Yank back the headers [hope the device set this
+@@ -1815,7 +1817,7 @@ static int packet_rcv_spkt(struct sk_buf
+ * so that this procedure is noop.
+ */
+
+- if (skb->pkt_type == PACKET_LOOPBACK)
++ if (!(po->pkt_type & (1 << skb->pkt_type)))
+ goto out;
+
+ if (!net_eq(dev_net(dev), sock_net(sk)))
+@@ -2043,12 +2045,12 @@ static int packet_rcv(struct sk_buff *sk
+ unsigned int snaplen, res;
+ bool is_drop_n_account = false;
+
+- if (skb->pkt_type == PACKET_LOOPBACK)
+- goto drop;
+-
+ sk = pt->af_packet_priv;
+ po = pkt_sk(sk);
+
++ if (!(po->pkt_type & (1 << skb->pkt_type)))
++ goto drop;
++
+ if (!net_eq(dev_net(dev), sock_net(sk)))
+ goto drop;
+
+@@ -2174,12 +2176,12 @@ static int tpacket_rcv(struct sk_buff *s
+ BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
+ BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
+
+- if (skb->pkt_type == PACKET_LOOPBACK)
+- goto drop;
+-
+ sk = pt->af_packet_priv;
+ po = pkt_sk(sk);
+
++ if (!(po->pkt_type & (1 << skb->pkt_type)))
++ goto drop;
++
+ if (!net_eq(dev_net(dev), sock_net(sk)))
+ goto drop;
+
+@@ -3265,6 +3267,7 @@ static int packet_create(struct net *net
+ mutex_init(&po->pg_vec_lock);
+ po->rollover = NULL;
+ po->prot_hook.func = packet_rcv;
++ po->pkt_type = PACKET_MASK_ANY & ~(1 << PACKET_LOOPBACK);
+
+ if (sock->type == SOCK_PACKET)
+ po->prot_hook.func = packet_rcv_spkt;
+@@ -3885,6 +3888,16 @@ packet_setsockopt(struct socket *sock, i
+ po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
+ return 0;
+ }
++ case PACKET_RECV_TYPE:
++ {
++ unsigned int val;
++ if (optlen != sizeof(val))
++ return -EINVAL;
++ if (copy_from_user(&val, optval, sizeof(val)))
++ return -EFAULT;
++ po->pkt_type = val & ~BIT(PACKET_LOOPBACK);
++ return 0;
++ }
+ default:
+ return -ENOPROTOOPT;
+ }
+@@ -3937,6 +3950,13 @@ static int packet_getsockopt(struct sock
+ case PACKET_VNET_HDR:
+ val = po->has_vnet_hdr;
+ break;
++ case PACKET_RECV_TYPE:
++ if (len > sizeof(unsigned int))
++ len = sizeof(unsigned int);
++ val = po->pkt_type;
++
++ data = &val;
++ break;
+ case PACKET_VERSION:
+ val = po->tp_version;
+ break;
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -132,6 +132,7 @@ struct packet_sock {
+ struct net_device __rcu *cached_dev;
+ int (*xmit)(struct sk_buff *skb);
+ struct packet_type prot_hook ____cacheline_aligned_in_smp;
++ unsigned int pkt_type;
+ };
+
+ static struct packet_sock *pkt_sk(struct sock *sk)
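
A hedged userspace sketch of the PACKET_RECV_TYPE option introduced above; the constants come from the hunks (option value 23, a bitmask indexed by the PACKET_* packet types), everything else is an illustrative assumption.

/* Sketch: restrict an AF_PACKET socket to incoming traffic only.
 * Needs CAP_NET_RAW; PACKET_RECV_TYPE only exists in patched headers. */
#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

#ifndef PACKET_RECV_TYPE
#define PACKET_RECV_TYPE 23
#endif

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	unsigned int mask = (1 << PACKET_HOST) |
			    (1 << PACKET_BROADCAST) |
			    (1 << PACKET_MULTICAST) |
			    (1 << PACKET_OTHERHOST);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* PACKET_OUTGOING is not in the mask, so locally generated copies
	 * are dropped by the kernel before they reach this socket. */
	if (setsockopt(fd, SOL_PACKET, PACKET_RECV_TYPE, &mask, sizeof(mask)) < 0)
		perror("setsockopt(PACKET_RECV_TYPE)");

	/* ... recvfrom(fd, ...) as usual ... */
	return 0;
}
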
diff --git a/target/linux/generic/pending-5.4/640-netfilter-nf_flow_table-add-hardware-offload-support.patch b/target/linux/generic/pending-5.4/640-netfilter-nf_flow_table-add-hardware-offload-support.patch
new file mode 100644
index 0000000000..f0946e8dd2
--- /dev/null
+++ b/target/linux/generic/pending-5.4/640-netfilter-nf_flow_table-add-hardware-offload-support.patch
@@ -0,0 +1,564 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Thu, 11 Jan 2018 16:32:00 +0100
+Subject: [PATCH] netfilter: nf_flow_table: add hardware offload support
+
+This patch adds the infrastructure to offload flows to hardware, in case
+the NIC/switch comes with built-in flow table capabilities.
+
+If the hardware provides no flow tables, or its tables are limited in
+terms of features, the existing infrastructure falls back to the
+software flow table implementation.
+
+The software flow table garbage collector skips entries that reside in
+the hardware, so the hardware is also responsible for releasing such
+flow table entries via flow_offload_dead().
+
+Hardware configuration, either to add or to delete entries, is done from
+the hardware offload workqueue, to ensure this is done from user context
+given that we may sleep when grabbing the mdio mutex.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+ create mode 100644 net/netfilter/nf_flow_table_hw.c
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -918,6 +918,13 @@ struct dev_ifalias {
+ char ifalias[];
+ };
+
++struct flow_offload;
++
++enum flow_offload_type {
++ FLOW_OFFLOAD_ADD = 0,
++ FLOW_OFFLOAD_DEL,
++};
++
+ /*
+ * This structure defines the management hooks for network devices.
+ * The following hooks can be defined; unless noted otherwise, they are
+@@ -1150,6 +1157,10 @@ struct dev_ifalias {
+ * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
+ * u16 flags);
+ *
++ * int (*ndo_flow_offload)(enum flow_offload_type type,
++ * struct flow_offload *flow);
++ * Adds/deletes flow entry to/from net device flowtable.
++ *
+ * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
+ * Called to change device carrier. Soft-devices (like dummy, team, etc)
+ * which do not represent real hardware may define this to allow their
+@@ -1377,6 +1388,8 @@ struct net_device_ops {
+ int (*ndo_bridge_dellink)(struct net_device *dev,
+ struct nlmsghdr *nlh,
+ u16 flags);
++ int (*ndo_flow_offload)(enum flow_offload_type type,
++ struct flow_offload *flow);
+ int (*ndo_change_carrier)(struct net_device *dev,
+ bool new_carrier);
+ int (*ndo_get_phys_port_id)(struct net_device *dev,
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -20,11 +20,17 @@ struct nf_flowtable_type {
+ struct module *owner;
+ };
+
++enum nf_flowtable_flags {
++ NF_FLOWTABLE_F_HW = 0x1,
++};
++
+ struct nf_flowtable {
+ struct list_head list;
+ struct rhashtable rhashtable;
+ const struct nf_flowtable_type *type;
++ u32 flags;
+ struct delayed_work gc_work;
++ possible_net_t ft_net;
+ };
+
+ enum flow_offload_tuple_dir {
+@@ -69,6 +75,7 @@ struct flow_offload_tuple_rhash {
+ #define FLOW_OFFLOAD_DNAT 0x2
+ #define FLOW_OFFLOAD_DYING 0x4
+ #define FLOW_OFFLOAD_TEARDOWN 0x8
++#define FLOW_OFFLOAD_HW 0x10
+
+ struct flow_offload {
+ struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
+@@ -125,6 +132,22 @@ unsigned int nf_flow_offload_ip_hook(voi
+ unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
+
++void nf_flow_offload_hw_add(struct net *net, struct flow_offload *flow,
++ struct nf_conn *ct);
++void nf_flow_offload_hw_del(struct net *net, struct flow_offload *flow);
++
++struct nf_flow_table_hw {
++ struct module *owner;
++ void (*add)(struct net *net, struct flow_offload *flow,
++ struct nf_conn *ct);
++ void (*del)(struct net *net, struct flow_offload *flow);
++};
++
++int nf_flow_table_hw_register(const struct nf_flow_table_hw *offload);
++void nf_flow_table_hw_unregister(const struct nf_flow_table_hw *offload);
++
++extern struct work_struct nf_flow_offload_hw_work;
++
+ #define MODULE_ALIAS_NF_FLOWTABLE(family) \
+ MODULE_ALIAS("nf-flowtable-" __stringify(family))
+
+--- a/include/uapi/linux/netfilter/nf_tables.h
++++ b/include/uapi/linux/netfilter/nf_tables.h
+@@ -1464,6 +1464,7 @@ enum nft_object_attributes {
+ * @NFTA_FLOWTABLE_HOOK: netfilter hook configuration(NLA_U32)
+ * @NFTA_FLOWTABLE_USE: number of references to this flow table (NLA_U32)
+ * @NFTA_FLOWTABLE_HANDLE: object handle (NLA_U64)
++ * @NFTA_FLOWTABLE_FLAGS: flags (NLA_U32)
+ */
+ enum nft_flowtable_attributes {
+ NFTA_FLOWTABLE_UNSPEC,
+@@ -1473,6 +1474,7 @@ enum nft_flowtable_attributes {
+ NFTA_FLOWTABLE_USE,
+ NFTA_FLOWTABLE_HANDLE,
+ NFTA_FLOWTABLE_PAD,
++ NFTA_FLOWTABLE_FLAGS,
+ __NFTA_FLOWTABLE_MAX
+ };
+ #define NFTA_FLOWTABLE_MAX (__NFTA_FLOWTABLE_MAX - 1)
+--- a/net/netfilter/Kconfig
++++ b/net/netfilter/Kconfig
+@@ -714,6 +714,15 @@ config NF_FLOW_TABLE
+
+ To compile it as a module, choose M here.
+
++config NF_FLOW_TABLE_HW
++ tristate "Netfilter flow table hardware offload module"
++ depends on NF_FLOW_TABLE
++ help
++ This option adds hardware offload support for the flow table core
++ infrastructure.
++
++ To compile it as a module, choose M here.
++
+ config NETFILTER_XTABLES
+ tristate "Netfilter Xtables support (required for ip_tables)"
+ default m if NETFILTER_ADVANCED=n
+--- a/net/netfilter/Makefile
++++ b/net/netfilter/Makefile
+@@ -126,6 +126,7 @@ obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_t
+ nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o
+
+ obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o
++obj-$(CONFIG_NF_FLOW_TABLE_HW) += nf_flow_table_hw.o
+
+ # generic X tables
+ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -228,10 +228,16 @@ int flow_offload_add(struct nf_flowtable
+ }
+ EXPORT_SYMBOL_GPL(flow_offload_add);
+
++static inline bool nf_flow_in_hw(const struct flow_offload *flow)
++{
++ return flow->flags & FLOW_OFFLOAD_HW;
++}
++
+ static void flow_offload_del(struct nf_flowtable *flow_table,
+ struct flow_offload *flow)
+ {
+ struct flow_offload_entry *e;
++ struct net *net = read_pnet(&flow_table->ft_net);
+
+ rhashtable_remove_fast(&flow_table->rhashtable,
+ &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
+@@ -246,6 +252,9 @@ static void flow_offload_del(struct nf_f
+ if (!(flow->flags & FLOW_OFFLOAD_TEARDOWN))
+ flow_offload_fixup_ct_state(e->ct);
+
++ if (nf_flow_in_hw(flow))
++ nf_flow_offload_hw_del(net, flow);
++
+ flow_offload_free(flow);
+ }
+
+@@ -359,6 +368,9 @@ static int nf_flow_offload_gc_step(struc
+ if (!teardown)
+ nf_ct_offload_timeout(flow);
+
++ if (nf_flow_in_hw(flow) && !teardown)
++ continue;
++
+ if (nf_flow_has_expired(flow) || teardown)
+ flow_offload_del(flow_table, flow);
+ }
+@@ -494,10 +506,43 @@ int nf_flow_dnat_port(const struct flow_
+ }
+ EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
+
++static const struct nf_flow_table_hw __rcu *nf_flow_table_hw_hook __read_mostly;
++
++static int nf_flow_offload_hw_init(struct nf_flowtable *flow_table)
++{
++ const struct nf_flow_table_hw *offload;
++
++ if (!rcu_access_pointer(nf_flow_table_hw_hook))
++ request_module("nf-flow-table-hw");
++
++ rcu_read_lock();
++ offload = rcu_dereference(nf_flow_table_hw_hook);
++ if (!offload)
++ goto err_no_hw_offload;
++
++ if (!try_module_get(offload->owner))
++ goto err_no_hw_offload;
++
++ rcu_read_unlock();
++
++ return 0;
++
++err_no_hw_offload:
++ rcu_read_unlock();
++
++ return -EOPNOTSUPP;
++}
++
+ int nf_flow_table_init(struct nf_flowtable *flowtable)
+ {
+ int err;
+
++ if (flowtable->flags & NF_FLOWTABLE_F_HW) {
++ err = nf_flow_offload_hw_init(flowtable);
++ if (err)
++ return err;
++ }
++
+ INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
+
+ err = rhashtable_init(&flowtable->rhashtable,
+@@ -538,6 +583,8 @@ static void nf_flow_table_iterate_cleanu
+ {
+ nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
+ flush_delayed_work(&flowtable->gc_work);
++ if (flowtable->flags & NF_FLOWTABLE_F_HW)
++ flush_work(&nf_flow_offload_hw_work);
+ }
+
+ void nf_flow_table_cleanup(struct net *net, struct net_device *dev)
+@@ -551,6 +598,26 @@ void nf_flow_table_cleanup(struct net *n
+ }
+ EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);
+
++struct work_struct nf_flow_offload_hw_work;
++EXPORT_SYMBOL_GPL(nf_flow_offload_hw_work);
++
++/* Give the hardware workqueue the chance to remove entries from hardware.*/
++static void nf_flow_offload_hw_free(struct nf_flowtable *flowtable)
++{
++ const struct nf_flow_table_hw *offload;
++
++ flush_work(&nf_flow_offload_hw_work);
++
++ rcu_read_lock();
++ offload = rcu_dereference(nf_flow_table_hw_hook);
++ if (!offload) {
++ rcu_read_unlock();
++ return;
++ }
++ module_put(offload->owner);
++ rcu_read_unlock();
++}
++
+ void nf_flow_table_free(struct nf_flowtable *flow_table)
+ {
+ mutex_lock(&flowtable_lock);
+@@ -560,9 +627,58 @@ void nf_flow_table_free(struct nf_flowta
+ nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
+ WARN_ON(!nf_flow_offload_gc_step(flow_table));
+ rhashtable_destroy(&flow_table->rhashtable);
++ if (flow_table->flags & NF_FLOWTABLE_F_HW)
++ nf_flow_offload_hw_free(flow_table);
+ }
+ EXPORT_SYMBOL_GPL(nf_flow_table_free);
+
++/* Must be called from user context. */
++void nf_flow_offload_hw_add(struct net *net, struct flow_offload *flow,
++ struct nf_conn *ct)
++{
++ const struct nf_flow_table_hw *offload;
++
++ rcu_read_lock();
++ offload = rcu_dereference(nf_flow_table_hw_hook);
++ if (offload)
++ offload->add(net, flow, ct);
++ rcu_read_unlock();
++}
++EXPORT_SYMBOL_GPL(nf_flow_offload_hw_add);
++
++/* Must be called from user context. */
++void nf_flow_offload_hw_del(struct net *net, struct flow_offload *flow)
++{
++ const struct nf_flow_table_hw *offload;
++
++ rcu_read_lock();
++ offload = rcu_dereference(nf_flow_table_hw_hook);
++ if (offload)
++ offload->del(net, flow);
++ rcu_read_unlock();
++}
++EXPORT_SYMBOL_GPL(nf_flow_offload_hw_del);
++
++int nf_flow_table_hw_register(const struct nf_flow_table_hw *offload)
++{
++ if (rcu_access_pointer(nf_flow_table_hw_hook))
++ return -EBUSY;
++
++ rcu_assign_pointer(nf_flow_table_hw_hook, offload);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(nf_flow_table_hw_register);
++
++void nf_flow_table_hw_unregister(const struct nf_flow_table_hw *offload)
++{
++ WARN_ON(rcu_access_pointer(nf_flow_table_hw_hook) != offload);
++ rcu_assign_pointer(nf_flow_table_hw_hook, NULL);
++
++ synchronize_rcu();
++}
++EXPORT_SYMBOL_GPL(nf_flow_table_hw_unregister);
++
+ static int nf_flow_table_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+ {
+--- /dev/null
++++ b/net/netfilter/nf_flow_table_hw.c
+@@ -0,0 +1,169 @@
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/netfilter.h>
++#include <linux/rhashtable.h>
++#include <linux/netdevice.h>
++#include <net/netfilter/nf_flow_table.h>
++#include <net/netfilter/nf_conntrack.h>
++#include <net/netfilter/nf_conntrack_core.h>
++#include <net/netfilter/nf_conntrack_tuple.h>
++
++static DEFINE_SPINLOCK(flow_offload_hw_pending_list_lock);
++static LIST_HEAD(flow_offload_hw_pending_list);
++
++static DEFINE_MUTEX(nf_flow_offload_hw_mutex);
++
++struct flow_offload_hw {
++ struct list_head list;
++ enum flow_offload_type type;
++ struct flow_offload *flow;
++ struct nf_conn *ct;
++ possible_net_t flow_hw_net;
++};
++
++static int do_flow_offload_hw(struct net *net, struct flow_offload *flow,
++ int type)
++{
++ struct net_device *indev;
++ int ret, ifindex;
++
++ ifindex = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx;
++ indev = dev_get_by_index(net, ifindex);
++ if (WARN_ON(!indev))
++ return 0;
++
++ mutex_lock(&nf_flow_offload_hw_mutex);
++ ret = indev->netdev_ops->ndo_flow_offload(type, flow);
++ mutex_unlock(&nf_flow_offload_hw_mutex);
++
++ dev_put(indev);
++
++ return ret;
++}
++
++static void flow_offload_hw_work_add(struct flow_offload_hw *offload)
++{
++ struct net *net;
++ int ret;
++
++ if (nf_ct_is_dying(offload->ct))
++ return;
++
++ net = read_pnet(&offload->flow_hw_net);
++ ret = do_flow_offload_hw(net, offload->flow, FLOW_OFFLOAD_ADD);
++ if (ret >= 0)
++ offload->flow->flags |= FLOW_OFFLOAD_HW;
++}
++
++static void flow_offload_hw_work_del(struct flow_offload_hw *offload)
++{
++ struct net *net = read_pnet(&offload->flow_hw_net);
++
++ do_flow_offload_hw(net, offload->flow, FLOW_OFFLOAD_DEL);
++}
++
++static void flow_offload_hw_work(struct work_struct *work)
++{
++ struct flow_offload_hw *offload, *next;
++ LIST_HEAD(hw_offload_pending);
++
++ spin_lock_bh(&flow_offload_hw_pending_list_lock);
++ list_replace_init(&flow_offload_hw_pending_list, &hw_offload_pending);
++ spin_unlock_bh(&flow_offload_hw_pending_list_lock);
++
++ list_for_each_entry_safe(offload, next, &hw_offload_pending, list) {
++ switch (offload->type) {
++ case FLOW_OFFLOAD_ADD:
++ flow_offload_hw_work_add(offload);
++ break;
++ case FLOW_OFFLOAD_DEL:
++ flow_offload_hw_work_del(offload);
++ break;
++ }
++ if (offload->ct)
++ nf_conntrack_put(&offload->ct->ct_general);
++ list_del(&offload->list);
++ kfree(offload);
++ }
++}
++
++static void flow_offload_queue_work(struct flow_offload_hw *offload)
++{
++ spin_lock_bh(&flow_offload_hw_pending_list_lock);
++ list_add_tail(&offload->list, &flow_offload_hw_pending_list);
++ spin_unlock_bh(&flow_offload_hw_pending_list_lock);
++
++ schedule_work(&nf_flow_offload_hw_work);
++}
++
++static void flow_offload_hw_add(struct net *net, struct flow_offload *flow,
++ struct nf_conn *ct)
++{
++ struct flow_offload_hw *offload;
++
++ offload = kmalloc(sizeof(struct flow_offload_hw), GFP_ATOMIC);
++ if (!offload)
++ return;
++
++ nf_conntrack_get(&ct->ct_general);
++ offload->type = FLOW_OFFLOAD_ADD;
++ offload->ct = ct;
++ offload->flow = flow;
++ write_pnet(&offload->flow_hw_net, net);
++
++ flow_offload_queue_work(offload);
++}
++
++static void flow_offload_hw_del(struct net *net, struct flow_offload *flow)
++{
++ struct flow_offload_hw *offload;
++
++ offload = kmalloc(sizeof(struct flow_offload_hw), GFP_ATOMIC);
++ if (!offload)
++ return;
++
++ offload->type = FLOW_OFFLOAD_DEL;
++ offload->ct = NULL;
++ offload->flow = flow;
++ write_pnet(&offload->flow_hw_net, net);
++
++ flow_offload_queue_work(offload);
++}
++
++static const struct nf_flow_table_hw flow_offload_hw = {
++ .add = flow_offload_hw_add,
++ .del = flow_offload_hw_del,
++ .owner = THIS_MODULE,
++};
++
++static int __init nf_flow_table_hw_module_init(void)
++{
++ INIT_WORK(&nf_flow_offload_hw_work, flow_offload_hw_work);
++ nf_flow_table_hw_register(&flow_offload_hw);
++
++ return 0;
++}
++
++static void __exit nf_flow_table_hw_module_exit(void)
++{
++ struct flow_offload_hw *offload, *next;
++ LIST_HEAD(hw_offload_pending);
++
++ nf_flow_table_hw_unregister(&flow_offload_hw);
++ cancel_work_sync(&nf_flow_offload_hw_work);
++
++ list_for_each_entry_safe(offload, next, &hw_offload_pending, list) {
++ if (offload->ct)
++ nf_conntrack_put(&offload->ct->ct_general);
++ list_del(&offload->list);
++ kfree(offload);
++ }
++}
++
++module_init(nf_flow_table_hw_module_init);
++module_exit(nf_flow_table_hw_module_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
++MODULE_ALIAS("nf-flow-table-hw");
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -5503,6 +5503,13 @@ static int nf_tables_flowtable_parse_hoo
+ if (err < 0)
+ return err;
+
++ for (i = 0; i < n; i++) {
++ if (flowtable->data.flags & NF_FLOWTABLE_F_HW &&
++ !dev_array[i]->netdev_ops->ndo_flow_offload) {
++ return -EOPNOTSUPP;
++ }
++ }
++
+ ops = kcalloc(n, sizeof(struct nf_hook_ops), GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+@@ -5634,10 +5641,19 @@ static int nf_tables_newflowtable(struct
+ }
+
+ flowtable->data.type = type;
++ write_pnet(&flowtable->data.ft_net, net);
++
+ err = type->init(&flowtable->data);
+ if (err < 0)
+ goto err3;
+
++ if (nla[NFTA_FLOWTABLE_FLAGS]) {
++ flowtable->data.flags =
++ ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
++ if (flowtable->data.flags & ~NF_FLOWTABLE_F_HW)
++ goto err4;
++ }
++
+ err = nf_tables_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK],
+ flowtable);
+ if (err < 0)
+@@ -5763,7 +5779,8 @@ static int nf_tables_fill_flowtable_info
+ nla_put_string(skb, NFTA_FLOWTABLE_NAME, flowtable->name) ||
+ nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) ||
+ nla_put_be64(skb, NFTA_FLOWTABLE_HANDLE, cpu_to_be64(flowtable->handle),
+- NFTA_FLOWTABLE_PAD))
++ NFTA_FLOWTABLE_PAD) ||
++ nla_put_be32(skb, NFTA_FLOWTABLE_FLAGS, htonl(flowtable->data.flags)))
+ goto nla_put_failure;
+
+ nest = nla_nest_start(skb, NFTA_FLOWTABLE_HOOK);
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -126,6 +126,9 @@ static void nft_flow_offload_eval(const
+ if (ret < 0)
+ goto err_flow_add;
+
++ if (flowtable->flags & NF_FLOWTABLE_F_HW)
++ nf_flow_offload_hw_add(nft_net(pkt), flow, ct);
++
+ dst_release(route.tuple[!dir].dst);
+ return;
+
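For illustration, a minimal sketch of the driver side of the hook introduced above, using the two-argument ndo_flow_offload signature as it stands at this point in the series (patch 641 later extends it). All foo_* symbols are hypothetical placeholders rather than code from a real driver; the callback is invoked from the offload workqueue under nf_flow_offload_hw_mutex, i.e. in user context where sleeping is allowed.

/* Hypothetical NIC driver glue for .ndo_flow_offload; foo_* names are
 * placeholders only.
 */
#include <linux/netdevice.h>
#include <net/netfilter/nf_flow_table.h>

static int foo_hw_flow_insert(struct flow_offload *flow)
{
	/* program both flow->tuplehash[] directions into the hardware
	 * flow table; a negative return keeps the flow software-only
	 */
	return -EOPNOTSUPP;
}

static void foo_hw_flow_remove(struct flow_offload *flow)
{
	/* drop the matching hardware entries */
}

/* Invoked from nf_flow_table_hw's workqueue, serialized by
 * nf_flow_offload_hw_mutex, so it may sleep (e.g. for MDIO access).
 */
static int foo_ndo_flow_offload(enum flow_offload_type type,
				struct flow_offload *flow)
{
	switch (type) {
	case FLOW_OFFLOAD_ADD:
		return foo_hw_flow_insert(flow);
	case FLOW_OFFLOAD_DEL:
		foo_hw_flow_remove(flow);
		return 0;
	}

	return -EOPNOTSUPP;
}

static const struct net_device_ops foo_netdev_ops = {
	/* ... the usual .ndo_open / .ndo_start_xmit / ... callbacks ... */
	.ndo_flow_offload	= foo_ndo_flow_offload,
};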
diff --git a/target/linux/generic/pending-5.4/641-netfilter-nf_flow_table-support-hw-offload-through-v.patch b/target/linux/generic/pending-5.4/641-netfilter-nf_flow_table-support-hw-offload-through-v.patch
new file mode 100644
index 0000000000..87ff634793
--- /dev/null
+++ b/target/linux/generic/pending-5.4/641-netfilter-nf_flow_table-support-hw-offload-through-v.patch
@@ -0,0 +1,303 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 15 Mar 2018 20:46:31 +0100
+Subject: [PATCH] netfilter: nf_flow_table: support hw offload through
+ virtual interfaces
+
+There are hardware offload devices that support offloading traffic on VLAN
+and PPPoE devices. Additionally, it is useful to be able to offload packets
+routed through bridge interfaces as well.
+Add support for finding the path to the offload device through these
+virtual interfaces, while collecting useful parameters for the offload
+device, such as VLAN ID/protocol, PPPoE session ID and Ethernet MAC addresses.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -919,6 +919,7 @@ struct dev_ifalias {
+ };
+
+ struct flow_offload;
++struct flow_offload_hw_path;
+
+ enum flow_offload_type {
+ FLOW_OFFLOAD_ADD = 0,
+@@ -1157,8 +1158,15 @@ enum flow_offload_type {
+ * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
+ * u16 flags);
+ *
++ * int (*ndo_flow_offload_check)(struct flow_offload_hw_path *path);
++ * For virtual devices like bridges, vlan, and pppoe, fill in the
++ * underlying network device that can be used for offloading connections.
++ * Return an error if offloading is not supported.
++ *
+ * int (*ndo_flow_offload)(enum flow_offload_type type,
+- * struct flow_offload *flow);
++ * struct flow_offload *flow,
++ * struct flow_offload_hw_path *src,
++ * struct flow_offload_hw_path *dest);
+ * Adds/deletes flow entry to/from net device flowtable.
+ *
+ * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
+@@ -1388,8 +1396,11 @@ struct net_device_ops {
+ int (*ndo_bridge_dellink)(struct net_device *dev,
+ struct nlmsghdr *nlh,
+ u16 flags);
++ int (*ndo_flow_offload_check)(struct flow_offload_hw_path *path);
+ int (*ndo_flow_offload)(enum flow_offload_type type,
+- struct flow_offload *flow);
++ struct flow_offload *flow,
++ struct flow_offload_hw_path *src,
++ struct flow_offload_hw_path *dest);
+ int (*ndo_change_carrier)(struct net_device *dev,
+ bool new_carrier);
+ int (*ndo_get_phys_port_id)(struct net_device *dev,
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -86,6 +86,21 @@ struct flow_offload {
+ };
+ };
+
++#define FLOW_OFFLOAD_PATH_ETHERNET BIT(0)
++#define FLOW_OFFLOAD_PATH_VLAN BIT(1)
++#define FLOW_OFFLOAD_PATH_PPPOE BIT(2)
++
++struct flow_offload_hw_path {
++ struct net_device *dev;
++ u32 flags;
++
++ u8 eth_src[ETH_ALEN];
++ u8 eth_dest[ETH_ALEN];
++ u16 vlan_proto;
++ u16 vlan_id;
++ u16 pppoe_sid;
++};
++
+ #define NF_FLOW_TIMEOUT (30 * HZ)
+
+ struct nf_flow_route {
+--- a/net/netfilter/nf_flow_table_hw.c
++++ b/net/netfilter/nf_flow_table_hw.c
+@@ -19,48 +19,75 @@ struct flow_offload_hw {
+ enum flow_offload_type type;
+ struct flow_offload *flow;
+ struct nf_conn *ct;
+- possible_net_t flow_hw_net;
++
++ struct flow_offload_hw_path src;
++ struct flow_offload_hw_path dest;
+ };
+
+-static int do_flow_offload_hw(struct net *net, struct flow_offload *flow,
+- int type)
++static void flow_offload_check_ethernet(struct flow_offload_tuple *tuple,
++ struct flow_offload_hw_path *path)
+ {
+- struct net_device *indev;
+- int ret, ifindex;
++ struct net_device *dev = path->dev;
++ struct neighbour *n;
+
+- ifindex = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx;
+- indev = dev_get_by_index(net, ifindex);
+- if (WARN_ON(!indev))
+- return 0;
+-
+- mutex_lock(&nf_flow_offload_hw_mutex);
+- ret = indev->netdev_ops->ndo_flow_offload(type, flow);
+- mutex_unlock(&nf_flow_offload_hw_mutex);
++ if (dev->type != ARPHRD_ETHER)
++ return;
+
+- dev_put(indev);
++ memcpy(path->eth_src, path->dev->dev_addr, ETH_ALEN);
++ n = dst_neigh_lookup(tuple->dst_cache, &tuple->src_v4);
++ if (!n)
++ return;
+
+- return ret;
++ memcpy(path->eth_dest, n->ha, ETH_ALEN);
++ path->flags |= FLOW_OFFLOAD_PATH_ETHERNET;
++ neigh_release(n);
+ }
+
+-static void flow_offload_hw_work_add(struct flow_offload_hw *offload)
++static int flow_offload_check_path(struct net *net,
++ struct flow_offload_tuple *tuple,
++ struct flow_offload_hw_path *path)
+ {
+- struct net *net;
+- int ret;
++ struct net_device *dev;
+
+- if (nf_ct_is_dying(offload->ct))
+- return;
++ dev = dev_get_by_index_rcu(net, tuple->iifidx);
++ if (!dev)
++ return -ENOENT;
++
++ path->dev = dev;
++ flow_offload_check_ethernet(tuple, path);
+
+- net = read_pnet(&offload->flow_hw_net);
+- ret = do_flow_offload_hw(net, offload->flow, FLOW_OFFLOAD_ADD);
+- if (ret >= 0)
+- offload->flow->flags |= FLOW_OFFLOAD_HW;
++ if (dev->netdev_ops->ndo_flow_offload_check)
++ return dev->netdev_ops->ndo_flow_offload_check(path);
++
++ return 0;
+ }
+
+-static void flow_offload_hw_work_del(struct flow_offload_hw *offload)
++static int do_flow_offload_hw(struct flow_offload_hw *offload)
+ {
+- struct net *net = read_pnet(&offload->flow_hw_net);
++ struct net_device *src_dev = offload->src.dev;
++ struct net_device *dest_dev = offload->dest.dev;
++ int ret;
++
++ ret = src_dev->netdev_ops->ndo_flow_offload(offload->type,
++ offload->flow,
++ &offload->src,
++ &offload->dest);
++
++ /* restore devices in case the driver mangled them */
++ offload->src.dev = src_dev;
++ offload->dest.dev = dest_dev;
++
++ return ret;
++}
+
+- do_flow_offload_hw(net, offload->flow, FLOW_OFFLOAD_DEL);
++static void flow_offload_hw_free(struct flow_offload_hw *offload)
++{
++ dev_put(offload->src.dev);
++ dev_put(offload->dest.dev);
++ if (offload->ct)
++ nf_conntrack_put(&offload->ct->ct_general);
++ list_del(&offload->list);
++ kfree(offload);
+ }
+
+ static void flow_offload_hw_work(struct work_struct *work)
+@@ -73,18 +100,22 @@ static void flow_offload_hw_work(struct
+ spin_unlock_bh(&flow_offload_hw_pending_list_lock);
+
+ list_for_each_entry_safe(offload, next, &hw_offload_pending, list) {
++ mutex_lock(&nf_flow_offload_hw_mutex);
+ switch (offload->type) {
+ case FLOW_OFFLOAD_ADD:
+- flow_offload_hw_work_add(offload);
++ if (nf_ct_is_dying(offload->ct))
++ break;
++
++ if (do_flow_offload_hw(offload) >= 0)
++ offload->flow->flags |= FLOW_OFFLOAD_HW;
+ break;
+ case FLOW_OFFLOAD_DEL:
+- flow_offload_hw_work_del(offload);
++ do_flow_offload_hw(offload);
+ break;
+ }
+- if (offload->ct)
+- nf_conntrack_put(&offload->ct->ct_general);
+- list_del(&offload->list);
+- kfree(offload);
++ mutex_unlock(&nf_flow_offload_hw_mutex);
++
++ flow_offload_hw_free(offload);
+ }
+ }
+
+@@ -97,20 +128,55 @@ static void flow_offload_queue_work(stru
+ schedule_work(&nf_flow_offload_hw_work);
+ }
+
++static struct flow_offload_hw *
++flow_offload_hw_prepare(struct net *net, struct flow_offload *flow)
++{
++ struct flow_offload_hw_path src = {};
++ struct flow_offload_hw_path dest = {};
++ struct flow_offload_tuple *tuple;
++ struct flow_offload_hw *offload = NULL;
++
++ rcu_read_lock_bh();
++
++ tuple = &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple;
++ if (flow_offload_check_path(net, tuple, &src))
++ goto out;
++
++ tuple = &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple;
++ if (flow_offload_check_path(net, tuple, &dest))
++ goto out;
++
++ if (!src.dev->netdev_ops->ndo_flow_offload)
++ goto out;
++
++ offload = kzalloc(sizeof(struct flow_offload_hw), GFP_ATOMIC);
++ if (!offload)
++ goto out;
++
++ dev_hold(src.dev);
++ dev_hold(dest.dev);
++ offload->src = src;
++ offload->dest = dest;
++ offload->flow = flow;
++
++out:
++ rcu_read_unlock_bh();
++
++ return offload;
++}
++
+ static void flow_offload_hw_add(struct net *net, struct flow_offload *flow,
+ struct nf_conn *ct)
+ {
+ struct flow_offload_hw *offload;
+
+- offload = kmalloc(sizeof(struct flow_offload_hw), GFP_ATOMIC);
++ offload = flow_offload_hw_prepare(net, flow);
+ if (!offload)
+ return;
+
+ nf_conntrack_get(&ct->ct_general);
+ offload->type = FLOW_OFFLOAD_ADD;
+ offload->ct = ct;
+- offload->flow = flow;
+- write_pnet(&offload->flow_hw_net, net);
+
+ flow_offload_queue_work(offload);
+ }
+@@ -119,14 +185,11 @@ static void flow_offload_hw_del(struct n
+ {
+ struct flow_offload_hw *offload;
+
+- offload = kmalloc(sizeof(struct flow_offload_hw), GFP_ATOMIC);
++ offload = flow_offload_hw_prepare(net, flow);
+ if (!offload)
+ return;
+
+ offload->type = FLOW_OFFLOAD_DEL;
+- offload->ct = NULL;
+- offload->flow = flow;
+- write_pnet(&offload->flow_hw_net, net);
+
+ flow_offload_queue_work(offload);
+ }
+@@ -153,12 +216,8 @@ static void __exit nf_flow_table_hw_modu
+ nf_flow_table_hw_unregister(&flow_offload_hw);
+ cancel_work_sync(&nf_flow_offload_hw_work);
+
+- list_for_each_entry_safe(offload, next, &hw_offload_pending, list) {
+- if (offload->ct)
+- nf_conntrack_put(&offload->ct->ct_general);
+- list_del(&offload->list);
+- kfree(offload);
+- }
++ list_for_each_entry_safe(offload, next, &hw_offload_pending, list)
++ flow_offload_hw_free(offload);
+ }
+
+ module_init(nf_flow_table_hw_module_init);
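With the extended signature, the driver additionally receives the resolved lower devices and the encapsulation collected while walking the virtual-device path. A hedged sketch of how that information might be consumed (foo_* symbols are again hypothetical placeholders; src corresponds to the original direction's ingress path, dest to its egress path):

/* Hypothetical hardware-programming helper; a real driver would write the
 * tuples, MAC rewrite and encapsulation into its own flow table here.
 */
static int foo_hw_flow_program(struct flow_offload *flow,
			       struct flow_offload_hw_path *src,
			       struct flow_offload_hw_path *dest,
			       u16 tx_vlan, u16 pppoe_sid)
{
	return -EOPNOTSUPP;
}

static int foo_ndo_flow_offload(enum flow_offload_type type,
				struct flow_offload *flow,
				struct flow_offload_hw_path *src,
				struct flow_offload_hw_path *dest)
{
	u16 tx_vlan = 0, pppoe_sid = 0;

	if (type == FLOW_OFFLOAD_DEL) {
		/* remove the hardware entries for this flow */
		return 0;
	}

	if (dest->flags & FLOW_OFFLOAD_PATH_VLAN)
		tx_vlan = dest->vlan_id;	/* tag to push on egress */
	if (dest->flags & FLOW_OFFLOAD_PATH_PPPOE)
		pppoe_sid = dest->pppoe_sid;	/* session to encapsulate in */

	/* src/dest ->eth_src and ->eth_dest carry the MAC rewrite for the
	 * two directions when FLOW_OFFLOAD_PATH_ETHERNET is set
	 */
	return foo_hw_flow_program(flow, src, dest, tx_vlan, pppoe_sid);
}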
diff --git a/target/linux/generic/pending-5.4/642-net-8021q-support-hardware-flow-table-offload.patch b/target/linux/generic/pending-5.4/642-net-8021q-support-hardware-flow-table-offload.patch
new file mode 100644
index 0000000000..7547c14bed
--- /dev/null
+++ b/target/linux/generic/pending-5.4/642-net-8021q-support-hardware-flow-table-offload.patch
@@ -0,0 +1,60 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 15 Mar 2018 20:49:58 +0100
+Subject: [PATCH] net: 8021q: support hardware flow table offload
+
+Add the VLAN ID and protocol information to the offload path.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -32,6 +32,10 @@
+ #include <linux/phy.h>
+ #include <net/arp.h>
+ #include <net/switchdev.h>
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++#include <linux/netfilter.h>
++#include <net/netfilter/nf_flow_table.h>
++#endif
+
+ #include "vlan.h"
+ #include "vlanproc.h"
+@@ -771,6 +775,27 @@ static int vlan_dev_get_iflink(const str
+ return real_dev->ifindex;
+ }
+
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++static int vlan_dev_flow_offload_check(struct flow_offload_hw_path *path)
++{
++ struct net_device *dev = path->dev;
++ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
++
++ if (path->flags & FLOW_OFFLOAD_PATH_VLAN)
++ return -EEXIST;
++
++ path->flags |= FLOW_OFFLOAD_PATH_VLAN;
++ path->vlan_proto = vlan->vlan_proto;
++ path->vlan_id = vlan->vlan_id;
++ path->dev = vlan->real_dev;
++
++ if (vlan->real_dev->netdev_ops->ndo_flow_offload_check)
++ return vlan->real_dev->netdev_ops->ndo_flow_offload_check(path);
++
++ return 0;
++}
++#endif /* CONFIG_NF_FLOW_TABLE */
++
+ static const struct ethtool_ops vlan_ethtool_ops = {
+ .get_link_ksettings = vlan_ethtool_get_link_ksettings,
+ .get_drvinfo = vlan_ethtool_get_drvinfo,
+@@ -808,6 +833,9 @@ static const struct net_device_ops vlan_
+ .ndo_fix_features = vlan_dev_fix_features,
+ .ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
+ .ndo_get_iflink = vlan_dev_get_iflink,
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++ .ndo_flow_offload_check = vlan_dev_flow_offload_check,
++#endif
+ };
+
+ static void vlan_dev_free(struct net_device *dev)
diff --git a/target/linux/generic/pending-5.4/643-net-bridge-support-hardware-flow-table-offload.patch b/target/linux/generic/pending-5.4/643-net-bridge-support-hardware-flow-table-offload.patch
new file mode 100644
index 0000000000..8b09fabf12
--- /dev/null
+++ b/target/linux/generic/pending-5.4/643-net-bridge-support-hardware-flow-table-offload.patch
@@ -0,0 +1,61 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 15 Mar 2018 20:50:37 +0100
+Subject: [PATCH] net: bridge: support hardware flow table offload
+
+Look up the real device behind the bridge and pass it on to the offload path.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -18,6 +18,10 @@
+ #include <linux/ethtool.h>
+ #include <linux/list.h>
+ #include <linux/netfilter_bridge.h>
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++#include <linux/netfilter.h>
++#include <net/netfilter/nf_flow_table.h>
++#endif
+
+ #include <linux/uaccess.h>
+ #include "br_private.h"
+@@ -376,6 +380,28 @@ static const struct ethtool_ops br_ethto
+ .get_link = ethtool_op_get_link,
+ };
+
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++static int br_flow_offload_check(struct flow_offload_hw_path *path)
++{
++ struct net_device *dev = path->dev;
++ struct net_bridge *br = netdev_priv(dev);
++ struct net_bridge_fdb_entry *dst;
++
++ if (!(path->flags & FLOW_OFFLOAD_PATH_ETHERNET))
++ return -EINVAL;
++
++ dst = br_fdb_find_rcu(br, path->eth_dest, path->vlan_id);
++ if (!dst || !dst->dst)
++ return -ENOENT;
++
++ path->dev = dst->dst->dev;
++ if (path->dev->netdev_ops->ndo_flow_offload_check)
++ return path->dev->netdev_ops->ndo_flow_offload_check(path);
++
++ return 0;
++}
++#endif /* CONFIG_NF_FLOW_TABLE */
++
+ static const struct net_device_ops br_netdev_ops = {
+ .ndo_open = br_dev_open,
+ .ndo_stop = br_dev_stop,
+@@ -403,6 +429,9 @@ static const struct net_device_ops br_ne
+ .ndo_bridge_setlink = br_setlink,
+ .ndo_bridge_dellink = br_dellink,
+ .ndo_features_check = passthru_features_check,
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++ .ndo_flow_offload_check = br_flow_offload_check,
++#endif
+ };
+
+ static struct device_type br_type = {
diff --git a/target/linux/generic/pending-5.4/644-net-pppoe-support-hardware-flow-table-offload.patch b/target/linux/generic/pending-5.4/644-net-pppoe-support-hardware-flow-table-offload.patch
new file mode 100644
index 0000000000..26d46ec005
--- /dev/null
+++ b/target/linux/generic/pending-5.4/644-net-pppoe-support-hardware-flow-table-offload.patch
@@ -0,0 +1,125 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 15 Mar 2018 21:15:00 +0100
+Subject: [PATCH] net: pppoe: support hardware flow table offload
+
+Pass on the PPPoE session ID and the remote MAC address to the offload path.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -57,6 +57,11 @@
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
+
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++#include <linux/netfilter.h>
++#include <net/netfilter/nf_flow_table.h>
++#endif
++
+ #define PPP_VERSION "2.4.2"
+
+ /*
+@@ -1368,12 +1373,37 @@ static void ppp_dev_priv_destructor(stru
+ ppp_destroy_interface(ppp);
+ }
+
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++static int ppp_flow_offload_check(struct flow_offload_hw_path *path)
++{
++ struct ppp *ppp = netdev_priv(path->dev);
++ struct ppp_channel *chan;
++ struct channel *pch;
++
++ if (ppp->flags & SC_MULTILINK)
++ return -EOPNOTSUPP;
++
++ if (list_empty(&ppp->channels))
++ return -ENODEV;
++
++ pch = list_first_entry(&ppp->channels, struct channel, clist);
++ chan = pch->chan;
++ if (!chan->ops->flow_offload_check)
++ return -EOPNOTSUPP;
++
++ return chan->ops->flow_offload_check(chan, path);
++}
++#endif /* CONFIG_NF_FLOW_TABLE */
++
+ static const struct net_device_ops ppp_netdev_ops = {
+ .ndo_init = ppp_dev_init,
+ .ndo_uninit = ppp_dev_uninit,
+ .ndo_start_xmit = ppp_start_xmit,
+ .ndo_do_ioctl = ppp_net_ioctl,
+ .ndo_get_stats64 = ppp_get_stats64,
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++ .ndo_flow_offload_check = ppp_flow_offload_check,
++#endif
+ };
+
+ static struct device_type ppp_type = {
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -78,6 +78,11 @@
+ #include <linux/proc_fs.h>
+ #include <linux/seq_file.h>
+
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++#include <linux/netfilter.h>
++#include <net/netfilter/nf_flow_table.h>
++#endif
++
+ #include <linux/nsproxy.h>
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
+@@ -976,8 +981,36 @@ static int pppoe_xmit(struct ppp_channel
+ return __pppoe_xmit(sk, skb);
+ }
+
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++static int pppoe_flow_offload_check(struct ppp_channel *chan,
++ struct flow_offload_hw_path *path)
++{
++ struct sock *sk = (struct sock *)chan->private;
++ struct pppox_sock *po = pppox_sk(sk);
++ struct net_device *dev = po->pppoe_dev;
++
++ if (sock_flag(sk, SOCK_DEAD) ||
++ !(sk->sk_state & PPPOX_CONNECTED) || !dev)
++ return -ENODEV;
++
++ path->dev = po->pppoe_dev;
++ path->flags |= FLOW_OFFLOAD_PATH_PPPOE;
++ memcpy(path->eth_src, po->pppoe_dev->dev_addr, ETH_ALEN);
++ memcpy(path->eth_dest, po->pppoe_pa.remote, ETH_ALEN);
++ path->pppoe_sid = be16_to_cpu(po->num);
++
++ if (path->dev->netdev_ops->ndo_flow_offload_check)
++ return path->dev->netdev_ops->ndo_flow_offload_check(path);
++
++ return 0;
++}
++#endif /* CONFIG_NF_FLOW_TABLE */
++
+ static const struct ppp_channel_ops pppoe_chan_ops = {
+ .start_xmit = pppoe_xmit,
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++ .flow_offload_check = pppoe_flow_offload_check,
++#endif
+ };
+
+ static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
+--- a/include/linux/ppp_channel.h
++++ b/include/linux/ppp_channel.h
+@@ -32,6 +32,10 @@ struct ppp_channel_ops {
+ int (*start_xmit)(struct ppp_channel *, struct sk_buff *);
+ /* Handle an ioctl call that has come in via /dev/ppp. */
+ int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long);
++
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++ int (*flow_offload_check)(struct ppp_channel *, struct flow_offload_hw_path *);
++#endif
+ };
+
+ struct ppp_channel {
diff --git a/target/linux/generic/pending-5.4/645-netfilter-nf_flow_table-rework-hardware-offload-time.patch b/target/linux/generic/pending-5.4/645-netfilter-nf_flow_table-rework-hardware-offload-time.patch
new file mode 100644
index 0000000000..bfb7a9c190
--- /dev/null
+++ b/target/linux/generic/pending-5.4/645-netfilter-nf_flow_table-rework-hardware-offload-time.patch
@@ -0,0 +1,37 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sun, 25 Mar 2018 21:10:55 +0200
+Subject: [PATCH] netfilter: nf_flow_table: rework hardware offload timeout
+ handling
+
+Some offload implementations send keepalive packets and explicit
+notifications of TCP FIN/RST packets. In this case it is more convenient
+to simply let the driver update flow->timeout and rely on the regular
+flow offload gc step.
+
+For drivers that manage flow lifetime themselves, a separate flag can be
+set to avoid gc timeouts.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -76,6 +76,7 @@ struct flow_offload_tuple_rhash {
+ #define FLOW_OFFLOAD_DYING 0x4
+ #define FLOW_OFFLOAD_TEARDOWN 0x8
+ #define FLOW_OFFLOAD_HW 0x10
++#define FLOW_OFFLOAD_KEEP 0x20
+
+ struct flow_offload {
+ struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -368,7 +368,7 @@ static int nf_flow_offload_gc_step(struc
+ if (!teardown)
+ nf_ct_offload_timeout(flow);
+
+- if (nf_flow_in_hw(flow) && !teardown)
++ if ((flow->flags & FLOW_OFFLOAD_KEEP) && !teardown)
+ continue;
+
+ if (nf_flow_has_expired(flow) || teardown)
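From a driver's perspective, the two lifetimes this enables could look roughly like the following placeholder helper (illustrative only, not taken from any driver; usual kernel includes assumed):

/* Hypothetical helper a driver might call from its flow-table poll or
 * hardware notification path.
 */
static void foo_hw_flow_seen(struct flow_offload *flow, bool hw_managed)
{
	if (hw_managed) {
		/* the hardware reports FIN/RST and idle events itself, so
		 * keep the software gc away from this entry
		 */
		flow->flags |= FLOW_OFFLOAD_KEEP;
		return;
	}

	/* hardware counters show the flow is still active: refresh the
	 * timeout so the regular gc step does not expire it
	 */
	flow->timeout = jiffies + NF_FLOW_TIMEOUT;
}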
diff --git a/target/linux/generic/pending-5.4/646-netfilter-nf_flow_table-rework-private-driver-data.patch b/target/linux/generic/pending-5.4/646-netfilter-nf_flow_table-rework-private-driver-data.patch
new file mode 100644
index 0000000000..f94d7ad301
--- /dev/null
+++ b/target/linux/generic/pending-5.4/646-netfilter-nf_flow_table-rework-private-driver-data.patch
@@ -0,0 +1,25 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Fri, 27 Apr 2018 14:42:14 +0200
+Subject: [PATCH] netfilter: nf_flow_table: rework private driver data
+
+Move the timeout out of the union, since it can be shared between the
+driver and the stack. Add a private pointer that the driver can use to
+point to its own data structures.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -81,9 +81,10 @@ struct flow_offload_tuple_rhash {
+ struct flow_offload {
+ struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
+ u32 flags;
++ u32 timeout;
+ union {
+ /* Your private driver data here. */
+- u32 timeout;
++ void *priv;
+ };
+ };
+
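With the timeout shared between driver and stack and flow->priv available, a driver can attach its own per-flow state; a minimal hypothetical sketch (struct and helper invented for illustration, usual <linux/slab.h> and <linux/jiffies.h> includes assumed):

/* Hypothetical per-flow driver state hung off the new priv pointer. */
struct foo_hw_flow_entry {
	u32 hw_index;		/* slot in the hardware flow table */
	u64 last_bytes;		/* counter snapshot used for ageing */
};

static int foo_hw_flow_insert(struct flow_offload *flow)
{
	struct foo_hw_flow_entry *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);	/* workqueue context, may sleep */
	if (!e)
		return -ENOMEM;

	/* ... program the hardware and record the slot in e->hw_index ... */

	flow->priv = e;				/* ignored by the stack */
	flow->timeout = jiffies + NF_FLOW_TIMEOUT;	/* shared with the stack */

	return 0;
}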
diff --git a/target/linux/generic/pending-5.4/655-increase_skb_pad.patch b/target/linux/generic/pending-5.4/655-increase_skb_pad.patch
new file mode 100644
index 0000000000..94e325b0cc
--- /dev/null
+++ b/target/linux/generic/pending-5.4/655-increase_skb_pad.patch
@@ -0,0 +1,20 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: add a few patches for avoiding unnecessary skb reallocations - significantly improves ethernet<->wireless performance
+
+lede-commit: 6f89cffc9add6939d44a6b54cf9a5e77849aa7fd
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ include/linux/skbuff.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2511,7 +2511,7 @@ static inline int pskb_network_may_pull(
+ * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
+ */
+ #ifndef NET_SKB_PAD
+-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
++#define NET_SKB_PAD max(64, L1_CACHE_BYTES)
+ #endif
+
+ int ___pskb_trim(struct sk_buff *skb, unsigned int len);
diff --git a/target/linux/generic/pending-5.4/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch b/target/linux/generic/pending-5.4/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch
new file mode 100644
index 0000000000..8b7594e9cf
--- /dev/null
+++ b/target/linux/generic/pending-5.4/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch
@@ -0,0 +1,500 @@
+From: Steven Barth <steven@midlink.org>
+Subject: Add support for MAP-E FMRs (mesh mode)
+
+MAP-E FMRs (draft-ietf-softwire-map-10) are rules for IPv4 communication
+between MAP CEs (mesh mode) without the need to forward such data to a
+border relay. This is similar to how 6rd works but for IPv4 over IPv6.
+
+Signed-off-by: Steven Barth <cyrus@openwrt.org>
+---
+ include/net/ip6_tunnel.h | 13 ++
+ include/uapi/linux/if_tunnel.h | 13 ++
+ net/ipv6/ip6_tunnel.c | 276 +++++++++++++++++++++++++++++++++++++++--
+ 3 files changed, 291 insertions(+), 11 deletions(-)
+
+--- a/include/net/ip6_tunnel.h
++++ b/include/net/ip6_tunnel.h
+@@ -18,6 +18,18 @@
+ /* determine capability on a per-packet basis */
+ #define IP6_TNL_F_CAP_PER_PACKET 0x40000
+
++/* IPv6 tunnel FMR */
++struct __ip6_tnl_fmr {
++ struct __ip6_tnl_fmr *next; /* next fmr in list */
++ struct in6_addr ip6_prefix;
++ struct in_addr ip4_prefix;
++
++ __u8 ip6_prefix_len;
++ __u8 ip4_prefix_len;
++ __u8 ea_len;
++ __u8 offset;
++};
++
+ struct __ip6_tnl_parm {
+ char name[IFNAMSIZ]; /* name of tunnel device */
+ int link; /* ifindex of underlying L2 interface */
+@@ -29,6 +41,7 @@ struct __ip6_tnl_parm {
+ __u32 flags; /* tunnel flags */
+ struct in6_addr laddr; /* local tunnel end-point address */
+ struct in6_addr raddr; /* remote tunnel end-point address */
++ struct __ip6_tnl_fmr *fmrs; /* FMRs */
+
+ __be16 i_flags;
+ __be16 o_flags;
+--- a/include/uapi/linux/if_tunnel.h
++++ b/include/uapi/linux/if_tunnel.h
+@@ -77,10 +77,23 @@ enum {
+ IFLA_IPTUN_ENCAP_DPORT,
+ IFLA_IPTUN_COLLECT_METADATA,
+ IFLA_IPTUN_FWMARK,
++ IFLA_IPTUN_FMRS,
+ __IFLA_IPTUN_MAX,
+ };
+ #define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1)
+
++enum {
++ IFLA_IPTUN_FMR_UNSPEC,
++ IFLA_IPTUN_FMR_IP6_PREFIX,
++ IFLA_IPTUN_FMR_IP4_PREFIX,
++ IFLA_IPTUN_FMR_IP6_PREFIX_LEN,
++ IFLA_IPTUN_FMR_IP4_PREFIX_LEN,
++ IFLA_IPTUN_FMR_EA_LEN,
++ IFLA_IPTUN_FMR_OFFSET,
++ __IFLA_IPTUN_FMR_MAX,
++};
++#define IFLA_IPTUN_FMR_MAX (__IFLA_IPTUN_FMR_MAX - 1)
++
+ enum tunnel_encap_types {
+ TUNNEL_ENCAP_NONE,
+ TUNNEL_ENCAP_FOU,
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -16,6 +16,8 @@
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
++ * Changes:
++ * Steven Barth <cyrus@openwrt.org>: MAP-E FMR support
+ */
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+@@ -72,9 +74,9 @@ static bool log_ecn_error = true;
+ module_param(log_ecn_error, bool, 0644);
+ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
+-static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
++static u32 HASH(const struct in6_addr *addr)
+ {
+- u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
++ u32 hash = ipv6_addr_hash(addr);
+
+ return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
+ }
+@@ -141,20 +143,29 @@ static struct net_device_stats *ip6_get_
+ static struct ip6_tnl *
+ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
+ {
+- unsigned int hash = HASH(remote, local);
++ unsigned int hash = HASH(local);
+ struct ip6_tnl *t;
+ struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+ struct in6_addr any;
++ struct __ip6_tnl_fmr *fmr;
+
+ for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+- if (ipv6_addr_equal(local, &t->parms.laddr) &&
+- ipv6_addr_equal(remote, &t->parms.raddr) &&
+- (t->dev->flags & IFF_UP))
++ if (!ipv6_addr_equal(local, &t->parms.laddr) ||
++ !(t->dev->flags & IFF_UP))
++ continue;
++
++ if (ipv6_addr_equal(remote, &t->parms.raddr))
+ return t;
++
++ for (fmr = t->parms.fmrs; fmr; fmr = fmr->next) {
++ if (ipv6_prefix_equal(remote, &fmr->ip6_prefix,
++ fmr->ip6_prefix_len))
++ return t;
++ }
+ }
+
+ memset(&any, 0, sizeof(any));
+- hash = HASH(&any, local);
++ hash = HASH(local);
+ for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+ if (ipv6_addr_equal(local, &t->parms.laddr) &&
+ ipv6_addr_any(&t->parms.raddr) &&
+@@ -162,7 +173,7 @@ ip6_tnl_lookup(struct net *net, const st
+ return t;
+ }
+
+- hash = HASH(remote, &any);
++ hash = HASH(&any);
+ for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+ if (ipv6_addr_equal(remote, &t->parms.raddr) &&
+ ipv6_addr_any(&t->parms.laddr) &&
+@@ -202,7 +213,7 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n,
+
+ if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
+ prio = 1;
+- h = HASH(remote, local);
++ h = HASH(local);
+ }
+ return &ip6n->tnls[prio][h];
+ }
+@@ -383,6 +394,12 @@ ip6_tnl_dev_uninit(struct net_device *de
+ struct net *net = t->net;
+ struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+
++ while (t->parms.fmrs) {
++ struct __ip6_tnl_fmr *next = t->parms.fmrs->next;
++ kfree(t->parms.fmrs);
++ t->parms.fmrs = next;
++ }
++
+ if (dev == ip6n->fb_tnl_dev)
+ RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
+ else
+@@ -772,6 +789,107 @@ int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
+ }
+ EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
+
++/**
++ * ip4ip6_fmr_calc - calculate target / source IPv6-address based on FMR
++ * @dest: destination IPv6 address buffer
++ * @skb: received socket buffer
++ * @fmr: MAP FMR
++ * @xmit: Calculate for xmit or rcv
++ **/
++static void ip4ip6_fmr_calc(struct in6_addr *dest,
++ const struct iphdr *iph, const uint8_t *end,
++ const struct __ip6_tnl_fmr *fmr, bool xmit)
++{
++ int psidlen = fmr->ea_len - (32 - fmr->ip4_prefix_len);
++ u8 *portp = NULL;
++ bool use_dest_addr;
++ const struct iphdr *dsth = iph;
++
++ if ((u8*)dsth >= end)
++ return;
++
++ /* find significant IP header */
++ if (iph->protocol == IPPROTO_ICMP) {
++ struct icmphdr *ih = (struct icmphdr*)(((u8*)dsth) + dsth->ihl * 4);
++ if (ih && ((u8*)&ih[1]) <= end && (
++ ih->type == ICMP_DEST_UNREACH ||
++ ih->type == ICMP_SOURCE_QUENCH ||
++ ih->type == ICMP_TIME_EXCEEDED ||
++ ih->type == ICMP_PARAMETERPROB ||
++ ih->type == ICMP_REDIRECT))
++ dsth = (const struct iphdr*)&ih[1];
++ }
++
++ /* in xmit-path use dest port by default and source port only if
++ this is an ICMP reply to something else; vice versa in rcv-path */
++ use_dest_addr = (xmit && dsth == iph) || (!xmit && dsth != iph);
++
++ /* get dst port */
++ if (((u8*)&dsth[1]) <= end && (
++ dsth->protocol == IPPROTO_UDP ||
++ dsth->protocol == IPPROTO_TCP ||
++ dsth->protocol == IPPROTO_SCTP ||
++ dsth->protocol == IPPROTO_DCCP)) {
++ /* for UDP, TCP, SCTP and DCCP source and dest port
++ follow IPv4 header directly */
++ portp = ((u8*)dsth) + dsth->ihl * 4;
++
++ if (use_dest_addr)
++ portp += sizeof(u16);
++ } else if (iph->protocol == IPPROTO_ICMP) {
++ struct icmphdr *ih = (struct icmphdr*)(((u8*)dsth) + dsth->ihl * 4);
++
++ /* use icmp identifier as port */
++ if (((u8*)&ih) <= end && (
++ (use_dest_addr && (
++ ih->type == ICMP_ECHOREPLY ||
++ ih->type == ICMP_TIMESTAMPREPLY ||
++ ih->type == ICMP_INFO_REPLY ||
++ ih->type == ICMP_ADDRESSREPLY)) ||
++ (!use_dest_addr && (
++ ih->type == ICMP_ECHO ||
++ ih->type == ICMP_TIMESTAMP ||
++ ih->type == ICMP_INFO_REQUEST ||
++ ih->type == ICMP_ADDRESS)
++ )))
++ portp = (u8*)&ih->un.echo.id;
++ }
++
++ if ((portp && &portp[2] <= end) || psidlen == 0) {
++ int frombyte = fmr->ip6_prefix_len / 8;
++ int fromrem = fmr->ip6_prefix_len % 8;
++ int bytes = sizeof(struct in6_addr) - frombyte;
++ const u32 *addr = (use_dest_addr) ? &iph->daddr : &iph->saddr;
++ u64 eabits = ((u64)ntohl(*addr)) << (32 + fmr->ip4_prefix_len);
++ u64 t = 0;
++
++ /* extract PSID from port and add it to eabits */
++ u16 psidbits = 0;
++ if (psidlen > 0) {
++ psidbits = ((u16)portp[0]) << 8 | ((u16)portp[1]);
++ psidbits >>= 16 - psidlen - fmr->offset;
++ psidbits = (u16)(psidbits << (16 - psidlen));
++ eabits |= ((u64)psidbits) << (48 - (fmr->ea_len - psidlen));
++ }
++
++ /* rewrite destination address */
++ *dest = fmr->ip6_prefix;
++ memcpy(&dest->s6_addr[10], addr, sizeof(*addr));
++ dest->s6_addr16[7] = htons(psidbits >> (16 - psidlen));
++
++ if (bytes > sizeof(u64))
++ bytes = sizeof(u64);
++
++ /* insert eabits */
++ memcpy(&t, &dest->s6_addr[frombyte], bytes);
++ t = be64_to_cpu(t) & ~(((((u64)1) << fmr->ea_len) - 1)
++ << (64 - fmr->ea_len - fromrem));
++ t = cpu_to_be64(t | (eabits >> fromrem));
++ memcpy(&dest->s6_addr[frombyte], &t, bytes);
++ }
++}
++
++
+ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+ const struct tnl_ptk_info *tpi,
+ struct metadata_dst *tun_dst,
+@@ -824,6 +942,27 @@ static int __ip6_tnl_rcv(struct ip6_tnl
+ skb_reset_network_header(skb);
+ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+
++ if (tpi->proto == htons(ETH_P_IP) && tunnel->parms.fmrs &&
++ !ipv6_addr_equal(&ipv6h->saddr, &tunnel->parms.raddr)) {
++ /* Packet didn't come from BR, so lookup FMR */
++ struct __ip6_tnl_fmr *fmr;
++ struct in6_addr expected = tunnel->parms.raddr;
++ for (fmr = tunnel->parms.fmrs; fmr; fmr = fmr->next)
++ if (ipv6_prefix_equal(&ipv6h->saddr,
++ &fmr->ip6_prefix, fmr->ip6_prefix_len))
++ break;
++
++ /* Check that IPv6 matches IPv4 source to prevent spoofing */
++ if (fmr)
++ ip4ip6_fmr_calc(&expected, ip_hdr(skb),
++ skb_tail_pointer(skb), fmr, false);
++
++ if (!ipv6_addr_equal(&ipv6h->saddr, &expected)) {
++ rcu_read_unlock();
++ goto drop;
++ }
++ }
++
+ __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
+
+ err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
+@@ -956,6 +1095,7 @@ static void init_tel_txopt(struct ipv6_t
+ opt->ops.opt_nflen = 8;
+ }
+
++
+ /**
+ * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
+ * @t: the outgoing tunnel device
+@@ -1308,6 +1448,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, str
+ {
+ struct ip6_tnl *t = netdev_priv(dev);
+ struct ipv6hdr *ipv6h;
++ struct __ip6_tnl_fmr *fmr;
+ int encap_limit = -1;
+ __u16 offset;
+ struct flowi6 fl6;
+@@ -1373,6 +1514,18 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, str
+ fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+ dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
+
++ /* try to find matching FMR */
++ for (fmr = t->parms.fmrs; fmr; fmr = fmr->next) {
++ unsigned mshift = 32 - fmr->ip4_prefix_len;
++ if (ntohl(fmr->ip4_prefix.s_addr) >> mshift ==
++ ntohl(ip_hdr(skb)->daddr) >> mshift)
++ break;
++ }
++
++ /* change dstaddr according to FMR */
++ if (fmr)
++ ip4ip6_fmr_calc(&fl6.daddr, ip_hdr(skb), skb_tail_pointer(skb), fmr, true);
++
+ if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
+ return -1;
+
+@@ -1502,6 +1655,14 @@ ip6_tnl_change(struct ip6_tnl *t, const
+ t->parms.link = p->link;
+ t->parms.proto = p->proto;
+ t->parms.fwmark = p->fwmark;
++
++ while (t->parms.fmrs) {
++ struct __ip6_tnl_fmr *next = t->parms.fmrs->next;
++ kfree(t->parms.fmrs);
++ t->parms.fmrs = next;
++ }
++ t->parms.fmrs = p->fmrs;
++
+ dst_cache_reset(&t->dst_cache);
+ ip6_tnl_link_config(t);
+ return 0;
+@@ -1540,6 +1701,7 @@ ip6_tnl_parm_from_user(struct __ip6_tnl_
+ p->flowinfo = u->flowinfo;
+ p->link = u->link;
+ p->proto = u->proto;
++ p->fmrs = NULL;
+ memcpy(p->name, u->name, sizeof(u->name));
+ }
+
+@@ -1924,6 +2086,15 @@ static int ip6_tnl_validate(struct nlatt
+ return 0;
+ }
+
++static const struct nla_policy ip6_tnl_fmr_policy[IFLA_IPTUN_FMR_MAX + 1] = {
++ [IFLA_IPTUN_FMR_IP6_PREFIX] = { .len = sizeof(struct in6_addr) },
++ [IFLA_IPTUN_FMR_IP4_PREFIX] = { .len = sizeof(struct in_addr) },
++ [IFLA_IPTUN_FMR_IP6_PREFIX_LEN] = { .type = NLA_U8 },
++ [IFLA_IPTUN_FMR_IP4_PREFIX_LEN] = { .type = NLA_U8 },
++ [IFLA_IPTUN_FMR_EA_LEN] = { .type = NLA_U8 },
++ [IFLA_IPTUN_FMR_OFFSET] = { .type = NLA_U8 }
++};
++
+ static void ip6_tnl_netlink_parms(struct nlattr *data[],
+ struct __ip6_tnl_parm *parms)
+ {
+@@ -1961,6 +2132,46 @@ static void ip6_tnl_netlink_parms(struct
+
+ if (data[IFLA_IPTUN_FWMARK])
+ parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
++
++ if (data[IFLA_IPTUN_FMRS]) {
++ unsigned rem;
++ struct nlattr *fmr;
++ nla_for_each_nested(fmr, data[IFLA_IPTUN_FMRS], rem) {
++ struct nlattr *fmrd[IFLA_IPTUN_FMR_MAX + 1], *c;
++ struct __ip6_tnl_fmr *nfmr;
++
++ nla_parse_nested(fmrd, IFLA_IPTUN_FMR_MAX,
++ fmr, ip6_tnl_fmr_policy, NULL);
++
++ if (!(nfmr = kzalloc(sizeof(*nfmr), GFP_KERNEL)))
++ continue;
++
++ nfmr->offset = 6;
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_IP6_PREFIX]))
++ nla_memcpy(&nfmr->ip6_prefix, fmrd[IFLA_IPTUN_FMR_IP6_PREFIX],
++ sizeof(nfmr->ip6_prefix));
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_IP4_PREFIX]))
++ nla_memcpy(&nfmr->ip4_prefix, fmrd[IFLA_IPTUN_FMR_IP4_PREFIX],
++ sizeof(nfmr->ip4_prefix));
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_IP6_PREFIX_LEN]))
++ nfmr->ip6_prefix_len = nla_get_u8(c);
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_IP4_PREFIX_LEN]))
++ nfmr->ip4_prefix_len = nla_get_u8(c);
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_EA_LEN]))
++ nfmr->ea_len = nla_get_u8(c);
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_OFFSET]))
++ nfmr->offset = nla_get_u8(c);
++
++ nfmr->next = parms->fmrs;
++ parms->fmrs = nfmr;
++ }
++ }
+ }
+
+ static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
+@@ -2076,6 +2287,12 @@ static void ip6_tnl_dellink(struct net_d
+
+ static size_t ip6_tnl_get_size(const struct net_device *dev)
+ {
++ const struct ip6_tnl *t = netdev_priv(dev);
++ struct __ip6_tnl_fmr *c;
++ int fmrs = 0;
++ for (c = t->parms.fmrs; c; c = c->next)
++ ++fmrs;
++
+ return
+ /* IFLA_IPTUN_LINK */
+ nla_total_size(4) +
+@@ -2105,6 +2322,24 @@ static size_t ip6_tnl_get_size(const str
+ nla_total_size(0) +
+ /* IFLA_IPTUN_FWMARK */
+ nla_total_size(4) +
++ /* IFLA_IPTUN_FMRS */
++ nla_total_size(0) +
++ (
++ /* nest */
++ nla_total_size(0) +
++ /* IFLA_IPTUN_FMR_IP6_PREFIX */
++ nla_total_size(sizeof(struct in6_addr)) +
++ /* IFLA_IPTUN_FMR_IP4_PREFIX */
++ nla_total_size(sizeof(struct in_addr)) +
++ /* IFLA_IPTUN_FMR_EA_LEN */
++ nla_total_size(1) +
++ /* IFLA_IPTUN_FMR_IP6_PREFIX_LEN */
++ nla_total_size(1) +
++ /* IFLA_IPTUN_FMR_IP4_PREFIX_LEN */
++ nla_total_size(1) +
++ /* IFLA_IPTUN_FMR_OFFSET */
++ nla_total_size(1)
++ ) * fmrs +
+ 0;
+ }
+
+@@ -2112,6 +2347,9 @@ static int ip6_tnl_fill_info(struct sk_b
+ {
+ struct ip6_tnl *tunnel = netdev_priv(dev);
+ struct __ip6_tnl_parm *parm = &tunnel->parms;
++ struct __ip6_tnl_fmr *c;
++ int fmrcnt = 0;
++ struct nlattr *fmrs;
+
+ if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
+ nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
+@@ -2121,9 +2359,27 @@ static int ip6_tnl_fill_info(struct sk_b
+ nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
+ nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
+ nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
+- nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
++ nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark) ||
++ !(fmrs = nla_nest_start(skb, IFLA_IPTUN_FMRS)))
+ goto nla_put_failure;
+
++ for (c = parm->fmrs; c; c = c->next) {
++ struct nlattr *fmr = nla_nest_start(skb, ++fmrcnt);
++ if (!fmr ||
++ nla_put(skb, IFLA_IPTUN_FMR_IP6_PREFIX,
++ sizeof(c->ip6_prefix), &c->ip6_prefix) ||
++ nla_put(skb, IFLA_IPTUN_FMR_IP4_PREFIX,
++ sizeof(c->ip4_prefix), &c->ip4_prefix) ||
++ nla_put_u8(skb, IFLA_IPTUN_FMR_IP6_PREFIX_LEN, c->ip6_prefix_len) ||
++ nla_put_u8(skb, IFLA_IPTUN_FMR_IP4_PREFIX_LEN, c->ip4_prefix_len) ||
++ nla_put_u8(skb, IFLA_IPTUN_FMR_EA_LEN, c->ea_len) ||
++ nla_put_u8(skb, IFLA_IPTUN_FMR_OFFSET, c->offset))
++ goto nla_put_failure;
++
++ nla_nest_end(skb, fmr);
++ }
++ nla_nest_end(skb, fmrs);
++
+ if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
+ nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
+ nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
+@@ -2163,6 +2419,7 @@ static const struct nla_policy ip6_tnl_p
+ [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
+ [IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
+ [IFLA_IPTUN_FWMARK] = { .type = NLA_U32 },
++ [IFLA_IPTUN_FMRS] = { .type = NLA_NESTED },
+ };
+
+ static struct rtnl_link_ops ip6_link_ops __read_mostly = {
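To make the FMR arithmetic in ip4ip6_fmr_calc() concrete, a hypothetical rule with values chosen purely for illustration: ip6_prefix 2001:db8::/32, ip4_prefix 192.0.2.0/24, ea_len 16, offset 6. Then psidlen = ea_len - (32 - ip4_prefix_len) = 16 - 8 = 8, so the 16 EA bits consist of the 8-bit IPv4 suffix plus an 8-bit PSID taken from the port. For an outgoing IPv4 packet to 192.0.2.77 with TCP destination port 9030 (0x2346): skipping the 6 offset bits yields PSID 0xd1, the IPv4 suffix is 0x4d, so the EA bits are 0x4dd1 and the computed tunnel destination is 2001:db8:4dd1::c000:24d:d1 (the full IPv4 address lands in bits 80-111 of the address, the PSID in the last 16 bits).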
diff --git a/target/linux/generic/pending-5.4/670-ipv6-allow-rejecting-with-source-address-failed-policy.patch b/target/linux/generic/pending-5.4/670-ipv6-allow-rejecting-with-source-address-failed-policy.patch
new file mode 100644
index 0000000000..c35aca8a18
--- /dev/null
+++ b/target/linux/generic/pending-5.4/670-ipv6-allow-rejecting-with-source-address-failed-policy.patch
@@ -0,0 +1,247 @@
+From: Jonas Gorski <jogo@openwrt.org>
+Subject: ipv6: allow rejecting with "source address failed policy"
+
+RFC6204 L-14 requires rejecting traffic from invalid addresses with
+ICMPv6 Destination Unreachable, Code 5 (Source address failed ingress/
+egress policy) on the LAN side, so add an appropriate rule for that.
+
+Signed-off-by: Jonas Gorski <jogo@openwrt.org>
+---
+ include/net/netns/ipv6.h | 1 +
+ include/uapi/linux/fib_rules.h | 4 +++
+ include/uapi/linux/rtnetlink.h | 1 +
+ net/ipv4/fib_semantics.c | 4 +++
+ net/ipv4/fib_trie.c | 1 +
+ net/ipv4/ipmr.c | 1 +
+ net/ipv6/fib6_rules.c | 4 +++
+ net/ipv6/ip6mr.c | 2 ++
+ net/ipv6/route.c | 58 +++++++++++++++++++++++++++++++++++++++++-
+ 9 files changed, 75 insertions(+), 1 deletion(-)
+
+--- a/include/net/netns/ipv6.h
++++ b/include/net/netns/ipv6.h
+@@ -78,6 +78,7 @@ struct netns_ipv6 {
+ unsigned int fib6_rules_require_fldissect;
+ bool fib6_has_custom_rules;
+ struct rt6_info *ip6_prohibit_entry;
++ struct rt6_info *ip6_policy_failed_entry;
+ struct rt6_info *ip6_blk_hole_entry;
+ struct fib6_table *fib6_local_tbl;
+ struct fib_rules_ops *fib6_rules_ops;
+--- a/include/uapi/linux/fib_rules.h
++++ b/include/uapi/linux/fib_rules.h
+@@ -82,6 +82,10 @@ enum {
+ FR_ACT_BLACKHOLE, /* Drop without notification */
+ FR_ACT_UNREACHABLE, /* Drop with ENETUNREACH */
+ FR_ACT_PROHIBIT, /* Drop with EACCES */
++ FR_ACT_RES9,
++ FR_ACT_RES10,
++ FR_ACT_RES11,
++ FR_ACT_POLICY_FAILED, /* Drop with EACCES */
+ __FR_ACT_MAX,
+ };
+
+--- a/include/uapi/linux/rtnetlink.h
++++ b/include/uapi/linux/rtnetlink.h
+@@ -228,6 +228,7 @@ enum {
+ RTN_THROW, /* Not in this table */
+ RTN_NAT, /* Translate this address */
+ RTN_XRESOLVE, /* Use external resolver */
++ RTN_POLICY_FAILED, /* Failed ingress/egress policy */
+ __RTN_MAX
+ };
+
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -139,6 +139,10 @@ const struct fib_prop fib_props[RTN_MAX
+ .error = -EINVAL,
+ .scope = RT_SCOPE_NOWHERE,
+ },
++ [RTN_POLICY_FAILED] = {
++ .error = -EACCES,
++ .scope = RT_SCOPE_UNIVERSE,
++ },
+ };
+
+ static void rt_fibinfo_free(struct rtable __rcu **rtp)
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -2474,6 +2474,7 @@ static const char *const rtn_type_names[
+ [RTN_THROW] = "THROW",
+ [RTN_NAT] = "NAT",
+ [RTN_XRESOLVE] = "XRESOLVE",
++ [RTN_POLICY_FAILED] = "POLICY_FAILED",
+ };
+
+ static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -179,6 +179,7 @@ static int ipmr_rule_action(struct fib_r
+ case FR_ACT_UNREACHABLE:
+ return -ENETUNREACH;
+ case FR_ACT_PROHIBIT:
++ case FR_ACT_POLICY_FAILED:
+ return -EACCES;
+ case FR_ACT_BLACKHOLE:
+ default:
+--- a/net/ipv6/fib6_rules.c
++++ b/net/ipv6/fib6_rules.c
+@@ -221,6 +221,10 @@ static int __fib6_rule_action(struct fib
+ err = -EACCES;
+ rt = net->ipv6.ip6_prohibit_entry;
+ goto discard_pkt;
++ case FR_ACT_POLICY_FAILED:
++ err = -EACCES;
++ rt = net->ipv6.ip6_policy_failed_entry;
++ goto discard_pkt;
+ }
+
+ tb_id = fib_rule_get_table(rule, arg);
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -162,6 +162,8 @@ static int ip6mr_rule_action(struct fib_
+ return -ENETUNREACH;
+ case FR_ACT_PROHIBIT:
+ return -EACCES;
++ case FR_ACT_POLICY_FAILED:
++ return -EACCES;
+ case FR_ACT_BLACKHOLE:
+ default:
+ return -EINVAL;
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -97,6 +97,8 @@ static int ip6_pkt_discard(struct sk_bu
+ static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+ static int ip6_pkt_prohibit(struct sk_buff *skb);
+ static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
++static int ip6_pkt_policy_failed(struct sk_buff *skb);
++static int ip6_pkt_policy_failed_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+ static void ip6_link_failure(struct sk_buff *skb);
+ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu,
+@@ -328,6 +330,18 @@ static const struct rt6_info ip6_prohibi
+ .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
+ };
+
++static const struct rt6_info ip6_policy_failed_entry_template = {
++ .dst = {
++ .__refcnt = ATOMIC_INIT(1),
++ .__use = 1,
++ .obsolete = DST_OBSOLETE_FORCE_CHK,
++ .error = -EACCES,
++ .input = ip6_pkt_policy_failed,
++ .output = ip6_pkt_policy_failed_out,
++ },
++ .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
++};
++
+ static const struct rt6_info ip6_blk_hole_entry_template = {
+ .dst = {
+ .__refcnt = ATOMIC_INIT(1),
+@@ -906,6 +920,7 @@ static const int fib6_prop[RTN_MAX + 1]
+ [RTN_BLACKHOLE] = -EINVAL,
+ [RTN_UNREACHABLE] = -EHOSTUNREACH,
+ [RTN_PROHIBIT] = -EACCES,
++ [RTN_POLICY_FAILED] = -EACCES,
+ [RTN_THROW] = -EAGAIN,
+ [RTN_NAT] = -EINVAL,
+ [RTN_XRESOLVE] = -EINVAL,
+@@ -943,6 +958,10 @@ static void ip6_rt_init_dst_reject(struc
+ rt->dst.output = ip6_pkt_prohibit_out;
+ rt->dst.input = ip6_pkt_prohibit;
+ break;
++ case RTN_POLICY_FAILED:
++ rt->dst.output = ip6_pkt_policy_failed_out;
++ rt->dst.input = ip6_pkt_policy_failed;
++ break;
+ case RTN_THROW:
+ case RTN_UNREACHABLE:
+ default:
+@@ -3789,6 +3808,17 @@ static int ip6_pkt_prohibit_out(struct n
+ return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
+ }
+
++static int ip6_pkt_policy_failed(struct sk_buff *skb)
++{
++ return ip6_pkt_drop(skb, ICMPV6_POLICY_FAIL, IPSTATS_MIB_INNOROUTES);
++}
++
++static int ip6_pkt_policy_failed_out(struct net *net, struct sock *sk, struct sk_buff *skb)
++{
++ skb->dev = skb_dst(skb)->dev;
++ return ip6_pkt_drop(skb, ICMPV6_POLICY_FAIL, IPSTATS_MIB_OUTNOROUTES);
++}
++
+ /*
+ * Allocate a dst for local (unicast / anycast) address.
+ */
+@@ -4236,7 +4266,8 @@ static int rtm_to_fib6_config(struct sk_
+ if (rtm->rtm_type == RTN_UNREACHABLE ||
+ rtm->rtm_type == RTN_BLACKHOLE ||
+ rtm->rtm_type == RTN_PROHIBIT ||
+- rtm->rtm_type == RTN_THROW)
++ rtm->rtm_type == RTN_THROW ||
++ rtm->rtm_type == RTN_POLICY_FAILED)
+ cfg->fc_flags |= RTF_REJECT;
+
+ if (rtm->rtm_type == RTN_LOCAL)
+@@ -5084,6 +5115,8 @@ static int ip6_route_dev_notify(struct n
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ net->ipv6.ip6_prohibit_entry->dst.dev = dev;
+ net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
++ net->ipv6.ip6_policy_failed_entry->dst.dev = dev;
++ net->ipv6.ip6_policy_failed_entry->rt6i_idev = in6_dev_get(dev);
+ net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
+ net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
+ #endif
+@@ -5095,6 +5128,7 @@ static int ip6_route_dev_notify(struct n
+ in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
++ in6_dev_put_clear(&net->ipv6.ip6_policy_failed_entry->rt6i_idev);
+ in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
+ #endif
+ }
+@@ -5289,6 +5323,15 @@ static int __net_init ip6_route_net_init
+ net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
+ dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
+ ip6_template_metrics, true);
++
++ net->ipv6.ip6_policy_failed_entry =
++ kmemdup(&ip6_policy_failed_entry_template,
++ sizeof(*net->ipv6.ip6_policy_failed_entry), GFP_KERNEL);
++ if (!net->ipv6.ip6_policy_failed_entry)
++ goto out_ip6_blk_hole_entry;
++ net->ipv6.ip6_policy_failed_entry->dst.ops = &net->ipv6.ip6_dst_ops;
++ dst_init_metrics(&net->ipv6.ip6_policy_failed_entry->dst,
++ ip6_template_metrics, true);
+ #endif
+
+ net->ipv6.sysctl.flush_delay = 0;
+@@ -5307,6 +5350,8 @@ out:
+ return ret;
+
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
++out_ip6_blk_hole_entry:
++ kfree(net->ipv6.ip6_blk_hole_entry);
+ out_ip6_prohibit_entry:
+ kfree(net->ipv6.ip6_prohibit_entry);
+ out_ip6_null_entry:
+@@ -5327,6 +5372,7 @@ static void __net_exit ip6_route_net_exi
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ kfree(net->ipv6.ip6_prohibit_entry);
+ kfree(net->ipv6.ip6_blk_hole_entry);
++ kfree(net->ipv6.ip6_policy_failed_entry);
+ #endif
+ dst_entries_destroy(&net->ipv6.ip6_dst_ops);
+ }
+@@ -5403,6 +5449,9 @@ void __init ip6_route_init_special_entri
+ init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+ init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
+ init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
++ init_net.ipv6.ip6_policy_failed_entry->dst.dev = init_net.loopback_dev;
++ init_net.ipv6.ip6_policy_failed_entry->rt6i_idev =
++ in6_dev_get(init_net.loopback_dev);
+ #endif
+ }
+
diff --git a/target/linux/generic/pending-5.4/671-net-provide-defines-for-_POLICY_FAILED-until-all-cod.patch b/target/linux/generic/pending-5.4/671-net-provide-defines-for-_POLICY_FAILED-until-all-cod.patch
new file mode 100644
index 0000000000..85adfedc94
--- /dev/null
+++ b/target/linux/generic/pending-5.4/671-net-provide-defines-for-_POLICY_FAILED-until-all-cod.patch
@@ -0,0 +1,50 @@
+From: Jonas Gorski <jogo@openwrt.org>
+Subject: net: provide defines for _POLICY_FAILED until all code is updated
+
+Upstream introduced ICMPV6_POLICY_FAIL for code 5 of destination
+unreachable, conflicting with our name.
+
+Add appropriate defines to allow our code to build with the new
+name until we have updated our local patches for older kernels
+and userspace packages.
+
+Signed-off-by: Jonas Gorski <jogo@openwrt.org>
+---
+ include/uapi/linux/fib_rules.h | 2 ++
+ include/uapi/linux/icmpv6.h | 2 ++
+ include/uapi/linux/rtnetlink.h | 2 ++
+ 3 files changed, 6 insertions(+)
+
+--- a/include/uapi/linux/fib_rules.h
++++ b/include/uapi/linux/fib_rules.h
+@@ -89,6 +89,8 @@ enum {
+ __FR_ACT_MAX,
+ };
+
++#define FR_ACT_FAILED_POLICY FR_ACT_POLICY_FAILED
++
+ #define FR_ACT_MAX (__FR_ACT_MAX - 1)
+
+ #endif
+--- a/include/uapi/linux/icmpv6.h
++++ b/include/uapi/linux/icmpv6.h
+@@ -119,6 +119,8 @@ struct icmp6hdr {
+ #define ICMPV6_POLICY_FAIL 5
+ #define ICMPV6_REJECT_ROUTE 6
+
++#define ICMPV6_FAILED_POLICY ICMPV6_POLICY_FAIL
++
+ /*
+ * Codes for Time Exceeded
+ */
+--- a/include/uapi/linux/rtnetlink.h
++++ b/include/uapi/linux/rtnetlink.h
+@@ -232,6 +232,8 @@ enum {
+ __RTN_MAX
+ };
+
++#define RTN_FAILED_POLICY RTN_POLICY_FAILED
++
+ #define RTN_MAX (__RTN_MAX - 1)
+
+
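
The aliases above only help to the extent that existing consumers keep
compiling. A minimal, hypothetical userspace sketch (the function name is
illustrative; it assumes headers patched with both spellings, as in the two
patches above):

    #include <errno.h>
    #include <linux/fib_rules.h>

    /* Code written against the old OpenWrt name still resolves via the
     * compat define added above. */
    static int rule_action_to_errno(unsigned char action)
    {
            switch (action) {
            case FR_ACT_FAILED_POLICY:   /* alias for FR_ACT_POLICY_FAILED */
                    return -EACCES;
            case FR_ACT_UNREACHABLE:
                    return -ENETUNREACH;
            default:
                    return -EINVAL;
            }
    }
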
diff --git a/target/linux/generic/pending-5.4/680-NET-skip-GRO-for-foreign-MAC-addresses.patch b/target/linux/generic/pending-5.4/680-NET-skip-GRO-for-foreign-MAC-addresses.patch
new file mode 100644
index 0000000000..61cc4e830a
--- /dev/null
+++ b/target/linux/generic/pending-5.4/680-NET-skip-GRO-for-foreign-MAC-addresses.patch
@@ -0,0 +1,152 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: net: replace GRO optimization patch with a new one that supports VLANs/bridges with different MAC addresses
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ include/linux/netdevice.h | 2 ++
+ include/linux/skbuff.h | 3 ++-
+ net/core/dev.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++
+ net/ethernet/eth.c | 18 +++++++++++++++++-
+ 4 files changed, 69 insertions(+), 2 deletions(-)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1891,6 +1891,8 @@ struct net_device {
+ struct netdev_hw_addr_list mc;
+ struct netdev_hw_addr_list dev_addrs;
+
++ unsigned char local_addr_mask[MAX_ADDR_LEN];
++
+ #ifdef CONFIG_SYSFS
+ struct kset *queues_kset;
+ #endif
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -799,6 +799,7 @@ struct sk_buff {
+ #ifdef CONFIG_TLS_DEVICE
+ __u8 decrypted:1;
+ #endif
++ __u8 gro_skip:1;
+
+ #ifdef CONFIG_NET_SCHED
+ __u16 tc_index; /* traffic control index */
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5454,6 +5454,9 @@ static enum gro_result dev_gro_receive(s
+ int same_flow;
+ int grow;
+
++ if (skb->gro_skip)
++ goto normal;
++
+ if (netif_elide_gro(skb->dev))
+ goto normal;
+
+@@ -7112,6 +7115,48 @@ static void __netdev_adjacent_dev_unlink
+ &upper_dev->adj_list.lower);
+ }
+
++static void __netdev_addr_mask(unsigned char *mask, const unsigned char *addr,
++ struct net_device *dev)
++{
++ int i;
++
++ for (i = 0; i < dev->addr_len; i++)
++ mask[i] |= addr[i] ^ dev->dev_addr[i];
++}
++
++static void __netdev_upper_mask(unsigned char *mask, struct net_device *dev,
++ struct net_device *lower)
++{
++ struct net_device *cur;
++ struct list_head *iter;
++
++ netdev_for_each_upper_dev_rcu(dev, cur, iter) {
++ __netdev_addr_mask(mask, cur->dev_addr, lower);
++ __netdev_upper_mask(mask, cur, lower);
++ }
++}
++
++static void __netdev_update_addr_mask(struct net_device *dev)
++{
++ unsigned char mask[MAX_ADDR_LEN];
++ struct net_device *cur;
++ struct list_head *iter;
++
++ memset(mask, 0, sizeof(mask));
++ __netdev_upper_mask(mask, dev, dev);
++ memcpy(dev->local_addr_mask, mask, dev->addr_len);
++
++ netdev_for_each_lower_dev(dev, cur, iter)
++ __netdev_update_addr_mask(cur);
++}
++
++static void netdev_update_addr_mask(struct net_device *dev)
++{
++ rcu_read_lock();
++ __netdev_update_addr_mask(dev);
++ rcu_read_unlock();
++}
++
+ static int __netdev_upper_dev_link(struct net_device *dev,
+ struct net_device *upper_dev, bool master,
+ void *upper_priv, void *upper_info,
+@@ -7162,6 +7207,7 @@ static int __netdev_upper_dev_link(struc
+ if (ret)
+ return ret;
+
++ netdev_update_addr_mask(dev);
+ ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
+ &changeupper_info.info);
+ ret = notifier_to_errno(ret);
+@@ -7254,6 +7300,7 @@ void netdev_upper_dev_unlink(struct net_
+
+ __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
+
++ netdev_update_addr_mask(dev);
+ call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
+ &changeupper_info.info);
+
+@@ -7893,6 +7940,7 @@ int dev_set_mac_address(struct net_devic
+ if (err)
+ return err;
+ dev->addr_assign_type = NET_ADDR_SET;
++ netdev_update_addr_mask(dev);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ add_device_randomness(dev->dev_addr, dev->addr_len);
+ return 0;
+--- a/net/ethernet/eth.c
++++ b/net/ethernet/eth.c
+@@ -144,6 +144,18 @@ u32 eth_get_headlen(void *data, unsigned
+ }
+ EXPORT_SYMBOL(eth_get_headlen);
+
++static inline bool
++eth_check_local_mask(const void *addr1, const void *addr2, const void *mask)
++{
++ const u16 *a1 = addr1;
++ const u16 *a2 = addr2;
++ const u16 *m = mask;
++
++ return (((a1[0] ^ a2[0]) & ~m[0]) |
++ ((a1[1] ^ a2[1]) & ~m[1]) |
++ ((a1[2] ^ a2[2]) & ~m[2]));
++}
++
+ /**
+ * eth_type_trans - determine the packet's protocol ID.
+ * @skb: received socket data
+@@ -172,8 +184,12 @@ __be16 eth_type_trans(struct sk_buff *sk
+ skb->pkt_type = PACKET_MULTICAST;
+ }
+ else if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
+- dev->dev_addr)))
++ dev->dev_addr))) {
+ skb->pkt_type = PACKET_OTHERHOST;
++ if (eth_check_local_mask(eth->h_dest, dev->dev_addr,
++ dev->local_addr_mask))
++ skb->gro_skip = 1;
++ }
+
+ /*
+ * Some variants of DSA tagging don't have an ethertype field
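
The core of the patch above is the per-device local_addr_mask: it accumulates
every bit position in which an upper device's MAC differs from the lower
device's own address, and eth_type_trans() then only marks a frame gro_skip
when its destination differs outside that mask. A small standalone
illustration of the test (plain C, hypothetical addresses):

    #include <stdint.h>
    #include <stdio.h>

    /* Returns 1 when dst differs from dev outside the accumulated mask,
     * i.e. the frame is for a truly foreign station and GRO is skipped. */
    static int is_foreign(const uint8_t *dst, const uint8_t *dev,
                          const uint8_t *mask)
    {
            for (int i = 0; i < 6; i++)
                    if ((dst[i] ^ dev[i]) & ~mask[i])
                            return 1;
            return 0;
    }

    int main(void)
    {
            uint8_t dev[6]   = { 0x02, 0, 0, 0, 0, 0x01 };  /* lower device  */
            uint8_t upper[6] = { 0x02, 0, 0, 0, 0, 0x03 };  /* e.g. a bridge */
            uint8_t mask[6]  = { 0 };

            for (int i = 0; i < 6; i++)
                    mask[i] |= dev[i] ^ upper[i];

            /* Destined to the bridge MAC: differs only inside the mask,
             * so GRO is still allowed (prints 0). */
            printf("%d\n", is_foreign(upper, dev, mask));
            return 0;
    }
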
diff --git a/target/linux/generic/pending-5.4/681-NET-add-of_get_mac_address_mtd.patch b/target/linux/generic/pending-5.4/681-NET-add-of_get_mac_address_mtd.patch
new file mode 100644
index 0000000000..13f5640590
--- /dev/null
+++ b/target/linux/generic/pending-5.4/681-NET-add-of_get_mac_address_mtd.patch
@@ -0,0 +1,133 @@
+From: John Crispin <blogic@openwrt.org>
+Subject: NET: add mtd-mac-address support to of_get_mac_address()
+
+Many embedded devices have information such as mac addresses stored inside mtd
+devices. This patch allows us to add a property inside a node describing a
+network interface. The new property points at a mtd partition with an offset
+where the mac address can be found.
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/of/of_net.c | 37 +++++++++++++++++++++++++++++++++++++
+ include/linux/of_net.h | 1 +
+ 2 files changed, 38 insertions(+)
+
+--- a/drivers/of/of_net.c
++++ b/drivers/of/of_net.c
+@@ -11,6 +11,7 @@
+ #include <linux/of_net.h>
+ #include <linux/phy.h>
+ #include <linux/export.h>
++#include <linux/mtd/mtd.h>
+
+ /**
+ * of_get_phy_mode - Get phy mode for given device_node
+@@ -39,7 +40,7 @@ int of_get_phy_mode(struct device_node *
+ }
+ EXPORT_SYMBOL_GPL(of_get_phy_mode);
+
+-static const void *of_get_mac_addr(struct device_node *np, const char *name)
++static void *of_get_mac_addr(struct device_node *np, const char *name)
+ {
+ struct property *pp = of_find_property(np, name, NULL);
+
+@@ -48,6 +49,79 @@ static const void *of_get_mac_addr(struc
+ return NULL;
+ }
+
++static const void *of_get_mac_address_mtd(struct device_node *np)
++{
++#ifdef CONFIG_MTD
++ struct device_node *mtd_np = NULL;
++ struct property *prop;
++ size_t retlen;
++ int size, ret;
++ struct mtd_info *mtd;
++ const char *part;
++ const __be32 *list;
++ phandle phandle;
++ u32 mac_inc = 0;
++ u8 mac[ETH_ALEN];
++ void *addr;
++ u32 inc_idx;
++
++ list = of_get_property(np, "mtd-mac-address", &size);
++ if (!list || (size != (2 * sizeof(*list))))
++ return NULL;
++
++ phandle = be32_to_cpup(list++);
++ if (phandle)
++ mtd_np = of_find_node_by_phandle(phandle);
++
++ if (!mtd_np)
++ return NULL;
++
++ part = of_get_property(mtd_np, "label", NULL);
++ if (!part)
++ part = mtd_np->name;
++
++ mtd = get_mtd_device_nm(part);
++ if (IS_ERR(mtd))
++ return NULL;
++
++ ret = mtd_read(mtd, be32_to_cpup(list), 6, &retlen, mac);
++ put_mtd_device(mtd);
++
++ if (of_property_read_u32(np, "mtd-mac-address-increment-byte", &inc_idx))
++ inc_idx = 5;
++ if (inc_idx > 5)
++ return NULL;
++
++ if (!of_property_read_u32(np, "mtd-mac-address-increment", &mac_inc))
++ mac[inc_idx] += mac_inc;
++
++ if (!is_valid_ether_addr(mac))
++ return NULL;
++
++ addr = of_get_mac_addr(np, "mac-address");
++ if (addr) {
++ memcpy(addr, mac, ETH_ALEN);
++ return addr;
++ }
++
++ prop = kzalloc(sizeof(*prop), GFP_KERNEL);
++ if (!prop)
++ return NULL;
++
++ prop->name = "mac-address";
++ prop->length = ETH_ALEN;
++ prop->value = kmemdup(mac, ETH_ALEN, GFP_KERNEL);
++ if (!prop->value || of_add_property(np, prop))
++ goto free;
++
++ return prop->value;
++free:
++ kfree(prop->value);
++ kfree(prop);
++#endif
++ return NULL;
++}
++
+ /**
+ * Search the device tree for the best MAC address to use. 'mac-address' is
+ * checked first, because that is supposed to contain to "most recent" MAC
+@@ -65,11 +139,18 @@ static const void *of_get_mac_addr(struc
+ * addresses. Some older U-Boots only initialized 'local-mac-address'. In
+ * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
+ * but is all zeros.
++ *
++ * If a mtd-mac-address property exists, try to fetch the MAC address from the
++ * specified mtd device, and store it as a 'mac-address' property
+ */
+ const void *of_get_mac_address(struct device_node *np)
+ {
+ const void *addr;
+
++ addr = of_get_mac_address_mtd(np);
++ if (addr)
++ return addr;
++
+ addr = of_get_mac_addr(np, "mac-address");
+ if (addr)
+ return addr;
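
A minimal sketch of the consumer side, assuming the NULL-on-failure semantics
shown above (the driver function and fallback policy are illustrative, and the
device-tree property would be along the lines of
mtd-mac-address = <&factory 0x28>; with a label and offset chosen per board):

    #include <linux/etherdevice.h>
    #include <linux/of_net.h>

    static void example_set_mac(struct net_device *ndev, struct device_node *np)
    {
            const void *mac = of_get_mac_address(np);

            if (mac && is_valid_ether_addr(mac))
                    ether_addr_copy(ndev->dev_addr, mac);
            else
                    eth_hw_addr_random(ndev);   /* no usable address found */
    }
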
diff --git a/target/linux/generic/pending-5.4/703-phy-add-detach-callback-to-struct-phy_driver.patch b/target/linux/generic/pending-5.4/703-phy-add-detach-callback-to-struct-phy_driver.patch
new file mode 100644
index 0000000000..1c180b225b
--- /dev/null
+++ b/target/linux/generic/pending-5.4/703-phy-add-detach-callback-to-struct-phy_driver.patch
@@ -0,0 +1,38 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: generic: add detach callback to struct phy_driver
+
+lede-commit: fe61fc2d7d0b3fb348b502f68f98243b3ddf5867
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/net/phy/phy_device.c | 3 +++
+ include/linux/phy.h | 6 ++++++
+ 2 files changed, 9 insertions(+)
+
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1201,6 +1201,9 @@ void phy_detach(struct phy_device *phyde
+ struct module *ndev_owner = dev->dev.parent->driver->owner;
+ struct mii_bus *bus;
+
++ if (phydev->drv && phydev->drv->detach)
++ phydev->drv->detach(phydev);
++
+ if (phydev->sysfs_links) {
+ sysfs_remove_link(&dev->dev.kobj, "phydev");
+ sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev");
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -567,6 +567,12 @@ struct phy_driver {
+ */
+ int (*did_interrupt)(struct phy_device *phydev);
+
++ /*
++ * Called before an ethernet device is detached
++ * from the PHY.
++ */
++ void (*detach)(struct phy_device *phydev);
++
+ /* Clears up any memory if needed */
+ void (*remove)(struct phy_device *phydev);
+
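
A minimal, hypothetical PHY driver using the new hook (the ID, name and the
priv bookkeeping are purely illustrative; only the .detach member comes from
the patch above):

    #include <linux/phy.h>
    #include <linux/slab.h>

    static void example_phy_detach(struct phy_device *phydev)
    {
            /* Runs just before the ethernet device lets go of the PHY:
             * a good place to drop state tied to the attached netdev. */
            kfree(phydev->priv);
            phydev->priv = NULL;
    }

    static struct phy_driver example_phy_driver = {
            .phy_id      = 0x00112233,       /* illustrative */
            .phy_id_mask = 0xffffffff,
            .name        = "Example PHY",
            .detach      = example_phy_detach,
    };
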
diff --git a/target/linux/generic/pending-5.4/735-net-phy-at803x-fix-at8033-sgmii-mode.patch b/target/linux/generic/pending-5.4/735-net-phy-at803x-fix-at8033-sgmii-mode.patch
new file mode 100644
index 0000000000..5c00b8781f
--- /dev/null
+++ b/target/linux/generic/pending-5.4/735-net-phy-at803x-fix-at8033-sgmii-mode.patch
@@ -0,0 +1,51 @@
+From: Roman Yeryomin <roman@advem.lv>
+Subject: kernel: add at803x fix for sgmii mode
+
+Some (possibly broken) bootloaders incorrectly initialize the at8033
+phy. This patch enables sgmii autonegotiation mode.
+
+[john@phrozen.org: felix added this to his upstream queue]
+
+Signed-off-by: Roman Yeryomin <roman@advem.lv>
+---
+ drivers/net/phy/at803x.c | 25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -44,6 +44,7 @@
+ #define AT803X_FUNC_DATA 0x4003
+ #define AT803X_REG_CHIP_CONFIG 0x1f
+ #define AT803X_BT_BX_REG_SEL 0x8000
++#define AT803X_SGMII_ANEG_EN 0x1000
+
+ #define AT803X_DEBUG_ADDR 0x1D
+ #define AT803X_DEBUG_DATA 0x1E
+@@ -252,6 +253,27 @@ static int at803x_probe(struct phy_devic
+ static int at803x_config_init(struct phy_device *phydev)
+ {
+ int ret;
++ u32 v;
++
++ if (phydev->drv->phy_id == ATH8031_PHY_ID &&
++ phydev->interface == PHY_INTERFACE_MODE_SGMII)
++ {
++ v = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
++ /* select SGMII/fiber page */
++ ret = phy_write(phydev, AT803X_REG_CHIP_CONFIG,
++ v & ~AT803X_BT_BX_REG_SEL);
++ if (ret)
++ return ret;
++ /* enable SGMII autonegotiation */
++ ret = phy_write(phydev, MII_BMCR, AT803X_SGMII_ANEG_EN);
++ if (ret)
++ return ret;
++ /* select copper page */
++ ret = phy_write(phydev, AT803X_REG_CHIP_CONFIG,
++ v | AT803X_BT_BX_REG_SEL);
++ if (ret)
++ return ret;
++ }
+
+ ret = genphy_config_init(phydev);
+ if (ret < 0)
diff --git a/target/linux/generic/pending-5.4/739-net-avoid-tx-fault-with-Nokia-GPON-module.patch b/target/linux/generic/pending-5.4/739-net-avoid-tx-fault-with-Nokia-GPON-module.patch
new file mode 100644
index 0000000000..b540e1dbc1
--- /dev/null
+++ b/target/linux/generic/pending-5.4/739-net-avoid-tx-fault-with-Nokia-GPON-module.patch
@@ -0,0 +1,108 @@
+From 283b211aa01bdae94dffb3121655dbb20bf237f4 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Tue, 3 Dec 2019 15:22:05 +0000
+Subject: net: sfp: avoid tx-fault with Nokia GPON module
+
+The Nokia GPON module can hold tx-fault active while it is initialising,
+which can take up to 60s. Avoid this causing the module to be declared
+faulty after the SFP MSA defined non-cooled module timeout.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/sfp.c | 42 ++++++++++++++++++++++++++++++------------
+ 1 file changed, 30 insertions(+), 12 deletions(-)
+
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -153,10 +153,20 @@ static const enum gpiod_flags gpio_flags
+ GPIOD_ASIS,
+ };
+
+-#define T_WAIT msecs_to_jiffies(50)
+-#define T_INIT_JIFFIES msecs_to_jiffies(300)
+-#define T_RESET_US 10
+-#define T_FAULT_RECOVER msecs_to_jiffies(1000)
++/* t_start_up (SFF-8431) or t_init (SFF-8472) is the time required for a
++ * non-cooled module to initialise its laser safety circuitry. We wait
++ * an initial T_WAIT period before we check the tx fault to give any PHY
++ * on board (for a copper SFP) time to initialise.
++ */
++#define T_WAIT msecs_to_jiffies(50)
++#define T_START_UP msecs_to_jiffies(300)
++#define T_START_UP_BAD_GPON msecs_to_jiffies(60000)
++
++/* t_reset is the time required to assert the TX_DISABLE signal to reset
++ * an indicated TX_FAULT.
++ */
++#define T_RESET_US 10
++#define T_FAULT_RECOVER msecs_to_jiffies(1000)
+
+ /* SFP module presence detection is poor: the three MOD DEF signals are
+ * the same length on the PCB, which means it's possible for MOD DEF 0 to
+@@ -216,6 +226,7 @@ struct sfp {
+
+ struct sfp_eeprom_id id;
+ unsigned int module_power_mW;
++ unsigned int module_t_start_up;
+
+ #if IS_ENABLED(CONFIG_HWMON)
+ struct sfp_diag diag;
+@@ -1590,6 +1601,12 @@ static int sfp_sm_mod_probe(struct sfp *
+ if (ret < 0)
+ return ret;
+
++ if (!memcmp(id.base.vendor_name, "ALCATELLUCENT ", 16) &&
++ !memcmp(id.base.vendor_pn, "3FE46541AA ", 16))
++ sfp->module_t_start_up = T_START_UP_BAD_GPON;
++ else
++ sfp->module_t_start_up = T_START_UP;
++
+ return 0;
+ }
+
+@@ -1795,11 +1812,12 @@ static void sfp_sm_main(struct sfp *sfp,
+ break;
+
+ if (sfp->state & SFP_F_TX_FAULT) {
+- /* Wait t_init before indicating that the link is up,
+- * provided the current state indicates no TX_FAULT. If
+- * TX_FAULT clears before this time, that's fine too.
++ /* Wait up to t_init (SFF-8472) or t_start_up (SFF-8431)
++ * from the TX_DISABLE deassertion for the module to
++ * initialise, which is indicated by TX_FAULT
++ * deasserting.
+ */
+- timeout = T_INIT_JIFFIES;
++ timeout = sfp->module_t_start_up;
+ if (timeout > T_WAIT)
+ timeout -= T_WAIT;
+ else
+@@ -1816,8 +1834,8 @@ static void sfp_sm_main(struct sfp *sfp,
+
+ case SFP_S_INIT:
+ if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) {
+- /* TX_FAULT is still asserted after t_init, so assume
+- * there is a fault.
++ /* TX_FAULT is still asserted after t_init or
++ * t_start_up, so assume there is a fault.
+ */
+ sfp_sm_fault(sfp, SFP_S_INIT_TX_FAULT,
+ sfp->sm_retries == 5);
+@@ -1836,7 +1854,7 @@ static void sfp_sm_main(struct sfp *sfp,
+ case SFP_S_INIT_TX_FAULT:
+ if (event == SFP_E_TIMEOUT) {
+ sfp_module_tx_fault_reset(sfp);
+- sfp_sm_next(sfp, SFP_S_INIT, T_INIT_JIFFIES);
++ sfp_sm_next(sfp, SFP_S_INIT, sfp->module_t_start_up);
+ }
+ break;
+
+@@ -1860,7 +1878,7 @@ static void sfp_sm_main(struct sfp *sfp,
+ case SFP_S_TX_FAULT:
+ if (event == SFP_E_TIMEOUT) {
+ sfp_module_tx_fault_reset(sfp);
+- sfp_sm_next(sfp, SFP_S_REINIT, T_INIT_JIFFIES);
++ sfp_sm_next(sfp, SFP_S_REINIT, sfp->module_t_start_up);
+ }
+ break;
+
diff --git a/target/linux/generic/pending-5.4/740-net-sfp-remove-incomplete-100BASE-FX-and-100BASE-LX-.patch b/target/linux/generic/pending-5.4/740-net-sfp-remove-incomplete-100BASE-FX-and-100BASE-LX-.patch
new file mode 100644
index 0000000000..304d9b40bd
--- /dev/null
+++ b/target/linux/generic/pending-5.4/740-net-sfp-remove-incomplete-100BASE-FX-and-100BASE-LX-.patch
@@ -0,0 +1,52 @@
+From 29cd215aaf6c2050c43e4de03aee436c16f90b96 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Thu, 21 Nov 2019 17:27:14 +0000
+Subject: [PATCH 643/660] net: sfp: remove incomplete 100BASE-FX and 100BASE-LX
+ support
+
+The 100BASE-FX and 100BASE-LX support assumes a PHY is present; this
+is probably an incorrect assumption. In any case, sfp_parse_support()
+will fail such a module. Let's stop pretending we support these
+modules.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/sfp-bus.c | 4 +---
+ drivers/net/phy/sfp.c | 13 +------------
+ 2 files changed, 2 insertions(+), 15 deletions(-)
+
+--- a/drivers/net/phy/sfp-bus.c
++++ b/drivers/net/phy/sfp-bus.c
+@@ -341,9 +341,7 @@ phy_interface_t sfp_select_interface(str
+ if (phylink_test(link_modes, 2500baseX_Full))
+ return PHY_INTERFACE_MODE_2500BASEX;
+
+- if (id->base.e1000_base_t ||
+- id->base.e100_base_lx ||
+- id->base.e100_base_fx)
++ if (id->base.e1000_base_t)
+ return PHY_INTERFACE_MODE_SGMII;
+
+ if (phylink_test(link_modes, 1000baseX_Full))
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -1424,18 +1424,7 @@ static void sfp_sm_fault(struct sfp *sfp
+
+ static void sfp_sm_probe_for_phy(struct sfp *sfp)
+ {
+- /* Setting the serdes link mode is guesswork: there's no
+- * field in the EEPROM which indicates what mode should
+- * be used.
+- *
+- * If it's a gigabit-only fiber module, it probably does
+- * not have a PHY, so switch to 802.3z negotiation mode.
+- * Otherwise, switch to SGMII mode (which is required to
+- * support non-gigabit speeds) and probe for a PHY.
+- */
+- if (sfp->id.base.e1000_base_t ||
+- sfp->id.base.e100_base_lx ||
+- sfp->id.base.e100_base_fx)
++ if (sfp->id.base.e1000_base_t)
+ sfp_sm_probe_phy(sfp);
+ }
+
diff --git a/target/linux/generic/pending-5.4/741-net-sfp-derive-interface-mode-from-ethtool-link-mode.patch b/target/linux/generic/pending-5.4/741-net-sfp-derive-interface-mode-from-ethtool-link-mode.patch
new file mode 100644
index 0000000000..f20b66eae7
--- /dev/null
+++ b/target/linux/generic/pending-5.4/741-net-sfp-derive-interface-mode-from-ethtool-link-mode.patch
@@ -0,0 +1,89 @@
+From dc45d9e04572b5cd6d32f51cdf9f62b18022e6dd Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Thu, 21 Nov 2019 17:32:59 +0000
+Subject: [PATCH 644/660] net: sfp: derive interface mode from ethtool link
+ modes
+
+We don't need the EEPROM ID to derive the phy interface mode as we can
+derive it merely from the ethtool link modes. Remove the EEPROM ID
+argument to sfp_select_interface().
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/marvell10g.c | 2 +-
+ drivers/net/phy/phylink.c | 2 +-
+ drivers/net/phy/sfp-bus.c | 11 ++++-------
+ include/linux/sfp.h | 2 --
+ 4 files changed, 6 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/phy/marvell10g.c
++++ b/drivers/net/phy/marvell10g.c
+@@ -227,7 +227,7 @@ static int mv3310_sfp_insert(void *upstr
+ phy_interface_t iface;
+
+ sfp_parse_support(phydev->sfp_bus, id, support);
+- iface = sfp_select_interface(phydev->sfp_bus, id, support);
++ iface = sfp_select_interface(phydev->sfp_bus, support);
+
+ if (iface != PHY_INTERFACE_MODE_10GKR) {
+ dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -1663,7 +1663,7 @@ static int phylink_sfp_module_insert(voi
+
+ linkmode_copy(support1, support);
+
+- iface = sfp_select_interface(pl->sfp_bus, id, config.advertising);
++ iface = sfp_select_interface(pl->sfp_bus, config.advertising);
+ if (iface == PHY_INTERFACE_MODE_NA) {
+ netdev_err(pl->netdev,
+ "selection of interface failed, advertisement %*pb\n",
+--- a/drivers/net/phy/sfp-bus.c
++++ b/drivers/net/phy/sfp-bus.c
+@@ -319,16 +319,12 @@ EXPORT_SYMBOL_GPL(sfp_parse_support);
+ /**
+ * sfp_select_interface() - Select appropriate phy_interface_t mode
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+- * @id: a pointer to the module's &struct sfp_eeprom_id
+ * @link_modes: ethtool link modes mask
+ *
+- * Derive the phy_interface_t mode for the information found in the
+- * module's identifying EEPROM and the link modes mask. There is no
+- * standard or defined way to derive this information, so we decide
+- * based upon the link mode mask.
++ * Derive the phy_interface_t mode for the SFP module from the link
++ * modes mask.
+ */
+ phy_interface_t sfp_select_interface(struct sfp_bus *bus,
+- const struct sfp_eeprom_id *id,
+ unsigned long *link_modes)
+ {
+ if (phylink_test(link_modes, 10000baseCR_Full) ||
+@@ -341,7 +337,8 @@ phy_interface_t sfp_select_interface(str
+ if (phylink_test(link_modes, 2500baseX_Full))
+ return PHY_INTERFACE_MODE_2500BASEX;
+
+- if (id->base.e1000_base_t)
++ if (phylink_test(link_modes, 1000baseT_Half) ||
++ phylink_test(link_modes, 1000baseT_Full))
+ return PHY_INTERFACE_MODE_SGMII;
+
+ if (phylink_test(link_modes, 1000baseX_Full))
+--- a/include/linux/sfp.h
++++ b/include/linux/sfp.h
+@@ -504,7 +504,6 @@ int sfp_parse_port(struct sfp_bus *bus,
+ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ unsigned long *support);
+ phy_interface_t sfp_select_interface(struct sfp_bus *bus,
+- const struct sfp_eeprom_id *id,
+ unsigned long *link_modes);
+
+ int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo);
+@@ -532,7 +531,6 @@ static inline void sfp_parse_support(str
+ }
+
+ static inline phy_interface_t sfp_select_interface(struct sfp_bus *bus,
+- const struct sfp_eeprom_id *id,
+ unsigned long *link_modes)
+ {
+ return PHY_INTERFACE_MODE_NA;
diff --git a/target/linux/generic/pending-5.4/742-net-sfp-add-more-extended-compliance-codes.patch b/target/linux/generic/pending-5.4/742-net-sfp-add-more-extended-compliance-codes.patch
new file mode 100644
index 0000000000..4ca0ded55d
--- /dev/null
+++ b/target/linux/generic/pending-5.4/742-net-sfp-add-more-extended-compliance-codes.patch
@@ -0,0 +1,251 @@
+From c66a4e76c8554c84e64b9315314576ac403c6641 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Thu, 26 Sep 2019 15:14:18 +0100
+Subject: [PATCH 645/660] net: sfp: add more extended compliance codes
+
+SFF-8024 is used to define various constants re-used in several SFF
+SFP-related specifications. Split these constants from the enum, and
+rename them to indicate that they're defined by SFF-8024.
+
+Add and use updated SFF-8024 extended compliance code definitions for
+10GBASE-T, 5GBASE-T and 2.5GBASE-T modules.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/sfp-bus.c | 60 ++++++++++++++++------------
+ drivers/net/phy/sfp.c | 4 +-
+ include/linux/sfp.h | 82 ++++++++++++++++++++++++++-------------
+ 3 files changed, 93 insertions(+), 53 deletions(-)
+
+--- a/drivers/net/phy/sfp-bus.c
++++ b/drivers/net/phy/sfp-bus.c
+@@ -123,35 +123,35 @@ int sfp_parse_port(struct sfp_bus *bus,
+
+ /* port is the physical connector, set this from the connector field. */
+ switch (id->base.connector) {
+- case SFP_CONNECTOR_SC:
+- case SFP_CONNECTOR_FIBERJACK:
+- case SFP_CONNECTOR_LC:
+- case SFP_CONNECTOR_MT_RJ:
+- case SFP_CONNECTOR_MU:
+- case SFP_CONNECTOR_OPTICAL_PIGTAIL:
++ case SFF8024_CONNECTOR_SC:
++ case SFF8024_CONNECTOR_FIBERJACK:
++ case SFF8024_CONNECTOR_LC:
++ case SFF8024_CONNECTOR_MT_RJ:
++ case SFF8024_CONNECTOR_MU:
++ case SFF8024_CONNECTOR_OPTICAL_PIGTAIL:
++ case SFF8024_CONNECTOR_MPO_1X12:
++ case SFF8024_CONNECTOR_MPO_2X16:
+ port = PORT_FIBRE;
+ break;
+
+- case SFP_CONNECTOR_RJ45:
++ case SFF8024_CONNECTOR_RJ45:
+ port = PORT_TP;
+ break;
+
+- case SFP_CONNECTOR_COPPER_PIGTAIL:
++ case SFF8024_CONNECTOR_COPPER_PIGTAIL:
+ port = PORT_DA;
+ break;
+
+- case SFP_CONNECTOR_UNSPEC:
++ case SFF8024_CONNECTOR_UNSPEC:
+ if (id->base.e1000_base_t) {
+ port = PORT_TP;
+ break;
+ }
+ /* fallthrough */
+- case SFP_CONNECTOR_SG: /* guess */
+- case SFP_CONNECTOR_MPO_1X12:
+- case SFP_CONNECTOR_MPO_2X16:
+- case SFP_CONNECTOR_HSSDC_II:
+- case SFP_CONNECTOR_NOSEPARATE:
+- case SFP_CONNECTOR_MXC_2X16:
++ case SFF8024_CONNECTOR_SG: /* guess */
++ case SFF8024_CONNECTOR_HSSDC_II:
++ case SFF8024_CONNECTOR_NOSEPARATE:
++ case SFF8024_CONNECTOR_MXC_2X16:
+ port = PORT_OTHER;
+ break;
+ default:
+@@ -260,22 +260,33 @@ void sfp_parse_support(struct sfp_bus *b
+ }
+
+ switch (id->base.extended_cc) {
+- case 0x00: /* Unspecified */
++ case SFF8024_ECC_UNSPEC:
+ break;
+- case 0x02: /* 100Gbase-SR4 or 25Gbase-SR */
++ case SFF8024_ECC_100GBASE_SR4_25GBASE_SR:
+ phylink_set(modes, 100000baseSR4_Full);
+ phylink_set(modes, 25000baseSR_Full);
+ break;
+- case 0x03: /* 100Gbase-LR4 or 25Gbase-LR */
+- case 0x04: /* 100Gbase-ER4 or 25Gbase-ER */
++ case SFF8024_ECC_100GBASE_LR4_25GBASE_LR:
++ case SFF8024_ECC_100GBASE_ER4_25GBASE_ER:
+ phylink_set(modes, 100000baseLR4_ER4_Full);
+ break;
+- case 0x0b: /* 100Gbase-CR4 or 25Gbase-CR CA-L */
+- case 0x0c: /* 25Gbase-CR CA-S */
+- case 0x0d: /* 25Gbase-CR CA-N */
++ case SFF8024_ECC_100GBASE_CR4:
+ phylink_set(modes, 100000baseCR4_Full);
++ /* fallthrough */
++ case SFF8024_ECC_25GBASE_CR_S:
++ case SFF8024_ECC_25GBASE_CR_N:
+ phylink_set(modes, 25000baseCR_Full);
+ break;
++ case SFF8024_ECC_10GBASE_T_SFI:
++ case SFF8024_ECC_10GBASE_T_SR:
++ phylink_set(modes, 10000baseT_Full);
++ break;
++ case SFF8024_ECC_5GBASE_T:
++ phylink_set(modes, 5000baseT_Full);
++ break;
++ case SFF8024_ECC_2_5GBASE_T:
++ phylink_set(modes, 2500baseT_Full);
++ break;
+ default:
+ dev_warn(bus->sfp_dev,
+ "Unknown/unsupported extended compliance code: 0x%02x\n",
+@@ -300,7 +311,7 @@ void sfp_parse_support(struct sfp_bus *b
+ */
+ if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS)) {
+ /* If the encoding and bit rate allows 1000baseX */
+- if (id->base.encoding == SFP_ENCODING_8B10B && br_nom &&
++ if (id->base.encoding == SFF8024_ENCODING_8B10B && br_nom &&
+ br_min <= 1300 && br_max >= 1200)
+ phylink_set(modes, 1000baseX_Full);
+ }
+@@ -331,7 +342,8 @@ phy_interface_t sfp_select_interface(str
+ phylink_test(link_modes, 10000baseSR_Full) ||
+ phylink_test(link_modes, 10000baseLR_Full) ||
+ phylink_test(link_modes, 10000baseLRM_Full) ||
+- phylink_test(link_modes, 10000baseER_Full))
++ phylink_test(link_modes, 10000baseER_Full) ||
++ phylink_test(link_modes, 10000baseT_Full))
+ return PHY_INTERFACE_MODE_10GKR;
+
+ if (phylink_test(link_modes, 2500baseX_Full))
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -240,7 +240,7 @@ struct sfp {
+
+ static bool sff_module_supported(const struct sfp_eeprom_id *id)
+ {
+- return id->base.phys_id == SFP_PHYS_ID_SFF &&
++ return id->base.phys_id == SFF8024_ID_SFF_8472 &&
+ id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
+ }
+
+@@ -251,7 +251,7 @@ static const struct sff_data sff_data =
+
+ static bool sfp_module_supported(const struct sfp_eeprom_id *id)
+ {
+- return id->base.phys_id == SFP_PHYS_ID_SFP &&
++ return id->base.phys_id == SFF8024_ID_SFP &&
+ id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
+ }
+
+--- a/include/linux/sfp.h
++++ b/include/linux/sfp.h
+@@ -275,6 +275,61 @@ struct sfp_diag {
+ __be16 cal_v_offset;
+ } __packed;
+
++/* SFF8024 defined constants */
++enum {
++ SFF8024_ID_UNK = 0x00,
++ SFF8024_ID_SFF_8472 = 0x02,
++ SFF8024_ID_SFP = 0x03,
++ SFF8024_ID_DWDM_SFP = 0x0b,
++ SFF8024_ID_QSFP_8438 = 0x0c,
++ SFF8024_ID_QSFP_8436_8636 = 0x0d,
++ SFF8024_ID_QSFP28_8636 = 0x11,
++
++ SFF8024_ENCODING_UNSPEC = 0x00,
++ SFF8024_ENCODING_8B10B = 0x01,
++ SFF8024_ENCODING_4B5B = 0x02,
++ SFF8024_ENCODING_NRZ = 0x03,
++ SFF8024_ENCODING_8472_MANCHESTER= 0x04,
++ SFF8024_ENCODING_8472_SONET = 0x05,
++ SFF8024_ENCODING_8472_64B66B = 0x06,
++ SFF8024_ENCODING_8436_MANCHESTER= 0x06,
++ SFF8024_ENCODING_8436_SONET = 0x04,
++ SFF8024_ENCODING_8436_64B66B = 0x05,
++ SFF8024_ENCODING_256B257B = 0x07,
++ SFF8024_ENCODING_PAM4 = 0x08,
++
++ SFF8024_CONNECTOR_UNSPEC = 0x00,
++ /* codes 01-05 not supportable on SFP, but some modules have single SC */
++ SFF8024_CONNECTOR_SC = 0x01,
++ SFF8024_CONNECTOR_FIBERJACK = 0x06,
++ SFF8024_CONNECTOR_LC = 0x07,
++ SFF8024_CONNECTOR_MT_RJ = 0x08,
++ SFF8024_CONNECTOR_MU = 0x09,
++ SFF8024_CONNECTOR_SG = 0x0a,
++ SFF8024_CONNECTOR_OPTICAL_PIGTAIL= 0x0b,
++ SFF8024_CONNECTOR_MPO_1X12 = 0x0c,
++ SFF8024_CONNECTOR_MPO_2X16 = 0x0d,
++ SFF8024_CONNECTOR_HSSDC_II = 0x20,
++ SFF8024_CONNECTOR_COPPER_PIGTAIL= 0x21,
++ SFF8024_CONNECTOR_RJ45 = 0x22,
++ SFF8024_CONNECTOR_NOSEPARATE = 0x23,
++ SFF8024_CONNECTOR_MXC_2X16 = 0x24,
++
++ SFF8024_ECC_UNSPEC = 0x00,
++ SFF8024_ECC_100G_25GAUI_C2M_AOC = 0x01,
++ SFF8024_ECC_100GBASE_SR4_25GBASE_SR = 0x02,
++ SFF8024_ECC_100GBASE_LR4_25GBASE_LR = 0x03,
++ SFF8024_ECC_100GBASE_ER4_25GBASE_ER = 0x04,
++ SFF8024_ECC_100GBASE_SR10 = 0x05,
++ SFF8024_ECC_100GBASE_CR4 = 0x0b,
++ SFF8024_ECC_25GBASE_CR_S = 0x0c,
++ SFF8024_ECC_25GBASE_CR_N = 0x0d,
++ SFF8024_ECC_10GBASE_T_SFI = 0x16,
++ SFF8024_ECC_10GBASE_T_SR = 0x1c,
++ SFF8024_ECC_5GBASE_T = 0x1d,
++ SFF8024_ECC_2_5GBASE_T = 0x1e,
++};
++
+ /* SFP EEPROM registers */
+ enum {
+ SFP_PHYS_ID = 0x00,
+@@ -309,34 +364,7 @@ enum {
+ SFP_SFF8472_COMPLIANCE = 0x5e,
+ SFP_CC_EXT = 0x5f,
+
+- SFP_PHYS_ID_SFF = 0x02,
+- SFP_PHYS_ID_SFP = 0x03,
+ SFP_PHYS_EXT_ID_SFP = 0x04,
+- SFP_CONNECTOR_UNSPEC = 0x00,
+- /* codes 01-05 not supportable on SFP, but some modules have single SC */
+- SFP_CONNECTOR_SC = 0x01,
+- SFP_CONNECTOR_FIBERJACK = 0x06,
+- SFP_CONNECTOR_LC = 0x07,
+- SFP_CONNECTOR_MT_RJ = 0x08,
+- SFP_CONNECTOR_MU = 0x09,
+- SFP_CONNECTOR_SG = 0x0a,
+- SFP_CONNECTOR_OPTICAL_PIGTAIL = 0x0b,
+- SFP_CONNECTOR_MPO_1X12 = 0x0c,
+- SFP_CONNECTOR_MPO_2X16 = 0x0d,
+- SFP_CONNECTOR_HSSDC_II = 0x20,
+- SFP_CONNECTOR_COPPER_PIGTAIL = 0x21,
+- SFP_CONNECTOR_RJ45 = 0x22,
+- SFP_CONNECTOR_NOSEPARATE = 0x23,
+- SFP_CONNECTOR_MXC_2X16 = 0x24,
+- SFP_ENCODING_UNSPEC = 0x00,
+- SFP_ENCODING_8B10B = 0x01,
+- SFP_ENCODING_4B5B = 0x02,
+- SFP_ENCODING_NRZ = 0x03,
+- SFP_ENCODING_8472_MANCHESTER = 0x04,
+- SFP_ENCODING_8472_SONET = 0x05,
+- SFP_ENCODING_8472_64B66B = 0x06,
+- SFP_ENCODING_256B257B = 0x07,
+- SFP_ENCODING_PAM4 = 0x08,
+ SFP_OPTIONS_HIGH_POWER_LEVEL = BIT(13),
+ SFP_OPTIONS_PAGING_A2 = BIT(12),
+ SFP_OPTIONS_RETIMER = BIT(11),
diff --git a/target/linux/generic/pending-5.4/743-net-sfp-add-module-start-stop-upstream-notifications.patch b/target/linux/generic/pending-5.4/743-net-sfp-add-module-start-stop-upstream-notifications.patch
new file mode 100644
index 0000000000..c0c3e9e57e
--- /dev/null
+++ b/target/linux/generic/pending-5.4/743-net-sfp-add-module-start-stop-upstream-notifications.patch
@@ -0,0 +1,131 @@
+From f9a5a54b59cb904b37bf7409a43635ab195d0214 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Tue, 19 Nov 2019 10:13:25 +0000
+Subject: [PATCH 646/660] net: sfp: add module start/stop upstream
+ notifications
+
+When dealing with some copper modules, we can't positively know what the
+module capabilities are until we have probed the PHY. Without the full
+capabilities, we may end up failing a module that we could otherwise
+drive with a restricted set of capabilities.
+
+An example of this would be a module with a NBASE-T PHY plugged into
+a host that supports phy interface modes 2500BASE-X and SGMII. The
+PHY supports 10GBASE-R, 5000BASE-X, 2500BASE-X, SGMII interface modes,
+which means a subset of the capabilities are compatible with the host.
+
+However, reading the module EEPROM leads us to believe that the module
+only supports ethtool link mode 10GBASE-T, which is incompatible with
+the host - and thus results in the module being rejected.
+
+This patch adds an extra notification which are triggered after the
+SFP module's PHY probe, and a corresponding notification just before
+the PHY is removed.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/sfp-bus.c | 21 +++++++++++++++++++++
+ drivers/net/phy/sfp.c | 8 ++++++++
+ drivers/net/phy/sfp.h | 2 ++
+ include/linux/sfp.h | 4 ++++
+ 4 files changed, 35 insertions(+)
+
+--- a/drivers/net/phy/sfp-bus.c
++++ b/drivers/net/phy/sfp-bus.c
+@@ -711,6 +711,27 @@ void sfp_module_remove(struct sfp_bus *b
+ }
+ EXPORT_SYMBOL_GPL(sfp_module_remove);
+
++int sfp_module_start(struct sfp_bus *bus)
++{
++ const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
++ int ret = 0;
++
++ if (ops && ops->module_start)
++ ret = ops->module_start(bus->upstream);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(sfp_module_start);
++
++void sfp_module_stop(struct sfp_bus *bus)
++{
++ const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
++
++ if (ops && ops->module_stop)
++ ops->module_stop(bus->upstream);
++}
++EXPORT_SYMBOL_GPL(sfp_module_stop);
++
+ static void sfp_socket_clear(struct sfp_bus *bus)
+ {
+ bus->sfp_dev = NULL;
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -57,6 +57,7 @@ enum {
+ SFP_DEV_UP,
+
+ SFP_S_DOWN = 0,
++ SFP_S_FAIL,
+ SFP_S_WAIT,
+ SFP_S_INIT,
+ SFP_S_INIT_TX_FAULT,
+@@ -120,6 +121,7 @@ static const char *event_to_str(unsigned
+
+ static const char * const sm_state_strings[] = {
+ [SFP_S_DOWN] = "down",
++ [SFP_S_FAIL] = "fail",
+ [SFP_S_WAIT] = "wait",
+ [SFP_S_INIT] = "init",
+ [SFP_S_INIT_TX_FAULT] = "init_tx_fault",
+@@ -1766,6 +1768,8 @@ static void sfp_sm_main(struct sfp *sfp,
+ if (sfp->sm_state == SFP_S_LINK_UP &&
+ sfp->sm_dev_state == SFP_DEV_UP)
+ sfp_sm_link_down(sfp);
++ if (sfp->sm_state > SFP_S_INIT)
++ sfp_module_stop(sfp->sfp_bus);
+ if (sfp->mod_phy)
+ sfp_sm_phy_detach(sfp);
+ sfp_module_tx_disable(sfp);
+@@ -1833,6 +1837,10 @@ static void sfp_sm_main(struct sfp *sfp,
+ * clear. Probe for the PHY and check the LOS state.
+ */
+ sfp_sm_probe_for_phy(sfp);
++ if (sfp_module_start(sfp->sfp_bus)) {
++ sfp_sm_next(sfp, SFP_S_FAIL, 0);
++ break;
++ }
+ sfp_sm_link_check_los(sfp);
+
+ /* Reset the fault retry count */
+--- a/drivers/net/phy/sfp.h
++++ b/drivers/net/phy/sfp.h
+@@ -22,6 +22,8 @@ void sfp_link_up(struct sfp_bus *bus);
+ void sfp_link_down(struct sfp_bus *bus);
+ int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
+ void sfp_module_remove(struct sfp_bus *bus);
++int sfp_module_start(struct sfp_bus *bus);
++void sfp_module_stop(struct sfp_bus *bus);
+ int sfp_link_configure(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
+ struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
+ const struct sfp_socket_ops *ops);
+--- a/include/linux/sfp.h
++++ b/include/linux/sfp.h
+@@ -507,6 +507,8 @@ struct sfp_bus;
+ * @module_insert: called after a module has been detected to determine
+ * whether the module is supported for the upstream device.
+ * @module_remove: called after the module has been removed.
++ * @module_start: called after the PHY probe step
++ * @module_stop: called before the PHY is removed
+ * @link_down: called when the link is non-operational for whatever
+ * reason.
+ * @link_up: called when the link is operational.
+@@ -520,6 +522,8 @@ struct sfp_upstream_ops {
+ void (*detach)(void *priv, struct sfp_bus *bus);
+ int (*module_insert)(void *priv, const struct sfp_eeprom_id *id);
+ void (*module_remove)(void *priv);
++ int (*module_start)(void *priv);
++ void (*module_stop)(void *priv);
+ void (*link_down)(void *priv);
+ void (*link_up)(void *priv);
+ int (*connect_phy)(void *priv, struct phy_device *);
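
A rough sketch of how an upstream user might hook the new notifications (the
function names and comments are illustrative; a real user also fills in
attach/detach, module_insert and the other callbacks):

    #include <linux/sfp.h>

    static int example_module_start(void *priv)
    {
            /* Any PHY on the module has been probed by now, so the full
             * set of link capabilities is finally known. */
            return 0;   /* non-zero sends the SFP state machine to "fail" */
    }

    static void example_module_stop(void *priv)
    {
            /* Called just before the module's PHY is removed. */
    }

    static const struct sfp_upstream_ops example_upstream_ops = {
            .module_start = example_module_start,
            .module_stop  = example_module_stop,
    };
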
diff --git a/target/linux/generic/pending-5.4/744-net-sfp-move-phy_start-phy_stop-to-phylink.patch b/target/linux/generic/pending-5.4/744-net-sfp-move-phy_start-phy_stop-to-phylink.patch
new file mode 100644
index 0000000000..a646fcd409
--- /dev/null
+++ b/target/linux/generic/pending-5.4/744-net-sfp-move-phy_start-phy_stop-to-phylink.patch
@@ -0,0 +1,72 @@
+From e2dc261b872a92a055eb2e86ac136baf9b20f2f2 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Thu, 21 Nov 2019 17:21:33 +0000
+Subject: [PATCH 647/660] net: sfp: move phy_start()/phy_stop() to phylink
+
+Move phy_start() and phy_stop() into the module_start and module_stop
+notifications in phylink, rather than having them in the SFP code.
+This gives phylink responsibility for controlling the PHY, rather
+than having SFP start and stop the PHY state machine.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/phylink.c | 22 ++++++++++++++++++++++
+ drivers/net/phy/sfp.c | 2 --
+ 2 files changed, 22 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -1717,6 +1717,26 @@ static int phylink_sfp_module_insert(voi
+ return ret;
+ }
+
++static int phylink_sfp_module_start(void *upstream)
++{
++ struct phylink *pl = upstream;
++
++ /* If this SFP module has a PHY, start the PHY now. */
++ if (pl->phydev)
++ phy_start(pl->phydev);
++
++ return 0;
++}
++
++static void phylink_sfp_module_stop(void *upstream)
++{
++ struct phylink *pl = upstream;
++
++ /* If this SFP module has a PHY, stop it. */
++ if (pl->phydev)
++ phy_stop(pl->phydev);
++}
++
+ static void phylink_sfp_link_down(void *upstream)
+ {
+ struct phylink *pl = upstream;
+@@ -1752,6 +1772,8 @@ static const struct sfp_upstream_ops sfp
+ .attach = phylink_sfp_attach,
+ .detach = phylink_sfp_detach,
+ .module_insert = phylink_sfp_module_insert,
++ .module_start = phylink_sfp_module_start,
++ .module_stop = phylink_sfp_module_stop,
+ .link_up = phylink_sfp_link_up,
+ .link_down = phylink_sfp_link_down,
+ .connect_phy = phylink_sfp_connect_phy,
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -1331,7 +1331,6 @@ static void sfp_sm_mod_next(struct sfp *
+
+ static void sfp_sm_phy_detach(struct sfp *sfp)
+ {
+- phy_stop(sfp->mod_phy);
+ sfp_remove_phy(sfp->sfp_bus);
+ phy_device_remove(sfp->mod_phy);
+ phy_device_free(sfp->mod_phy);
+@@ -1362,7 +1361,6 @@ static void sfp_sm_probe_phy(struct sfp
+ }
+
+ sfp->mod_phy = phy;
+- phy_start(phy);
+ }
+
+ static void sfp_sm_link_up(struct sfp *sfp)
diff --git a/target/linux/generic/pending-5.4/745-net-mdio-i2c-add-support-for-Clause-45-accesses.patch b/target/linux/generic/pending-5.4/745-net-mdio-i2c-add-support-for-Clause-45-accesses.patch
new file mode 100644
index 0000000000..9a4bb5bb78
--- /dev/null
+++ b/target/linux/generic/pending-5.4/745-net-mdio-i2c-add-support-for-Clause-45-accesses.patch
@@ -0,0 +1,74 @@
+From c9de73988a35c6c85810a992954ac568cca503e5 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Wed, 2 Oct 2019 10:31:10 +0100
+Subject: [PATCH 648/660] net: mdio-i2c: add support for Clause 45 accesses
+
+Some SFP+ modules have PHYs on them just like SFP modules do, except
+they are Clause 45 PHYs. The I2C protocol used to access them is
+modified slightly in order to send the device address and 16-bit
+register index.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/mdio-i2c.c | 28 ++++++++++++++++++++--------
+ 1 file changed, 20 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/phy/mdio-i2c.c
++++ b/drivers/net/phy/mdio-i2c.c
+@@ -36,17 +36,24 @@ static int i2c_mii_read(struct mii_bus *
+ {
+ struct i2c_adapter *i2c = bus->priv;
+ struct i2c_msg msgs[2];
+- u8 data[2], dev_addr = reg;
++ u8 addr[3], data[2], *p;
+ int bus_addr, ret;
+
+ if (!i2c_mii_valid_phy_id(phy_id))
+ return 0xffff;
+
++ p = addr;
++ if (reg & MII_ADDR_C45) {
++ *p++ = 0x20 | ((reg >> 16) & 31);
++ *p++ = reg >> 8;
++ }
++ *p++ = reg;
++
+ bus_addr = i2c_mii_phy_addr(phy_id);
+ msgs[0].addr = bus_addr;
+ msgs[0].flags = 0;
+- msgs[0].len = 1;
+- msgs[0].buf = &dev_addr;
++ msgs[0].len = p - addr;
++ msgs[0].buf = addr;
+ msgs[1].addr = bus_addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = sizeof(data);
+@@ -64,18 +71,23 @@ static int i2c_mii_write(struct mii_bus
+ struct i2c_adapter *i2c = bus->priv;
+ struct i2c_msg msg;
+ int ret;
+- u8 data[3];
++ u8 data[5], *p;
+
+ if (!i2c_mii_valid_phy_id(phy_id))
+ return 0;
+
+- data[0] = reg;
+- data[1] = val >> 8;
+- data[2] = val;
++ p = data;
++ if (reg & MII_ADDR_C45) {
++ *p++ = (reg >> 16) & 31;
++ *p++ = reg >> 8;
++ }
++ *p++ = reg;
++ *p++ = val >> 8;
++ *p++ = val;
+
+ msg.addr = i2c_mii_phy_addr(phy_id);
+ msg.flags = 0;
+- msg.len = 3;
++ msg.len = p - data;
+ msg.buf = data;
+
+ ret = i2c_transfer(i2c, &msg, 1);
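
For reference, a standalone illustration of the extra address bytes a Clause 45
access produces with the scheme above (the MMD and register values are
arbitrary examples; MII_ADDR_C45 is the kernel's bit-30 marker for Clause 45
register indices):

    #include <stdint.h>
    #include <stdio.h>

    #define MII_ADDR_C45 (1 << 30)

    int main(void)
    {
            /* Clause 45 read address for MMD 1 (PMA/PMD), register 0x0007 */
            uint32_t reg = MII_ADDR_C45 | (1 << 16) | 0x0007;
            uint8_t addr[3], *p = addr;

            if (reg & MII_ADDR_C45) {
                    *p++ = 0x20 | ((reg >> 16) & 31);  /* 0x21: marker + devad */
                    *p++ = reg >> 8;                   /* register, high byte  */
            }
            *p++ = reg;                                /* register, low byte   */

            printf("%d address bytes: %02x %02x %02x\n",
                   (int)(p - addr), addr[0], addr[1], addr[2]);
            return 0;
    }
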
diff --git a/target/linux/generic/pending-5.4/746-net-phylink-re-split-__phylink_connect_phy.patch b/target/linux/generic/pending-5.4/746-net-phylink-re-split-__phylink_connect_phy.patch
new file mode 100644
index 0000000000..c74a56c575
--- /dev/null
+++ b/target/linux/generic/pending-5.4/746-net-phylink-re-split-__phylink_connect_phy.patch
@@ -0,0 +1,93 @@
+From 0db7fba746b5608c30d4e2ba1c99a2a309e2d288 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Fri, 8 Nov 2019 15:22:48 +0000
+Subject: [PATCH 649/660] net: phylink: re-split __phylink_connect_phy()
+
+In order to support Clause 45 PHYs on SFP+ modules, which have an
+indeterminant phy interface mode, we need to be able to call
+phylink_bringup_phy() with a different interface mode to that used when
+binding the PHY. Reduce __phylink_connect_phy() to an attach operation,
+and move the call to phylink_bringup_phy() to its call sites.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/phylink.c | 39 ++++++++++++++++++++++++---------------
+ 1 file changed, 24 insertions(+), 15 deletions(-)
+
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -728,11 +728,9 @@ static int phylink_bringup_phy(struct ph
+ return 0;
+ }
+
+-static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy,
+- phy_interface_t interface)
++static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy,
++ phy_interface_t interface)
+ {
+- int ret;
+-
+ if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
+ (pl->link_an_mode == MLO_AN_INBAND &&
+ phy_interface_mode_is_8023z(interface))))
+@@ -741,15 +739,7 @@ static int __phylink_connect_phy(struct
+ if (pl->phydev)
+ return -EBUSY;
+
+- ret = phy_attach_direct(pl->netdev, phy, 0, interface);
+- if (ret)
+- return ret;
+-
+- ret = phylink_bringup_phy(pl, phy);
+- if (ret)
+- phy_detach(phy);
+-
+- return ret;
++ return phy_attach_direct(pl->netdev, phy, 0, interface);
+ }
+
+ /**
+@@ -769,13 +759,23 @@ static int __phylink_connect_phy(struct
+ */
+ int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
+ {
++ int ret;
++
+ /* Use PHY device/driver interface */
+ if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
+ pl->link_interface = phy->interface;
+ pl->link_config.interface = pl->link_interface;
+ }
+
+- return __phylink_connect_phy(pl, phy, pl->link_interface);
++ ret = phylink_attach_phy(pl, phy, pl->link_interface);
++ if (ret < 0)
++ return ret;
++
++ ret = phylink_bringup_phy(pl, phy);
++ if (ret)
++ phy_detach(phy);
++
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(phylink_connect_phy);
+
+@@ -1759,8 +1759,17 @@ static void phylink_sfp_link_up(void *up
+ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
+ {
+ struct phylink *pl = upstream;
++ int ret;
+
+- return __phylink_connect_phy(upstream, phy, pl->link_config.interface);
++ ret = phylink_attach_phy(pl, phy, pl->link_config.interface);
++ if (ret < 0)
++ return ret;
++
++ ret = phylink_bringup_phy(pl, phy);
++ if (ret)
++ phy_detach(phy);
++
++ return ret;
+ }
+
+ static void phylink_sfp_disconnect_phy(void *upstream)
diff --git a/target/linux/generic/pending-5.4/747-net-phylink-support-Clause-45-PHYs-on-SFP-modules.patch b/target/linux/generic/pending-5.4/747-net-phylink-support-Clause-45-PHYs-on-SFP-modules.patch
new file mode 100644
index 0000000000..2cea118c82
--- /dev/null
+++ b/target/linux/generic/pending-5.4/747-net-phylink-support-Clause-45-PHYs-on-SFP-modules.patch
@@ -0,0 +1,89 @@
+From caf32f96f13df7d3ae6cb8bf8001c88ae22025ca Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Fri, 8 Nov 2019 15:28:22 +0000
+Subject: [PATCH 650/660] net: phylink: support Clause 45 PHYs on SFP+ modules
+
+Some SFP+ modules have Clause 45 PHYs embedded on them, which need a
+little more handling in order to ensure that they are correctly setup,
+as they switch the PHY link mode according to the negotiated speed.
+
+With Clause 22 PHYs, we assumed that they would operate in SGMII mode,
+but this assumption is now false. Adapt phylink to support Clause 45
+PHYs on SFP+ modules.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/phylink.c | 21 ++++++++++++++++-----
+ 1 file changed, 16 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -671,7 +671,8 @@ static void phylink_phy_change(struct ph
+ phy_duplex_to_str(phydev->duplex));
+ }
+
+-static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
++static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
++ phy_interface_t interface)
+ {
+ struct phylink_link_state config;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+@@ -691,7 +692,7 @@ static int phylink_bringup_phy(struct ph
+ ethtool_convert_legacy_u32_to_link_mode(supported, phy->supported);
+ ethtool_convert_legacy_u32_to_link_mode(config.advertising,
+ phy->advertising);
+- config.interface = pl->link_config.interface;
++ config.interface = interface;
+
+ ret = phylink_validate(pl, supported, &config);
+ if (ret)
+@@ -707,6 +708,7 @@ static int phylink_bringup_phy(struct ph
+ mutex_lock(&phy->lock);
+ mutex_lock(&pl->state_mutex);
+ pl->phydev = phy;
++ pl->phy_state.interface = interface;
+ linkmode_copy(pl->supported, supported);
+ linkmode_copy(pl->link_config.advertising, config.advertising);
+
+@@ -771,7 +773,7 @@ int phylink_connect_phy(struct phylink *
+ if (ret < 0)
+ return ret;
+
+- ret = phylink_bringup_phy(pl, phy);
++ ret = phylink_bringup_phy(pl, phy, pl->link_config.interface);
+ if (ret)
+ phy_detach(phy);
+
+@@ -824,7 +826,7 @@ int phylink_of_phy_connect(struct phylin
+ if (!phy_dev)
+ return -ENODEV;
+
+- ret = phylink_bringup_phy(pl, phy_dev);
++ ret = phylink_bringup_phy(pl, phy_dev, pl->link_config.interface);
+ if (ret)
+ phy_detach(phy_dev);
+
+@@ -1759,13 +1761,22 @@ static void phylink_sfp_link_up(void *up
+ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
+ {
+ struct phylink *pl = upstream;
++ phy_interface_t interface = pl->link_config.interface;
+ int ret;
+
+ ret = phylink_attach_phy(pl, phy, pl->link_config.interface);
+ if (ret < 0)
+ return ret;
+
+- ret = phylink_bringup_phy(pl, phy);
++ /* Clause 45 PHYs switch their Serdes lane between several different
++ * modes, normally 10GBASE-R, SGMII. Some use 2500BASE-X for 2.5G
++ * speeds. We really need to know which interface modes the PHY and
++ * MAC supports to properly work out which linkmodes can be supported.
++ */
++ if (phy->is_c45)
++ interface = PHY_INTERFACE_MODE_NA;
++
++ ret = phylink_bringup_phy(pl, phy, interface);
+ if (ret)
+ phy_detach(phy);
+
diff --git a/target/linux/generic/pending-5.4/748-net-phylink-split-link_an_mode-configured-and-curren.patch b/target/linux/generic/pending-5.4/748-net-phylink-split-link_an_mode-configured-and-curren.patch
new file mode 100644
index 0000000000..f30d37f0c6
--- /dev/null
+++ b/target/linux/generic/pending-5.4/748-net-phylink-split-link_an_mode-configured-and-curren.patch
@@ -0,0 +1,254 @@
+From d1339d6956f0255b6ce2412328a98945be8cc3ca Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Sat, 16 Nov 2019 11:30:18 +0000
+Subject: [PATCH 651/660] net: phylink: split link_an_mode configured and
+ current settings
+
+Split link_an_mode between the configured setting and the current
+operating setting. This is an important distinction to make when we
+need to configure PHY mode for a plugged SFP+ module that does not
+use in-band signalling.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/phylink.c | 59 ++++++++++++++++++++-------------------
+ 1 file changed, 31 insertions(+), 28 deletions(-)
+
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -48,7 +48,8 @@ struct phylink {
+ unsigned long phylink_disable_state; /* bitmask of disables */
+ struct phy_device *phydev;
+ phy_interface_t link_interface; /* PHY_INTERFACE_xxx */
+- u8 link_an_mode; /* MLO_AN_xxx */
++ u8 cfg_link_an_mode; /* MLO_AN_xxx */
++ u8 cur_link_an_mode;
+ u8 link_port; /* The current non-phy ethtool port */
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+
+@@ -253,12 +254,12 @@ static int phylink_parse_mode(struct phy
+
+ dn = fwnode_get_named_child_node(fwnode, "fixed-link");
+ if (dn || fwnode_property_present(fwnode, "fixed-link"))
+- pl->link_an_mode = MLO_AN_FIXED;
++ pl->cfg_link_an_mode = MLO_AN_FIXED;
+ fwnode_handle_put(dn);
+
+ if (fwnode_property_read_string(fwnode, "managed", &managed) == 0 &&
+ strcmp(managed, "in-band-status") == 0) {
+- if (pl->link_an_mode == MLO_AN_FIXED) {
++ if (pl->cfg_link_an_mode == MLO_AN_FIXED) {
+ netdev_err(pl->netdev,
+ "can't use both fixed-link and in-band-status\n");
+ return -EINVAL;
+@@ -270,7 +271,7 @@ static int phylink_parse_mode(struct phy
+ phylink_set(pl->supported, Asym_Pause);
+ phylink_set(pl->supported, Pause);
+ pl->link_config.an_enabled = true;
+- pl->link_an_mode = MLO_AN_INBAND;
++ pl->cfg_link_an_mode = MLO_AN_INBAND;
+
+ switch (pl->link_config.interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+@@ -330,14 +331,14 @@ static void phylink_mac_config(struct ph
+ {
+ netdev_dbg(pl->netdev,
+ "%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n",
+- __func__, phylink_an_mode_str(pl->link_an_mode),
++ __func__, phylink_an_mode_str(pl->cur_link_an_mode),
+ phy_modes(state->interface),
+ phy_speed_to_str(state->speed),
+ phy_duplex_to_str(state->duplex),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising,
+ state->pause, state->link, state->an_enabled);
+
+- pl->ops->mac_config(pl->netdev, pl->link_an_mode, state);
++ pl->ops->mac_config(pl->netdev, pl->cur_link_an_mode, state);
+ }
+
+ static void phylink_mac_config_up(struct phylink *pl,
+@@ -446,7 +447,7 @@ static void phylink_resolve(struct work_
+ } else if (pl->mac_link_dropped) {
+ link_state.link = false;
+ } else {
+- switch (pl->link_an_mode) {
++ switch (pl->cur_link_an_mode) {
+ case MLO_AN_PHY:
+ link_state = pl->phy_state;
+ phylink_resolve_flow(pl, &link_state);
+@@ -483,12 +484,12 @@ static void phylink_resolve(struct work_
+ if (link_state.link != netif_carrier_ok(ndev)) {
+ if (!link_state.link) {
+ netif_carrier_off(ndev);
+- pl->ops->mac_link_down(ndev, pl->link_an_mode,
++ pl->ops->mac_link_down(ndev, pl->cur_link_an_mode,
+ pl->cur_interface);
+ netdev_info(ndev, "Link is Down\n");
+ } else {
+ pl->cur_interface = link_state.interface;
+- pl->ops->mac_link_up(ndev, pl->link_an_mode,
++ pl->ops->mac_link_up(ndev, pl->cur_link_an_mode,
+ pl->cur_interface, pl->phydev);
+
+ netif_carrier_on(ndev);
+@@ -610,7 +611,7 @@ struct phylink *phylink_create(struct ne
+ return ERR_PTR(ret);
+ }
+
+- if (pl->link_an_mode == MLO_AN_FIXED) {
++ if (pl->cfg_link_an_mode == MLO_AN_FIXED) {
+ ret = phylink_parse_fixedlink(pl, fwnode);
+ if (ret < 0) {
+ kfree(pl);
+@@ -618,6 +619,8 @@ struct phylink *phylink_create(struct ne
+ }
+ }
+
++ pl->cur_link_an_mode = pl->cfg_link_an_mode;
++
+ ret = phylink_register_sfp(pl, fwnode);
+ if (ret < 0) {
+ kfree(pl);
+@@ -733,8 +736,8 @@ static int phylink_bringup_phy(struct ph
+ static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy,
+ phy_interface_t interface)
+ {
+- if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
+- (pl->link_an_mode == MLO_AN_INBAND &&
++ if (WARN_ON(pl->cfg_link_an_mode == MLO_AN_FIXED ||
++ (pl->cfg_link_an_mode == MLO_AN_INBAND &&
+ phy_interface_mode_is_8023z(interface))))
+ return -EINVAL;
+
+@@ -801,8 +804,8 @@ int phylink_of_phy_connect(struct phylin
+ int ret;
+
+ /* Fixed links and 802.3z are handled without needing a PHY */
+- if (pl->link_an_mode == MLO_AN_FIXED ||
+- (pl->link_an_mode == MLO_AN_INBAND &&
++ if (pl->cfg_link_an_mode == MLO_AN_FIXED ||
++ (pl->cfg_link_an_mode == MLO_AN_INBAND &&
+ phy_interface_mode_is_8023z(pl->link_interface)))
+ return 0;
+
+@@ -813,7 +816,7 @@ int phylink_of_phy_connect(struct phylin
+ phy_node = of_parse_phandle(dn, "phy-device", 0);
+
+ if (!phy_node) {
+- if (pl->link_an_mode == MLO_AN_PHY)
++ if (pl->cfg_link_an_mode == MLO_AN_PHY)
+ return -ENODEV;
+ return 0;
+ }
+@@ -876,7 +879,7 @@ int phylink_fixed_state_cb(struct phylin
+ /* It does not make sense to let the link be overriden unless we use
+ * MLO_AN_FIXED
+ */
+- if (pl->link_an_mode != MLO_AN_FIXED)
++ if (pl->cfg_link_an_mode != MLO_AN_FIXED)
+ return -EINVAL;
+
+ mutex_lock(&pl->state_mutex);
+@@ -926,7 +929,7 @@ void phylink_start(struct phylink *pl)
+ ASSERT_RTNL();
+
+ netdev_info(pl->netdev, "configuring for %s/%s link mode\n",
+- phylink_an_mode_str(pl->link_an_mode),
++ phylink_an_mode_str(pl->cur_link_an_mode),
+ phy_modes(pl->link_config.interface));
+
+ /* Always set the carrier off */
+@@ -948,7 +951,7 @@ void phylink_start(struct phylink *pl)
+ clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
+ phylink_run_resolve(pl);
+
+- if (pl->link_an_mode == MLO_AN_FIXED && pl->link_gpio) {
++ if (pl->cfg_link_an_mode == MLO_AN_FIXED && pl->link_gpio) {
+ int irq = gpiod_to_irq(pl->link_gpio);
+
+ if (irq > 0) {
+@@ -963,7 +966,7 @@ void phylink_start(struct phylink *pl)
+ if (irq <= 0)
+ mod_timer(&pl->link_poll, jiffies + HZ);
+ }
+- if (pl->link_an_mode == MLO_AN_FIXED && pl->get_fixed_state)
++ if (pl->cfg_link_an_mode == MLO_AN_FIXED && pl->get_fixed_state)
+ mod_timer(&pl->link_poll, jiffies + HZ);
+ if (pl->phydev)
+ phy_start(pl->phydev);
+@@ -1090,7 +1093,7 @@ int phylink_ethtool_ksettings_get(struct
+
+ linkmode_copy(kset->link_modes.supported, pl->supported);
+
+- switch (pl->link_an_mode) {
++ switch (pl->cur_link_an_mode) {
+ case MLO_AN_FIXED:
+ /* We are using fixed settings. Report these as the
+ * current link settings - and note that these also
+@@ -1163,7 +1166,7 @@ int phylink_ethtool_ksettings_set(struct
+ /* If we have a fixed link (as specified by firmware), refuse
+ * to change link parameters.
+ */
+- if (pl->link_an_mode == MLO_AN_FIXED &&
++ if (pl->cur_link_an_mode == MLO_AN_FIXED &&
+ (s->speed != pl->link_config.speed ||
+ s->duplex != pl->link_config.duplex))
+ return -EINVAL;
+@@ -1175,7 +1178,7 @@ int phylink_ethtool_ksettings_set(struct
+ __clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising);
+ } else {
+ /* If we have a fixed link, refuse to enable autonegotiation */
+- if (pl->link_an_mode == MLO_AN_FIXED)
++ if (pl->cur_link_an_mode == MLO_AN_FIXED)
+ return -EINVAL;
+
+ config.speed = SPEED_UNKNOWN;
+@@ -1217,7 +1220,7 @@ int phylink_ethtool_ksettings_set(struct
+ * configuration. For a fixed link, this isn't able to change any
+ * parameters, which just leaves inband mode.
+ */
+- if (pl->link_an_mode == MLO_AN_INBAND &&
++ if (pl->cur_link_an_mode == MLO_AN_INBAND &&
+ !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
+ phylink_mac_config(pl, &pl->link_config);
+ phylink_mac_an_restart(pl);
+@@ -1307,7 +1310,7 @@ int phylink_ethtool_set_pauseparam(struc
+ pause->tx_pause);
+ } else if (!test_bit(PHYLINK_DISABLE_STOPPED,
+ &pl->phylink_disable_state)) {
+- switch (pl->link_an_mode) {
++ switch (pl->cur_link_an_mode) {
+ case MLO_AN_FIXED:
+ /* Should we allow fixed links to change against the config? */
+ phylink_resolve_flow(pl, config);
+@@ -1496,7 +1499,7 @@ static int phylink_mii_read(struct phyli
+ struct phylink_link_state state;
+ int val = 0xffff;
+
+- switch (pl->link_an_mode) {
++ switch (pl->cur_link_an_mode) {
+ case MLO_AN_FIXED:
+ if (phy_id == 0) {
+ phylink_get_fixed_state(pl, &state);
+@@ -1524,7 +1527,7 @@ static int phylink_mii_read(struct phyli
+ static int phylink_mii_write(struct phylink *pl, unsigned int phy_id,
+ unsigned int reg, unsigned int val)
+ {
+- switch (pl->link_an_mode) {
++ switch (pl->cur_link_an_mode) {
+ case MLO_AN_FIXED:
+ break;
+
+@@ -1698,10 +1701,10 @@ static int phylink_sfp_module_insert(voi
+ linkmode_copy(pl->link_config.advertising, config.advertising);
+ }
+
+- if (pl->link_an_mode != MLO_AN_INBAND ||
++ if (pl->cur_link_an_mode != MLO_AN_INBAND ||
+ pl->link_config.interface != config.interface) {
+ pl->link_config.interface = config.interface;
+- pl->link_an_mode = MLO_AN_INBAND;
++ pl->cur_link_an_mode = MLO_AN_INBAND;
+
+ changed = true;
+
diff --git a/target/linux/generic/pending-5.4/749-net-phylink-split-phylink_sfp_module_insert.patch b/target/linux/generic/pending-5.4/749-net-phylink-split-phylink_sfp_module_insert.patch
new file mode 100644
index 0000000000..87d70d1434
--- /dev/null
+++ b/target/linux/generic/pending-5.4/749-net-phylink-split-phylink_sfp_module_insert.patch
@@ -0,0 +1,120 @@
+From 36569971241ae6b81376da4937d2c8760122d10b Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Thu, 21 Nov 2019 17:58:58 +0000
+Subject: [PATCH 652/660] net: phylink: split phylink_sfp_module_insert()
+
+Split out the configuration step from phylink_sfp_module_insert() so
+we can re-use this later.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/phylink.c | 47 +++++++++++++++++++++++----------------
+ 1 file changed, 28 insertions(+), 19 deletions(-)
+
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -1633,25 +1633,21 @@ static void phylink_sfp_detach(void *ups
+ pl->netdev->sfp_bus = NULL;
+ }
+
+-static int phylink_sfp_module_insert(void *upstream,
+- const struct sfp_eeprom_id *id)
++static int phylink_sfp_config(struct phylink *pl, u8 mode, u8 port,
++ const unsigned long *supported,
++ const unsigned long *advertising)
+ {
+- struct phylink *pl = upstream;
+- __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support1);
++ __ETHTOOL_DECLARE_LINK_MODE_MASK(support);
+ struct phylink_link_state config;
+ phy_interface_t iface;
+- int ret = 0;
+ bool changed;
+- u8 port;
++ int ret;
+
+- ASSERT_RTNL();
+-
+- sfp_parse_support(pl->sfp_bus, id, support);
+- port = sfp_parse_port(pl->sfp_bus, id, support);
++ linkmode_copy(support, supported);
+
+ memset(&config, 0, sizeof(config));
+- linkmode_copy(config.advertising, support);
++ linkmode_copy(config.advertising, advertising);
+ config.interface = PHY_INTERFACE_MODE_NA;
+ config.speed = SPEED_UNKNOWN;
+ config.duplex = DUPLEX_UNKNOWN;
+@@ -1666,8 +1662,6 @@ static int phylink_sfp_module_insert(voi
+ return ret;
+ }
+
+- linkmode_copy(support1, support);
+-
+ iface = sfp_select_interface(pl->sfp_bus, config.advertising);
+ if (iface == PHY_INTERFACE_MODE_NA) {
+ netdev_err(pl->netdev,
+@@ -1677,18 +1671,18 @@ static int phylink_sfp_module_insert(voi
+ }
+
+ config.interface = iface;
++ linkmode_copy(support1, support);
+ ret = phylink_validate(pl, support1, &config);
+ if (ret) {
+ netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n",
+- phylink_an_mode_str(MLO_AN_INBAND),
++ phylink_an_mode_str(mode),
+ phy_modes(config.interface),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
+ return ret;
+ }
+
+ netdev_dbg(pl->netdev, "requesting link mode %s/%s with support %*pb\n",
+- phylink_an_mode_str(MLO_AN_INBAND),
+- phy_modes(config.interface),
++ phylink_an_mode_str(mode), phy_modes(config.interface),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support);
+
+ if (phy_interface_mode_is_8023z(iface) && pl->phydev)
+@@ -1701,15 +1695,15 @@ static int phylink_sfp_module_insert(voi
+ linkmode_copy(pl->link_config.advertising, config.advertising);
+ }
+
+- if (pl->cur_link_an_mode != MLO_AN_INBAND ||
++ if (pl->cur_link_an_mode != mode ||
+ pl->link_config.interface != config.interface) {
+ pl->link_config.interface = config.interface;
+- pl->cur_link_an_mode = MLO_AN_INBAND;
++ pl->cur_link_an_mode = mode;
+
+ changed = true;
+
+ netdev_info(pl->netdev, "switched to %s/%s link mode\n",
+- phylink_an_mode_str(MLO_AN_INBAND),
++ phylink_an_mode_str(mode),
+ phy_modes(config.interface));
+ }
+
+@@ -1722,6 +1716,21 @@ static int phylink_sfp_module_insert(voi
+ return ret;
+ }
+
++static int phylink_sfp_module_insert(void *upstream,
++ const struct sfp_eeprom_id *id)
++{
++ struct phylink *pl = upstream;
++ __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
++ u8 port;
++
++ ASSERT_RTNL();
++
++ sfp_parse_support(pl->sfp_bus, id, support);
++ port = sfp_parse_port(pl->sfp_bus, id, support);
++
++ return phylink_sfp_config(pl, MLO_AN_INBAND, port, support, support);
++}
++
+ static int phylink_sfp_module_start(void *upstream)
+ {
+ struct phylink *pl = upstream;
diff --git a/target/linux/generic/pending-5.4/750-net-phylink-delay-MAC-configuration-for-copper-SFP-m.patch b/target/linux/generic/pending-5.4/750-net-phylink-delay-MAC-configuration-for-copper-SFP-m.patch
new file mode 100644
index 0000000000..358b9a1082
--- /dev/null
+++ b/target/linux/generic/pending-5.4/750-net-phylink-delay-MAC-configuration-for-copper-SFP-m.patch
@@ -0,0 +1,204 @@
+From eb514428f75bc67d12ff019c44a8f8ca9f33c54c Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Thu, 21 Nov 2019 17:42:49 +0000
+Subject: [PATCH 653/660] net: phylink: delay MAC configuration for copper SFP
+ modules
+
+Knowing whether we need to delay the MAC configuration because a module
+may have a PHY is useful to phylink to allow NBASE-T modules to work on
+systems supporting no more than 2.5G speeds.
+
+This commit allows us to delay such configuration until after the PHY
+has been probed by recording the parsed capabilities, and if the module
+may have a PHY, doing no more until the module_start() notification is
+called. At that point, we either have a PHY, or we don't.
+
+We move the PHY-based setup a little later, and use the PHY's support
+capabilities rather than the EEPROM parsed capabilities to determine
+whether we can support the PHY.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/phylink.c | 59 +++++++++++++++++++++++++++++++--------
+ drivers/net/phy/sfp-bus.c | 28 +++++++++++++++++++
+ include/linux/sfp.h | 7 +++++
+ 3 files changed, 83 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -72,6 +72,9 @@ struct phylink {
+ bool mac_link_dropped;
+
+ struct sfp_bus *sfp_bus;
++ bool sfp_may_have_phy;
++ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
++ u8 sfp_port;
+ };
+
+ static inline void linkmode_zero(unsigned long *dst)
+@@ -1633,7 +1636,7 @@ static void phylink_sfp_detach(void *ups
+ pl->netdev->sfp_bus = NULL;
+ }
+
+-static int phylink_sfp_config(struct phylink *pl, u8 mode, u8 port,
++static int phylink_sfp_config(struct phylink *pl, u8 mode,
+ const unsigned long *supported,
+ const unsigned long *advertising)
+ {
+@@ -1707,7 +1710,7 @@ static int phylink_sfp_config(struct phy
+ phy_modes(config.interface));
+ }
+
+- pl->link_port = port;
++ pl->link_port = pl->sfp_port;
+
+ if (changed && !test_bit(PHYLINK_DISABLE_STOPPED,
+ &pl->phylink_disable_state))
+@@ -1720,15 +1723,20 @@ static int phylink_sfp_module_insert(voi
+ const struct sfp_eeprom_id *id)
+ {
+ struct phylink *pl = upstream;
+- __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+- u8 port;
++ unsigned long *support = pl->sfp_support;
+
+ ASSERT_RTNL();
+
++ linkmode_zero(support);
+ sfp_parse_support(pl->sfp_bus, id, support);
+- port = sfp_parse_port(pl->sfp_bus, id, support);
++ pl->sfp_port = sfp_parse_port(pl->sfp_bus, id, support);
+
+- return phylink_sfp_config(pl, MLO_AN_INBAND, port, support, support);
++ /* If this module may have a PHY connecting later, defer until later */
++ pl->sfp_may_have_phy = sfp_may_have_phy(pl->sfp_bus, id);
++ if (pl->sfp_may_have_phy)
++ return 0;
++
++ return phylink_sfp_config(pl, MLO_AN_INBAND, support, support);
+ }
+
+ static int phylink_sfp_module_start(void *upstream)
+@@ -1736,10 +1744,19 @@ static int phylink_sfp_module_start(void
+ struct phylink *pl = upstream;
+
+ /* If this SFP module has a PHY, start the PHY now. */
+- if (pl->phydev)
++ if (pl->phydev) {
+ phy_start(pl->phydev);
+-
+- return 0;
++ return 0;
++ }
++
++ /* If the module may have a PHY but we didn't detect one we
++ * need to configure the MAC here.
++ */
++ if (!pl->sfp_may_have_phy)
++ return 0;
++
++ return phylink_sfp_config(pl, MLO_AN_INBAND,
++ pl->sfp_support, pl->sfp_support);
+ }
+
+ static void phylink_sfp_module_stop(void *upstream)
+@@ -1773,10 +1790,30 @@ static void phylink_sfp_link_up(void *up
+ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
+ {
+ struct phylink *pl = upstream;
+- phy_interface_t interface = pl->link_config.interface;
++ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
++ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
++ phy_interface_t interface;
+ int ret;
+
+- ret = phylink_attach_phy(pl, phy, pl->link_config.interface);
++ /*
++ * This is the new way of dealing with flow control for PHYs,
++ * as described by Timur Tabi in commit 529ed1275263 ("net: phy:
++ * phy drivers should not set SUPPORTED_[Asym_]Pause") except
++ * using our validate call to the MAC, we rely upon the MAC
++ * clearing the bits from both supported and advertising fields.
++ */
++ phy_support_asym_pause(phy);
++
++ ethtool_convert_legacy_u32_to_link_mode(supported, phy->supported);
++ ethtool_convert_legacy_u32_to_link_mode(advertising, phy->advertising);
++
++ /* Do the initial configuration */
++	ret = phylink_sfp_config(pl, MLO_AN_INBAND, supported, advertising);
++ if (ret < 0)
++ return ret;
++
++ interface = pl->link_config.interface;
++ ret = phylink_attach_phy(pl, phy, interface);
+ if (ret < 0)
+ return ret;
+
+--- a/drivers/net/phy/sfp-bus.c
++++ b/drivers/net/phy/sfp-bus.c
+@@ -102,6 +102,7 @@ static const struct sfp_quirk *sfp_looku
+
+ return NULL;
+ }
++
+ /**
+ * sfp_parse_port() - Parse the EEPROM base ID, setting the port type
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+@@ -178,6 +179,33 @@ int sfp_parse_port(struct sfp_bus *bus,
+ EXPORT_SYMBOL_GPL(sfp_parse_port);
+
+ /**
++ * sfp_may_have_phy() - indicate whether the module may have a PHY
++ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
++ * @id: a pointer to the module's &struct sfp_eeprom_id
++ *
++ * Parse the EEPROM identification given in @id, and return whether
++ * this module may have a PHY.
++ */
++bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id)
++{
++ if (id->base.e1000_base_t)
++ return true;
++
++ if (id->base.phys_id != SFF8024_ID_DWDM_SFP) {
++ switch (id->base.extended_cc) {
++ case SFF8024_ECC_10GBASE_T_SFI:
++ case SFF8024_ECC_10GBASE_T_SR:
++ case SFF8024_ECC_5GBASE_T:
++ case SFF8024_ECC_2_5GBASE_T:
++ return true;
++ }
++ }
++
++ return false;
++}
++EXPORT_SYMBOL_GPL(sfp_may_have_phy);
++
++/**
+ * sfp_parse_support() - Parse the eeprom id for supported link modes
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @id: a pointer to the module's &struct sfp_eeprom_id
+--- a/include/linux/sfp.h
++++ b/include/linux/sfp.h
+@@ -533,6 +533,7 @@ struct sfp_upstream_ops {
+ #if IS_ENABLED(CONFIG_SFP)
+ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ unsigned long *support);
++bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
+ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ unsigned long *support);
+ phy_interface_t sfp_select_interface(struct sfp_bus *bus,
+@@ -556,6 +557,12 @@ static inline int sfp_parse_port(struct
+ return PORT_OTHER;
+ }
+
++static inline bool sfp_may_have_phy(struct sfp_bus *bus,
++ const struct sfp_eeprom_id *id)
++{
++ return false;
++}
++
+ static inline void sfp_parse_support(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id,
+ unsigned long *support)
diff --git a/target/linux/generic/pending-5.4/751-net-phylink-make-Broadcom-BCM84881-based-SFPs-work.patch b/target/linux/generic/pending-5.4/751-net-phylink-make-Broadcom-BCM84881-based-SFPs-work.patch
new file mode 100644
index 0000000000..1724d445b7
--- /dev/null
+++ b/target/linux/generic/pending-5.4/751-net-phylink-make-Broadcom-BCM84881-based-SFPs-work.patch
@@ -0,0 +1,58 @@
+From 3d8592a23dd67fb78ad85ddf711a059d3880fcb4 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Fri, 8 Nov 2019 17:19:16 +0000
+Subject: [PATCH 654/660] net: phylink: make Broadcom BCM84881 based SFPs work
+
+The Broadcom BCM84881 does not appear to send the SGMII control word
+when operating in SGMII mode, which causes network adapters to fail
+to link with the PHY, or decide to operate at fixed 1G speed, even if
+the PHY negotiated 100M.
+
+Work around this by detecting the Broadcom BCM84881 and switching to
+phy mode rather than inband mode.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/phylink.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -1787,12 +1787,22 @@ static void phylink_sfp_link_up(void *up
+ phylink_run_resolve(pl);
+ }
+
++/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide a SGMII
++ * or 802.3z control word, so inband will not work.
++ */
++static bool phylink_phy_no_inband(struct phy_device *phy)
++{
++ return phy->is_c45 &&
++ (phy->c45_ids.device_ids[1] & 0xfffffff0) == 0xae025150;
++}
++
+ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
+ {
+ struct phylink *pl = upstream;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ phy_interface_t interface;
++ u8 mode;
+ int ret;
+
+ /*
+@@ -1807,8 +1817,13 @@ static int phylink_sfp_connect_phy(void
+ ethtool_convert_legacy_u32_to_link_mode(supported, phy->supported);
+ ethtool_convert_legacy_u32_to_link_mode(advertising, phy->advertising);
+
++ if (phylink_phy_no_inband(phy))
++ mode = MLO_AN_PHY;
++ else
++ mode = MLO_AN_INBAND;
++
+ /* Do the initial configuration */
+-	ret = phylink_sfp_config(pl, MLO_AN_INBAND, supported, advertising);
++ ret = phylink_sfp_config(pl, mode, supported, advertising);
+ if (ret < 0)
+ return ret;
+
diff --git a/target/linux/generic/pending-5.4/752-net-phy-add-Broadcom-BCM84881-PHY-driver.patch b/target/linux/generic/pending-5.4/752-net-phy-add-Broadcom-BCM84881-PHY-driver.patch
new file mode 100644
index 0000000000..a69cf397aa
--- /dev/null
+++ b/target/linux/generic/pending-5.4/752-net-phy-add-Broadcom-BCM84881-PHY-driver.patch
@@ -0,0 +1,333 @@
+From 0f669e10ede7f06bb998373de6a9d169f47fcc66 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Tue, 5 Nov 2019 11:54:30 +0000
+Subject: [PATCH 655/660] net: phy: add Broadcom BCM84881 PHY driver
+
+Add a rudimentary Clause 45 driver for the BCM84881 PHY, found on
+Methode DM7052 SFPs.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/Kconfig | 5 +
+ drivers/net/phy/Makefile | 1 +
+ drivers/net/phy/bcm84881.c | 290 +++++++++++++++++++++++++++++++++++++
+ 3 files changed, 296 insertions(+)
+ create mode 100644 drivers/net/phy/bcm84881.c
+
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -280,6 +280,11 @@ config BROADCOM_PHY
+ Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
+ BCM5481, BCM54810 and BCM5482 PHYs.
+
++config BCM84881_PHY
++ tristate "Broadcom BCM84881 PHY"
++ ---help---
++ Support the Broadcom BCM84881 PHY.
++
+ config CICADA_PHY
+ tristate "Cicada PHYs"
+ ---help---
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -54,6 +54,7 @@ obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
+ obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o
+ obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
+ obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
++obj-$(CONFIG_BCM84881_PHY) += bcm84881.o
+ obj-$(CONFIG_CICADA_PHY) += cicada.o
+ obj-$(CONFIG_CORTINA_PHY) += cortina.o
+ obj-$(CONFIG_DAVICOM_PHY) += davicom.o
+--- /dev/null
++++ b/drivers/net/phy/bcm84881.c
+@@ -0,0 +1,290 @@
++// SPDX-License-Identifier: GPL-2.0
++// Broadcom BCM84881 NBASE-T PHY driver, as found on a SFP+ module.
++// Copyright (C) 2019 Russell King, Deep Blue Solutions Ltd.
++//
++// Like the Marvell 88x3310, the Broadcom 84881 changes its host-side
++// interface according to the operating speed between 10GBASE-R,
++// 2500BASE-X and SGMII (but unlike the 88x3310, without the control
++// word).
++//
++// This driver only supports those aspects of the PHY that I'm able to
++// observe and test with the SFP+ module, which is an incomplete subset
++// of what this PHY is able to support. For example, I only assume it
++// supports a single lane Serdes connection, but it may be that the PHY
++// is able to support more than that.
++#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/phy.h>
++
++enum {
++ MDIO_AN_C22 = 0xffe0,
++};
++
++static int bcm84881_wait_init(struct phy_device *phydev)
++{
++ unsigned int tries = 20;
++ int ret, val;
++
++ do {
++ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
++ if (val < 0) {
++ ret = val;
++ break;
++ }
++ if (!(val & MDIO_CTRL1_RESET)) {
++ ret = 0;
++ break;
++ }
++ if (!--tries) {
++ ret = -ETIMEDOUT;
++ break;
++ }
++ msleep(100);
++ } while (1);
++
++ if (ret)
++ phydev_err(phydev, "%s failed: %d\n", __func__, ret);
++
++ return ret;
++}
++
++static int bcm84881_config_init(struct phy_device *phydev)
++{
++ switch (phydev->interface) {
++ case PHY_INTERFACE_MODE_SGMII:
++ case PHY_INTERFACE_MODE_2500BASEX:
++ case PHY_INTERFACE_MODE_10GKR:
++ break;
++ default:
++ return -ENODEV;
++ }
++ return 0;
++}
++
++static int bcm84881_probe(struct phy_device *phydev)
++{
++ /* This driver requires PMAPMD and AN blocks */
++ const u32 mmd_mask = MDIO_DEVS_PMAPMD | MDIO_DEVS_AN;
++
++ if (!phydev->is_c45 ||
++ (phydev->c45_ids.devices_in_package & mmd_mask) != mmd_mask)
++ return -ENODEV;
++
++ return 0;
++}
++
++static int genphy_c45_an_config_aneg(struct phy_device *phydev)
++{
++ bool changed = false;
++ u32 advertising;
++ int ret;
++
++ phydev->advertising &= phydev->supported;
++ advertising = phydev->advertising;
++
++ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
++ ADVERTISE_ALL | ADVERTISE_100BASE4 |
++ ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
++ ethtool_adv_to_mii_adv_t(advertising));
++ if (ret < 0)
++ return ret;
++ if (ret > 0)
++ changed = true;
++
++ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
++ MDIO_AN_10GBT_CTRL_ADV10G,
++ advertising & ADVERTISED_10000baseT_Full ?
++ MDIO_AN_10GBT_CTRL_ADV10G : 0);
++ if (ret < 0)
++ return ret;
++ if (ret > 0)
++ changed = true;
++
++ return genphy_c45_check_and_restart_aneg(phydev, changed);
++}
++
++static int bcm84881_config_aneg(struct phy_device *phydev)
++{
++ bool changed = false;
++ u32 adv;
++ int ret;
++
++ /* Wait for the PHY to finish initialising, otherwise our
++ * advertisement may be overwritten.
++ */
++ ret = bcm84881_wait_init(phydev);
++ if (ret)
++ return ret;
++
++ /* We don't support manual MDI control */
++ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
++
++ /* disabled autoneg doesn't seem to work with this PHY */
++ if (phydev->autoneg == AUTONEG_DISABLE)
++ return -EINVAL;
++
++ ret = genphy_c45_an_config_aneg(phydev);
++ if (ret < 0)
++ return ret;
++ if (ret > 0)
++ changed = true;
++
++ adv = ethtool_adv_to_mii_ctrl1000_t(phydev->advertising);
++ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN,
++ MDIO_AN_C22 + MII_CTRL1000,
++ ADVERTISE_1000FULL | ADVERTISE_1000HALF,
++ adv);
++ if (ret < 0)
++ return ret;
++ if (ret > 0)
++ changed = true;
++
++ return genphy_c45_check_and_restart_aneg(phydev, changed);
++}
++
++static int bcm84881_aneg_done(struct phy_device *phydev)
++{
++ int bmsr, val;
++
++ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
++ if (val < 0)
++ return val;
++
++ bmsr = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_C22 + MII_BMSR);
++ if (bmsr < 0)
++ return val;
++
++ return !!(val & MDIO_AN_STAT1_COMPLETE) &&
++ !!(bmsr & BMSR_ANEGCOMPLETE);
++}
++
++static int bcm84881_read_status(struct phy_device *phydev)
++{
++ bool autoneg_complete;
++ unsigned int mode;
++ int bmsr, val;
++
++ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
++ if (val < 0)
++ return val;
++
++ if (val & MDIO_AN_CTRL1_RESTART) {
++ phydev->link = 0;
++ return 0;
++ }
++
++ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
++ if (val < 0)
++ return val;
++
++ bmsr = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_C22 + MII_BMSR);
++ if (bmsr < 0)
++ return val;
++
++ autoneg_complete = !!(val & MDIO_AN_STAT1_COMPLETE) &&
++ !!(bmsr & BMSR_ANEGCOMPLETE);
++ phydev->link = !!(val & MDIO_STAT1_LSTATUS) &&
++ !!(bmsr & BMSR_LSTATUS);
++ if (phydev->autoneg == AUTONEG_ENABLE && !autoneg_complete)
++ phydev->link = false;
++
++ if (!phydev->link)
++ return 0;
++
++ phydev->lp_advertising = 0;
++ phydev->speed = SPEED_UNKNOWN;
++ phydev->duplex = DUPLEX_UNKNOWN;
++ phydev->pause = 0;
++ phydev->asym_pause = 0;
++ phydev->mdix = 0;
++
++ if (autoneg_complete) {
++ val = genphy_c45_read_lpa(phydev);
++ if (val < 0)
++ return val;
++
++ val = phy_read_mmd(phydev, MDIO_MMD_AN,
++ MDIO_AN_C22 + MII_STAT1000);
++ if (val < 0)
++ return val;
++
++ phydev->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(val);
++
++ if (phydev->autoneg == AUTONEG_ENABLE)
++ phy_resolve_aneg_linkmode(phydev);
++ }
++
++ if (phydev->autoneg == AUTONEG_DISABLE) {
++ /* disabled autoneg doesn't seem to work, so force the link
++ * down.
++ */
++ phydev->link = 0;
++ return 0;
++ }
++
++ /* Set the host link mode - we set the phy interface mode and
++ * the speed according to this register so that downshift works.
++ * We leave the duplex setting as per the resolution from the
++ * above.
++ */
++ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, 0x4011);
++ mode = (val & 0x1e) >> 1;
++ if (mode == 1 || mode == 2)
++ phydev->interface = PHY_INTERFACE_MODE_SGMII;
++ else if (mode == 3)
++ phydev->interface = PHY_INTERFACE_MODE_10GKR;
++ else if (mode == 4)
++ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
++ switch (mode & 7) {
++ case 1:
++ phydev->speed = SPEED_100;
++ break;
++ case 2:
++ phydev->speed = SPEED_1000;
++ break;
++ case 3:
++ phydev->speed = SPEED_10000;
++ break;
++ case 4:
++ phydev->speed = SPEED_2500;
++ break;
++ case 5:
++ phydev->speed = SPEED_5000;
++ break;
++ }
++
++ return genphy_c45_read_mdix(phydev);
++}
++
++static struct phy_driver bcm84881_drivers[] = {
++ {
++ .phy_id = 0xae025150,
++ .phy_id_mask = 0xfffffff0,
++ .name = "Broadcom BCM84881",
++ .features = SUPPORTED_100baseT_Full |
++ SUPPORTED_100baseT_Half |
++ SUPPORTED_1000baseT_Full |
++ SUPPORTED_Autoneg |
++ SUPPORTED_TP |
++ SUPPORTED_FIBRE |
++ SUPPORTED_10000baseT_Full |
++ SUPPORTED_Backplane,
++ .config_init = bcm84881_config_init,
++ .probe = bcm84881_probe,
++ .config_aneg = bcm84881_config_aneg,
++ .aneg_done = bcm84881_aneg_done,
++ .read_status = bcm84881_read_status,
++ },
++};
++
++module_phy_driver(bcm84881_drivers);
++
++/* FIXME: module auto-loading for Clause 45 PHYs seems non-functional */
++static struct mdio_device_id __maybe_unused bcm84881_tbl[] = {
++ { 0xae025150, 0xfffffff0 },
++ { },
++};
++MODULE_AUTHOR("Russell King");
++MODULE_DESCRIPTION("Broadcom BCM84881 PHY driver");
++MODULE_DEVICE_TABLE(mdio, bcm84881_tbl);
++MODULE_LICENSE("GPL");
diff --git a/target/linux/generic/pending-5.4/753-net-sfp-add-support-for-Clause-45-PHYs.patch b/target/linux/generic/pending-5.4/753-net-sfp-add-support-for-Clause-45-PHYs.patch
new file mode 100644
index 0000000000..ea26770841
--- /dev/null
+++ b/target/linux/generic/pending-5.4/753-net-sfp-add-support-for-Clause-45-PHYs.patch
@@ -0,0 +1,94 @@
+From 6df6709dc3d00e0bc948d45dfa8d8f18ba379c48 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Tue, 5 Nov 2019 11:56:18 +0000
+Subject: [PATCH 656/660] net: sfp: add support for Clause 45 PHYs
+
+Some SFP+ modules have a Clause 45 PHY onboard, which is accessible via
+the normal I2C address. Detect 10G BASE-T modules which may have an
+accessible PHY and probe for it.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/sfp.c | 44 +++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 40 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -1337,12 +1337,12 @@ static void sfp_sm_phy_detach(struct sfp
+ sfp->mod_phy = NULL;
+ }
+
+-static void sfp_sm_probe_phy(struct sfp *sfp)
++static void sfp_sm_probe_phy(struct sfp *sfp, bool is_c45)
+ {
+ struct phy_device *phy;
+ int err;
+
+- phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR);
++ phy = get_phy_device(sfp->i2c_mii, SFP_PHY_ADDR, is_c45);
+ if (phy == ERR_PTR(-ENODEV)) {
+ dev_info(sfp->dev, "no PHY detected\n");
+ return;
+@@ -1352,6 +1352,13 @@ static void sfp_sm_probe_phy(struct sfp
+ return;
+ }
+
++ err = phy_device_register(phy);
++ if (err) {
++ phy_device_free(phy);
++ dev_err(sfp->dev, "phy_device_register failed: %d\n", err);
++ return;
++ }
++
+ err = sfp_add_phy(sfp->sfp_bus, phy);
+ if (err) {
+ phy_device_remove(phy);
+@@ -1422,10 +1429,32 @@ static void sfp_sm_fault(struct sfp *sfp
+ }
+ }
+
++/* Probe a SFP for a PHY device if the module supports copper - the PHY
++ * normally sits at I2C bus address 0x56, and may either be a clause 22
++ * or clause 45 PHY.
++ *
++ * Clause 22 copper SFP modules normally operate in Cisco SGMII mode with
++ * negotiation enabled, but some may be in 1000base-X - which is for the
++ * PHY driver to determine.
++ *
++ * Clause 45 copper SFP+ modules (10G) appear to switch their interface
++ * mode according to the negotiated line speed.
++ */
+ static void sfp_sm_probe_for_phy(struct sfp *sfp)
+ {
+- if (sfp->id.base.e1000_base_t)
+- sfp_sm_probe_phy(sfp);
++ switch (sfp->id.base.extended_cc) {
++ case SFF8024_ECC_10GBASE_T_SFI:
++ case SFF8024_ECC_10GBASE_T_SR:
++ case SFF8024_ECC_5GBASE_T:
++ case SFF8024_ECC_2_5GBASE_T:
++ sfp_sm_probe_phy(sfp, true);
++ break;
++
++ default:
++ if (sfp->id.base.e1000_base_t)
++ sfp_sm_probe_phy(sfp, false);
++ break;
++ }
+ }
+
+ static int sfp_module_parse_power(struct sfp *sfp)
+@@ -1485,6 +1514,13 @@ static int sfp_sm_mod_hpower(struct sfp
+ return -EAGAIN;
+ }
+
++ /* DM7052 reports as a high power module, responds to reads (with
++ * all bytes 0xff) at 0x51 but does not accept writes. In any case,
++ * if the bit is already set, we're already in high power mode.
++ */
++ if (!!(val & BIT(0)) == enable)
++ return 0;
++
+ if (enable)
+ val |= BIT(0);
+ else
diff --git a/target/linux/generic/pending-5.4/754-net-sfp-fix-unbind.patch b/target/linux/generic/pending-5.4/754-net-sfp-fix-unbind.patch
new file mode 100644
index 0000000000..55fabafa81
--- /dev/null
+++ b/target/linux/generic/pending-5.4/754-net-sfp-fix-unbind.patch
@@ -0,0 +1,28 @@
+From 729fd05aac22cdf1e502fbf1bf80e5ebba0d9fbc Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Tue, 3 Dec 2019 17:48:28 +0000
+Subject: [PATCH] net: sfp: fix unbind
+
+When unbinding, we don't correctly tear down the module state, leaving
+(for example) the hwmon registration behind. Ensure everything is
+properly removed by sending a remove event at unbind.
+
+Fixes: 6b0da5c9c1a3 ("net: sfp: track upstream's attachment state in state machine")
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/sfp.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -2260,6 +2260,10 @@ static int sfp_remove(struct platform_de
+
+ sfp_unregister_socket(sfp->sfp_bus);
+
++ rtnl_lock();
++ sfp_sm_event(sfp, SFP_E_REMOVE);
++ rtnl_unlock();
++
+ return 0;
+ }
+
diff --git a/target/linux/generic/pending-5.4/755-net-sfp-fix-hwmon.patch b/target/linux/generic/pending-5.4/755-net-sfp-fix-hwmon.patch
new file mode 100644
index 0000000000..d003df4e13
--- /dev/null
+++ b/target/linux/generic/pending-5.4/755-net-sfp-fix-hwmon.patch
@@ -0,0 +1,44 @@
+From 5eb0df5023c6ae8a71a7848fd5e1f788d86e51ae Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Tue, 3 Dec 2019 18:46:04 +0000
+Subject: [PATCH] net: sfp: fix hwmon
+
+The referenced commit below allowed more than one hwmon device to be
+created per SFP, which is definitely not what we want. Avoid this by
+only creating the hwmon device just as we transition to WAITDEV state.
+
+Fixes: 139d3a212a1f ("net: sfp: allow modules with slow diagnostics to probe")
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/sfp.c | 13 ++++---------
+ 1 file changed, 4 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -1731,6 +1731,10 @@ static void sfp_sm_module(struct sfp *sf
+ break;
+ }
+
++ err = sfp_hwmon_insert(sfp);
++ if (err)
++ dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
++
+ sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0);
+ /* fall through */
+ case SFP_MOD_WAITDEV:
+@@ -1780,15 +1784,6 @@ static void sfp_sm_module(struct sfp *sf
+ case SFP_MOD_ERROR:
+ break;
+ }
+-
+-#if IS_ENABLED(CONFIG_HWMON)
+- if (sfp->sm_mod_state >= SFP_MOD_WAITDEV &&
+- IS_ERR_OR_NULL(sfp->hwmon_dev)) {
+- err = sfp_hwmon_insert(sfp);
+- if (err)
+- dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
+- }
+-#endif
+ }
+
+ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
diff --git a/target/linux/generic/pending-5.4/756-net-sfp-use-a-definition-for-the-fault-recovery-atte.patch b/target/linux/generic/pending-5.4/756-net-sfp-use-a-definition-for-the-fault-recovery-atte.patch
new file mode 100644
index 0000000000..8362f73f46
--- /dev/null
+++ b/target/linux/generic/pending-5.4/756-net-sfp-use-a-definition-for-the-fault-recovery-atte.patch
@@ -0,0 +1,55 @@
+From 4d6bfb6fbb00af38402db4d1ce464e22def9fd9e Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Thu, 28 Nov 2019 14:24:40 +0000
+Subject: [PATCH 1/4] net: sfp: use a definition for the fault recovery
+ attempts
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/sfp.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -170,6 +170,14 @@ static const enum gpiod_flags gpio_flags
+ #define T_RESET_US 10
+ #define T_FAULT_RECOVER msecs_to_jiffies(1000)
+
++/* N_FAULT_INIT is the number of recovery attempts at module initialisation
++ * time. If the TX_FAULT signal is not deasserted after this number of
++ * attempts at clearing it, we decide that the module is faulty.
++ * N_FAULT is the same but after the module has initialised.
++ */
++#define N_FAULT_INIT 5
++#define N_FAULT 5
++
+ /* SFP module presence detection is poor: the three MOD DEF signals are
+ * the same length on the PCB, which means it's possible for MOD DEF 0 to
+ * connect before the I2C bus on MOD DEF 1/2.
+@@ -1820,7 +1828,7 @@ static void sfp_sm_main(struct sfp *sfp,
+ sfp_module_tx_enable(sfp);
+
+ /* Initialise the fault clearance retries */
+- sfp->sm_retries = 5;
++ sfp->sm_retries = N_FAULT_INIT;
+
+ /* We need to check the TX_FAULT state, which is not defined
+ * while TX_DISABLE is asserted. The earliest we want to do
+@@ -1860,7 +1868,7 @@ static void sfp_sm_main(struct sfp *sfp,
+ * or t_start_up, so assume there is a fault.
+ */
+ sfp_sm_fault(sfp, SFP_S_INIT_TX_FAULT,
+- sfp->sm_retries == 5);
++ sfp->sm_retries == N_FAULT_INIT);
+ } else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) {
+ init_done: /* TX_FAULT deasserted or we timed out with TX_FAULT
+ * clear. Probe for the PHY and check the LOS state.
+@@ -1873,7 +1881,7 @@ static void sfp_sm_main(struct sfp *sfp,
+ sfp_sm_link_check_los(sfp);
+
+ /* Reset the fault retry count */
+- sfp->sm_retries = 5;
++ sfp->sm_retries = N_FAULT;
+ }
+ break;
+
diff --git a/target/linux/generic/pending-5.4/757-net-sfp-rename-sm_retries.patch b/target/linux/generic/pending-5.4/757-net-sfp-rename-sm_retries.patch
new file mode 100644
index 0000000000..c6f43729da
--- /dev/null
+++ b/target/linux/generic/pending-5.4/757-net-sfp-rename-sm_retries.patch
@@ -0,0 +1,60 @@
+From bfa3cbb01c7ea34d7369c9bd2ec1b2dc67082b04 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Mon, 2 Dec 2019 18:06:44 +0000
+Subject: [PATCH 2/4] net: sfp: rename sm_retries
+
+Rename sm_retries as sm_fault_retries, as this is what this member is
+tracking.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/sfp.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -232,7 +232,7 @@ struct sfp {
+ unsigned char sm_mod_tries;
+ unsigned char sm_dev_state;
+ unsigned short sm_state;
+- unsigned int sm_retries;
++ unsigned char sm_fault_retries;
+
+ struct sfp_eeprom_id id;
+ unsigned int module_power_mW;
+@@ -1425,7 +1425,7 @@ static bool sfp_los_event_inactive(struc
+
+ static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
+ {
+- if (sfp->sm_retries && !--sfp->sm_retries) {
++ if (sfp->sm_fault_retries && !--sfp->sm_fault_retries) {
+ dev_err(sfp->dev,
+ "module persistently indicates fault, disabling\n");
+ sfp_sm_next(sfp, SFP_S_TX_DISABLE, 0);
+@@ -1828,7 +1828,7 @@ static void sfp_sm_main(struct sfp *sfp,
+ sfp_module_tx_enable(sfp);
+
+ /* Initialise the fault clearance retries */
+- sfp->sm_retries = N_FAULT_INIT;
++ sfp->sm_fault_retries = N_FAULT_INIT;
+
+ /* We need to check the TX_FAULT state, which is not defined
+ * while TX_DISABLE is asserted. The earliest we want to do
+@@ -1868,7 +1868,7 @@ static void sfp_sm_main(struct sfp *sfp,
+ * or t_start_up, so assume there is a fault.
+ */
+ sfp_sm_fault(sfp, SFP_S_INIT_TX_FAULT,
+- sfp->sm_retries == N_FAULT_INIT);
++ sfp->sm_fault_retries == N_FAULT_INIT);
+ } else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) {
+ init_done: /* TX_FAULT deasserted or we timed out with TX_FAULT
+ * clear. Probe for the PHY and check the LOS state.
+@@ -1881,7 +1881,7 @@ static void sfp_sm_main(struct sfp *sfp,
+ sfp_sm_link_check_los(sfp);
+
+ /* Reset the fault retry count */
+- sfp->sm_retries = N_FAULT;
++ sfp->sm_fault_retries = N_FAULT;
+ }
+ break;
+
diff --git a/target/linux/generic/pending-5.4/758-net-sfp-error-handling-for-phy-probe.patch b/target/linux/generic/pending-5.4/758-net-sfp-error-handling-for-phy-probe.patch
new file mode 100644
index 0000000000..8191e622a1
--- /dev/null
+++ b/target/linux/generic/pending-5.4/758-net-sfp-error-handling-for-phy-probe.patch
@@ -0,0 +1,97 @@
+From 1fba543dc8edf4a43bff3276306648bb27c1e207 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Fri, 29 Nov 2019 00:30:08 +0000
+Subject: [PATCH 3/4] net: sfp: error handling for phy probe
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/sfp.c | 26 +++++++++++++++++---------
+ 1 file changed, 17 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -1345,7 +1345,7 @@ static void sfp_sm_phy_detach(struct sfp
+ sfp->mod_phy = NULL;
+ }
+
+-static void sfp_sm_probe_phy(struct sfp *sfp, bool is_c45)
++static int sfp_sm_probe_phy(struct sfp *sfp, bool is_c45)
+ {
+ struct phy_device *phy;
+ int err;
+@@ -1353,18 +1353,18 @@ static void sfp_sm_probe_phy(struct sfp
+ phy = get_phy_device(sfp->i2c_mii, SFP_PHY_ADDR, is_c45);
+ if (phy == ERR_PTR(-ENODEV)) {
+ dev_info(sfp->dev, "no PHY detected\n");
+- return;
++ return 0;
+ }
+ if (IS_ERR(phy)) {
+ dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy));
+- return;
++ return PTR_ERR(phy);
+ }
+
+ err = phy_device_register(phy);
+ if (err) {
+ phy_device_free(phy);
+ dev_err(sfp->dev, "phy_device_register failed: %d\n", err);
+- return;
++ return err;
+ }
+
+ err = sfp_add_phy(sfp->sfp_bus, phy);
+@@ -1372,10 +1372,12 @@ static void sfp_sm_probe_phy(struct sfp
+ phy_device_remove(phy);
+ phy_device_free(phy);
+ dev_err(sfp->dev, "sfp_add_phy failed: %d\n", err);
+- return;
++ return err;
+ }
+
+ sfp->mod_phy = phy;
++
++ return 0;
+ }
+
+ static void sfp_sm_link_up(struct sfp *sfp)
+@@ -1448,21 +1450,24 @@ static void sfp_sm_fault(struct sfp *sfp
+ * Clause 45 copper SFP+ modules (10G) appear to switch their interface
+ * mode according to the negotiated line speed.
+ */
+-static void sfp_sm_probe_for_phy(struct sfp *sfp)
++static int sfp_sm_probe_for_phy(struct sfp *sfp)
+ {
++ int err = 0;
++
+ switch (sfp->id.base.extended_cc) {
+ case SFF8024_ECC_10GBASE_T_SFI:
+ case SFF8024_ECC_10GBASE_T_SR:
+ case SFF8024_ECC_5GBASE_T:
+ case SFF8024_ECC_2_5GBASE_T:
+- sfp_sm_probe_phy(sfp, true);
++ err = sfp_sm_probe_phy(sfp, true);
+ break;
+
+ default:
+ if (sfp->id.base.e1000_base_t)
+- sfp_sm_probe_phy(sfp, false);
++ err = sfp_sm_probe_phy(sfp, false);
+ break;
+ }
++ return err;
+ }
+
+ static int sfp_module_parse_power(struct sfp *sfp)
+@@ -1873,7 +1878,10 @@ static void sfp_sm_main(struct sfp *sfp,
+ init_done: /* TX_FAULT deasserted or we timed out with TX_FAULT
+ * clear. Probe for the PHY and check the LOS state.
+ */
+- sfp_sm_probe_for_phy(sfp);
++ if (sfp_sm_probe_for_phy(sfp)) {
++ sfp_sm_next(sfp, SFP_S_FAIL, 0);
++ break;
++ }
+ if (sfp_module_start(sfp->sfp_bus)) {
+ sfp_sm_next(sfp, SFP_S_FAIL, 0);
+ break;
diff --git a/target/linux/generic/pending-5.4/759-net-sfp-re-attempt-probing-for-phy.patch b/target/linux/generic/pending-5.4/759-net-sfp-re-attempt-probing-for-phy.patch
new file mode 100644
index 0000000000..46d987344f
--- /dev/null
+++ b/target/linux/generic/pending-5.4/759-net-sfp-re-attempt-probing-for-phy.patch
@@ -0,0 +1,132 @@
+From 6c4efe83a0acf6f06c89ae17b885fa5739eb5be7 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Mon, 2 Dec 2019 18:20:22 +0000
+Subject: [PATCH 4/4] net: sfp: re-attempt probing for phy
+
+Some 1000BASE-T PHY modules take a while for the PHY to wake up.
+Retry the probe a number of times before deciding that the module has
+no PHY.
+
+Tested with:
+ Sourcephotonics SPGBTXCNFC - PHY takes less than 50ms to respond.
+ Champion One 1000SFPT - PHY takes about 200ms to respond.
+ Mikrotik S-RJ01 - no PHY
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/phy/sfp.c | 59 ++++++++++++++++++++++++++++++++++++---------------
+ 1 file changed, 42 insertions(+), 17 deletions(-)
+
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -60,6 +60,7 @@ enum {
+ SFP_S_FAIL,
+ SFP_S_WAIT,
+ SFP_S_INIT,
++ SFP_S_INIT_PHY,
+ SFP_S_INIT_TX_FAULT,
+ SFP_S_WAIT_LOS,
+ SFP_S_LINK_UP,
+@@ -124,6 +125,7 @@ static const char * const sm_state_strin
+ [SFP_S_FAIL] = "fail",
+ [SFP_S_WAIT] = "wait",
+ [SFP_S_INIT] = "init",
++ [SFP_S_INIT_PHY] = "init_phy",
+ [SFP_S_INIT_TX_FAULT] = "init_tx_fault",
+ [SFP_S_WAIT_LOS] = "wait_los",
+ [SFP_S_LINK_UP] = "link_up",
+@@ -178,6 +180,12 @@ static const enum gpiod_flags gpio_flags
+ #define N_FAULT_INIT 5
+ #define N_FAULT 5
+
++/* T_PHY_RETRY is the time interval between attempts to probe the PHY.
++ * R_PHY_RETRY is the number of attempts.
++ */
++#define T_PHY_RETRY msecs_to_jiffies(50)
++#define R_PHY_RETRY 12
++
+ /* SFP module presence detection is poor: the three MOD DEF signals are
+ * the same length on the PCB, which means it's possible for MOD DEF 0 to
+ * connect before the I2C bus on MOD DEF 1/2.
+@@ -233,6 +241,7 @@ struct sfp {
+ unsigned char sm_dev_state;
+ unsigned short sm_state;
+ unsigned char sm_fault_retries;
++ unsigned char sm_phy_retries;
+
+ struct sfp_eeprom_id id;
+ unsigned int module_power_mW;
+@@ -1351,10 +1360,8 @@ static int sfp_sm_probe_phy(struct sfp *
+ int err;
+
+ phy = get_phy_device(sfp->i2c_mii, SFP_PHY_ADDR, is_c45);
+- if (phy == ERR_PTR(-ENODEV)) {
+- dev_info(sfp->dev, "no PHY detected\n");
+- return 0;
+- }
++ if (phy == ERR_PTR(-ENODEV))
++ return PTR_ERR(phy);
+ if (IS_ERR(phy)) {
+ dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy));
+ return PTR_ERR(phy);
+@@ -1802,6 +1809,7 @@ static void sfp_sm_module(struct sfp *sf
+ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
+ {
+ unsigned long timeout;
++ int ret;
+
+ /* Some events are global */
+ if (sfp->sm_state != SFP_S_DOWN &&
+@@ -1875,22 +1883,39 @@ static void sfp_sm_main(struct sfp *sfp,
+ sfp_sm_fault(sfp, SFP_S_INIT_TX_FAULT,
+ sfp->sm_fault_retries == N_FAULT_INIT);
+ } else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) {
+- init_done: /* TX_FAULT deasserted or we timed out with TX_FAULT
+- * clear. Probe for the PHY and check the LOS state.
+- */
+- if (sfp_sm_probe_for_phy(sfp)) {
+- sfp_sm_next(sfp, SFP_S_FAIL, 0);
+- break;
+- }
+- if (sfp_module_start(sfp->sfp_bus)) {
+- sfp_sm_next(sfp, SFP_S_FAIL, 0);
++ init_done:
++ sfp->sm_phy_retries = R_PHY_RETRY;
++ goto phy_probe;
++ }
++ break;
++
++ case SFP_S_INIT_PHY:
++ if (event != SFP_E_TIMEOUT)
++ break;
++ phy_probe:
++ /* TX_FAULT deasserted or we timed out with TX_FAULT
++ * clear. Probe for the PHY and check the LOS state.
++ */
++ ret = sfp_sm_probe_for_phy(sfp);
++ if (ret == -ENODEV) {
++ if (--sfp->sm_phy_retries) {
++ sfp_sm_next(sfp, SFP_S_INIT_PHY, T_PHY_RETRY);
+ break;
++ } else {
++ dev_info(sfp->dev, "no PHY detected\n");
+ }
+- sfp_sm_link_check_los(sfp);
+-
+- /* Reset the fault retry count */
+- sfp->sm_fault_retries = N_FAULT;
++ } else if (ret) {
++ sfp_sm_next(sfp, SFP_S_FAIL, 0);
++ break;
+ }
++ if (sfp_module_start(sfp->sfp_bus)) {
++ sfp_sm_next(sfp, SFP_S_FAIL, 0);
++ break;
++ }
++ sfp_sm_link_check_los(sfp);
++
++ /* Reset the fault retry count */
++ sfp->sm_fault_retries = N_FAULT;
+ break;
+
+ case SFP_S_INIT_TX_FAULT:
diff --git a/target/linux/generic/pending-5.4/800-bcma-get-SoC-device-struct-copy-its-DMA-params-to-th.patch b/target/linux/generic/pending-5.4/800-bcma-get-SoC-device-struct-copy-its-DMA-params-to-th.patch
new file mode 100644
index 0000000000..cb02c71829
--- /dev/null
+++ b/target/linux/generic/pending-5.4/800-bcma-get-SoC-device-struct-copy-its-DMA-params-to-th.patch
@@ -0,0 +1,70 @@
+From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
+Subject: [PATCH] bcma: get SoC device struct & copy its DMA params to the
+ subdevices
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+For bus devices to be fully usable it's required to set their DMA
+parameters.
+
+For years it has been missing and remained unnoticed because of
+mips_dma_alloc_coherent() silently handling the empty coherent_dma_mask.
+Kernel 4.19 came with a lot of DMA changes and caused a regression on
+the bcm47xx. Starting with the commit f8c55dc6e828 ("MIPS: use generic
+dma noncoherent ops for simple noncoherent platforms") DMA coherent
+allocations just fail. Example:
+[ 1.114914] bgmac_bcma bcma0:2: Allocation of TX ring 0x200 failed
+[ 1.121215] bgmac_bcma bcma0:2: Unable to alloc memory for DMA
+[ 1.127626] bgmac_bcma: probe of bcma0:2 failed with error -12
+[ 1.133838] bgmac_bcma: Broadcom 47xx GBit MAC driver loaded
+
+This change fixes above regression in addition to the MIPS bcm47xx
+commit 321c46b91550 ("MIPS: BCM47XX: Setup struct device for the SoC").
+
+It also fixes another *old* GPIO regression caused by a parent pointing
+to the NULL:
+[ 0.157054] missing gpiochip .dev parent pointer
+[ 0.157287] bcma: bus0: Error registering GPIO driver: -22
+introduced by the commit 74f4e0cc6108 ("bcma: switch GPIO portions to
+use GPIOLIB_IRQCHIP").
+
+Fixes: f8c55dc6e828 ("MIPS: use generic dma noncoherent ops for simple noncoherent platforms")
+Fixes: 74f4e0cc6108 ("bcma: switch GPIO portions to use GPIOLIB_IRQCHIP")
+Cc: linux-mips@linux-mips.org
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
+---
+
+--- a/drivers/bcma/host_soc.c
++++ b/drivers/bcma/host_soc.c
+@@ -191,6 +191,8 @@ int __init bcma_host_soc_init(struct bcm
+ struct bcma_bus *bus = &soc->bus;
+ int err;
+
++ bus->dev = soc->dev;
++
+ /* Scan bus and initialize it */
+ err = bcma_bus_early_register(bus);
+ if (err)
+--- a/drivers/bcma/main.c
++++ b/drivers/bcma/main.c
+@@ -236,12 +236,16 @@ EXPORT_SYMBOL(bcma_core_irq);
+
+ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
+ {
++ struct device *dev = &core->dev;
++
+ core->dev.release = bcma_release_core_dev;
+ core->dev.bus = &bcma_bus_type;
+ dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
+ core->dev.parent = bus->dev;
+- if (bus->dev)
++ if (bus->dev) {
+ bcma_of_fill_device(bus->dev, core);
++ dma_coerce_mask_and_coherent(dev, bus->dev->coherent_dma_mask);
++ }
+
+ switch (bus->hosttype) {
+ case BCMA_HOSTTYPE_PCI:
diff --git a/target/linux/generic/pending-5.4/810-pci_disable_common_quirks.patch b/target/linux/generic/pending-5.4/810-pci_disable_common_quirks.patch
new file mode 100644
index 0000000000..c8c28b5175
--- /dev/null
+++ b/target/linux/generic/pending-5.4/810-pci_disable_common_quirks.patch
@@ -0,0 +1,62 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: debloat: add kernel config option to disable common PCI quirks
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/pci/Kconfig | 6 ++++++
+ drivers/pci/quirks.c | 6 ++++++
+ 2 files changed, 12 insertions(+)
+
+--- a/drivers/pci/Kconfig
++++ b/drivers/pci/Kconfig
+@@ -89,6 +89,13 @@ config XEN_PCIDEV_FRONTEND
+ The PCI device frontend driver allows the kernel to import arbitrary
+ PCI devices from a PCI backend to support PCI driver domains.
+
++config PCI_DISABLE_COMMON_QUIRKS
++ bool "PCI disable common quirks"
++ depends on PCI
++ help
++ If you don't know what to do here, say N.
++
++
+ config PCI_ATS
+ bool
+
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -207,6 +207,7 @@ static void quirk_mmio_always_on(struct
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
+ /*
+ * The Mellanox Tavor device gives false positive parity errors. Mark this
+ * device with a broken_parity_status to allow PCI scanning code to "skip"
+@@ -3152,6 +3153,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
+
++#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */
++
+ /*
+ * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum.
+ * To work around this, query the size it should be configured to by the
+@@ -3177,6 +3180,8 @@ static void quirk_intel_ntb(struct pci_d
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
++
+ /*
+ * Some BIOS implementations leave the Intel GPU interrupts enabled, even
+ * though no one is handling them (e.g., if the i915 driver is never
+@@ -3215,6 +3220,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IN
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
+
++#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */
++
+ /*
+ * PCI devices which are on Intel chips can skip the 10ms delay
+ * before entering D3 mode.
diff --git a/target/linux/generic/pending-5.4/811-pci_disable_usb_common_quirks.patch b/target/linux/generic/pending-5.4/811-pci_disable_usb_common_quirks.patch
new file mode 100644
index 0000000000..848aecaa87
--- /dev/null
+++ b/target/linux/generic/pending-5.4/811-pci_disable_usb_common_quirks.patch
@@ -0,0 +1,115 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: debloat: disable common USB quirks
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/usb/host/pci-quirks.c | 16 ++++++++++++++++
+ drivers/usb/host/pci-quirks.h | 18 +++++++++++++++++-
+ include/linux/usb/hcd.h | 7 +++++++
+ 3 files changed, 40 insertions(+), 1 deletion(-)
+
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -125,6 +125,8 @@ struct amd_chipset_type {
+ u8 rev;
+ };
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
++
+ static struct amd_chipset_info {
+ struct pci_dev *nb_dev;
+ struct pci_dev *smbus_dev;
+@@ -628,6 +630,10 @@ bool usb_amd_pt_check_port(struct device
+ }
+ EXPORT_SYMBOL_GPL(usb_amd_pt_check_port);
+
++#endif /* CONFIG_PCI_DISABLE_COMMON_QUIRKS */
++
++#if IS_ENABLED(CONFIG_USB_UHCI_HCD)
++
+ /*
+ * Make sure the controller is completely inactive, unable to
+ * generate interrupts or do DMA.
+@@ -707,8 +713,17 @@ reset_needed:
+ uhci_reset_hc(pdev, base);
+ return 1;
+ }
++#else
++int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
++{
++ return 0;
++}
++
++#endif
+ EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
++
+ static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
+ {
+ u16 cmd;
+@@ -1275,3 +1290,4 @@ static void quirk_usb_early_handoff(stru
+ }
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
++#endif
+--- a/drivers/usb/host/pci-quirks.h
++++ b/drivers/usb/host/pci-quirks.h
+@@ -5,6 +5,9 @@
+ #ifdef CONFIG_USB_PCI
+ void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
+ int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
++#endif /* CONFIG_USB_PCI */
++
++#if defined(CONFIG_USB_PCI) && !defined(CONFIG_PCI_DISABLE_COMMON_QUIRKS)
+ int usb_amd_find_chipset_info(void);
+ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev);
+ bool usb_amd_hang_symptom_quirk(void);
+@@ -19,6 +22,18 @@ void sb800_prefetch(struct device *dev,
+ bool usb_amd_pt_check_port(struct device *device, int port);
+ #else
+ struct pci_dev;
++static inline int usb_amd_find_chipset_info(void)
++{
++ return 0;
++}
++static inline bool usb_amd_hang_symptom_quirk(void)
++{
++ return false;
++}
++static inline bool usb_amd_prefetch_quirk(void)
++{
++ return false;
++}
+ static inline void usb_amd_quirk_pll_disable(void) {}
+ static inline void usb_amd_quirk_pll_enable(void) {}
+ static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}
+@@ -29,6 +44,11 @@ static inline bool usb_amd_pt_check_port
+ {
+ return false;
+ }
++static inline void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev) {}
++static inline bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)
++{
++ return false;
++}
+ #endif /* CONFIG_USB_PCI */
+
+ #endif /* __LINUX_USB_PCI_QUIRKS_H */
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -473,7 +473,14 @@ extern int usb_hcd_pci_probe(struct pci_
+ extern void usb_hcd_pci_remove(struct pci_dev *dev);
+ extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
+ extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev);
++#else
++static inline int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev)
++{
++ return 0;
++}
++#endif
+
+ #ifdef CONFIG_PM
+ extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
diff --git a/target/linux/generic/pending-5.4/834-ledtrig-libata.patch b/target/linux/generic/pending-5.4/834-ledtrig-libata.patch
new file mode 100644
index 0000000000..f1dc0e5995
--- /dev/null
+++ b/target/linux/generic/pending-5.4/834-ledtrig-libata.patch
@@ -0,0 +1,149 @@
+From: Daniel Golle <daniel@makrotopia.org>
+Subject: libata: add ledtrig support
+
+This adds a LED trigger for each ATA port indicating disk activity.
+
+As this is needed only on specific platforms (NAS SoCs and such),
+these platforms should define ARCH_WANTS_LIBATA_LEDS if there
+are boards with LED(s) intended to indicate ATA disk activity and
+need the OS to take care of that.
+In that way, if not selected, LED trigger support not will be
+included in libata-core and both, codepaths and structures remain
+untouched.
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/ata/Kconfig | 16 ++++++++++++++++
+ drivers/ata/libata-core.c | 41 +++++++++++++++++++++++++++++++++++++++++
+ include/linux/libata.h | 9 +++++++++
+ 3 files changed, 66 insertions(+)
+
+--- a/drivers/ata/Kconfig
++++ b/drivers/ata/Kconfig
+@@ -46,6 +46,22 @@ config ATA_VERBOSE_ERROR
+
+ If unsure, say Y.
+
++config ARCH_WANT_LIBATA_LEDS
++ bool
++
++config ATA_LEDS
++ bool "support ATA port LED triggers"
++ depends on ARCH_WANT_LIBATA_LEDS
++ select NEW_LEDS
++ select LEDS_CLASS
++ select LEDS_TRIGGERS
++ default y
++ help
++ This option adds a LED trigger for each registered ATA port.
++ It is used to drive disk activity leds connected via GPIO.
++
++ If unsure, say N.
++
+ config ATA_ACPI
+ bool "ATA ACPI Support"
+ depends on ACPI
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -731,6 +731,19 @@ u64 ata_tf_read_block(const struct ata_t
+ return block;
+ }
+
++#ifdef CONFIG_ATA_LEDS
++#define LIBATA_BLINK_DELAY 20 /* ms */
++static inline void ata_led_act(struct ata_port *ap)
++{
++ unsigned long led_delay = LIBATA_BLINK_DELAY;
++
++ if (unlikely(!ap->ledtrig))
++ return;
++
++ led_trigger_blink_oneshot(ap->ledtrig, &led_delay, &led_delay, 0);
++}
++#endif
++
+ /**
+ * ata_build_rw_tf - Build ATA taskfile for given read/write request
+ * @tf: Target ATA taskfile
+@@ -5134,6 +5147,9 @@ struct ata_queued_cmd *ata_qc_new_init(s
+ if (tag < 0)
+ return NULL;
+ }
++#ifdef CONFIG_ATA_LEDS
++ ata_led_act(ap);
++#endif
+
+ qc = __ata_qc_from_tag(ap, tag);
+ qc->tag = qc->hw_tag = tag;
+@@ -6068,6 +6084,9 @@ struct ata_port *ata_port_alloc(struct a
+ ap->stats.unhandled_irq = 1;
+ ap->stats.idle_irq = 1;
+ #endif
++#ifdef CONFIG_ATA_LEDS
++ ap->ledtrig = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
++#endif
+ ata_sff_port_init(ap);
+
+ return ap;
+@@ -6103,6 +6122,12 @@ static void ata_host_release(struct kref
+
+ kfree(ap->pmp_link);
+ kfree(ap->slave_link);
++#ifdef CONFIG_ATA_LEDS
++ if (ap->ledtrig) {
++ led_trigger_unregister(ap->ledtrig);
++ kfree(ap->ledtrig);
++	}
++#endif
+ kfree(ap);
+ host->ports[i] = NULL;
+ }
+@@ -6566,7 +6591,23 @@ int ata_host_register(struct ata_host *h
+ host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
+ host->ports[i]->local_port_no = i + 1;
+ }
++#ifdef CONFIG_ATA_LEDS
++ for (i = 0; i < host->n_ports; i++) {
++ if (unlikely(!host->ports[i]->ledtrig))
++ continue;
+
++ snprintf(host->ports[i]->ledtrig_name,
++ sizeof(host->ports[i]->ledtrig_name), "ata%u",
++ host->ports[i]->print_id);
++
++ host->ports[i]->ledtrig->name = host->ports[i]->ledtrig_name;
++
++ if (led_trigger_register(host->ports[i]->ledtrig)) {
++ kfree(host->ports[i]->ledtrig);
++ host->ports[i]->ledtrig = NULL;
++ }
++ }
++#endif
+ /* Create associated sysfs transport objects */
+ for (i = 0; i < host->n_ports; i++) {
+ rc = ata_tport_add(host->dev,host->ports[i]);
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -38,6 +38,9 @@
+ #include <linux/acpi.h>
+ #include <linux/cdrom.h>
+ #include <linux/sched.h>
++#ifdef CONFIG_ATA_LEDS
++#include <linux/leds.h>
++#endif
+
+ /*
+ * Define if arch has non-standard setup. This is a _PCI_ standard
+@@ -893,6 +896,12 @@ struct ata_port {
+ #ifdef CONFIG_ATA_ACPI
+ struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */
+ #endif
++
++#ifdef CONFIG_ATA_LEDS
++ struct led_trigger *ledtrig;
++ char ledtrig_name[8];
++#endif
++
+ /* owned by EH */
+ u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
+ };
diff --git a/target/linux/generic/pending-5.4/920-mangle_bootargs.patch b/target/linux/generic/pending-5.4/920-mangle_bootargs.patch
new file mode 100644
index 0000000000..0750ab77c2
--- /dev/null
+++ b/target/linux/generic/pending-5.4/920-mangle_bootargs.patch
@@ -0,0 +1,71 @@
+From: Imre Kaloz <kaloz@openwrt.org>
+Subject: init: add CONFIG_MANGLE_BOOTARGS and disable it by default
+
+Enabling this option renames the bootloader-supplied root=
+and rootfstype= variables: their values may still need to be
+known, but left in place they would break the automatic root
+filesystem handling OpenWrt relies on.
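+
+For example, with this option enabled a bootloader command line like
+
+    console=ttyS0,115200 root=/dev/mtdblock2 rootfstype=squashfs
+
+reaches the kernel as
+
+    console=ttyS0,115200 mangled_rootblock=2 mangled_fs=squashfs
+
+Both replacement strings are exactly as long as the prefixes they
+overwrite, so the remainder of the command line stays intact.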
+
+Signed-off-by: Imre Kaloz <kaloz@openwrt.org>
+---
+ init/Kconfig | 9 +++++++++
+ init/main.c | 24 ++++++++++++++++++++++++
+ 2 files changed, 33 insertions(+)
+
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1531,6 +1531,15 @@ config EMBEDDED
+ an embedded system so certain expert options are available
+ for configuration.
+
++config MANGLE_BOOTARGS
++ bool "Rename offending bootargs"
++ depends on EXPERT
++ help
++	  Sometimes the bootloader passes bogus root= and rootfstype=
++	  parameters to the kernel, and while you want to ignore them,
++	  you still need to know their values, e.g. to support dual
++	  firmware layouts on the flash.
++
+ config HAVE_PERF_EVENTS
+ bool
+ help
+--- a/init/main.c
++++ b/init/main.c
+@@ -365,6 +365,29 @@ static inline void setup_nr_cpu_ids(void
+ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
+ #endif
+
++#ifdef CONFIG_MANGLE_BOOTARGS
++static void __init mangle_bootargs(char *command_line)
++{
++ char *rootdev;
++ char *rootfs;
++
++ rootdev = strstr(command_line, "root=/dev/mtdblock");
++
++ if (rootdev)
++ strncpy(rootdev, "mangled_rootblock=", 18);
++
++ rootfs = strstr(command_line, "rootfstype");
++
++ if (rootfs)
++ strncpy(rootfs, "mangled_fs", 10);
++
++}
++#else
++static void __init mangle_bootargs(char *command_line)
++{
++}
++#endif
++
+ /*
+ * We need to store the untouched command line for future reference.
+ * We also need to store the touched command line since the parameter
+@@ -557,6 +580,7 @@ asmlinkage __visible void __init start_k
+ add_device_randomness(command_line, strlen(command_line));
+ boot_init_stack_canary();
+ mm_init_cpumask(&init_mm);
++ mangle_bootargs(command_line);
+ setup_command_line(command_line);
+ setup_nr_cpu_ids();
+ setup_per_cpu_areas();