author | Ansuel Smith <ansuelsmth@gmail.com> | 2021-11-04 21:52:43 +0100 |
---|---|---|
committer | Daniel Golle <daniel@makrotopia.org> | 2022-03-27 00:07:34 +0000 |
commit | 9a038e7fd12eae3695875232962f96af8252f3ba (patch) | |
tree | 3ac43d08d68ead1856e271791f5f5ba75f555e95 /target/linux/generic/pending-5.15 | |
parent | 13960fb0e0babcd99530fcb234073af0c0a5e2f5 (diff) | |
download | upstream-9a038e7fd12eae3695875232962f96af8252f3ba.tar.gz upstream-9a038e7fd12eae3695875232962f96af8252f3ba.tar.bz2 upstream-9a038e7fd12eae3695875232962f96af8252f3ba.zip | |
generic: 5.15: copy config and patch from 5.10
Copy config and patches from kernel 5.10 to kernel 5.15
Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
Diffstat (limited to 'target/linux/generic/pending-5.15')
129 files changed, 16691 insertions, 0 deletions
diff --git a/target/linux/generic/pending-5.15/050-dtc-checks-Drop-interrupt-provider-address-cells-check.patch b/target/linux/generic/pending-5.15/050-dtc-checks-Drop-interrupt-provider-address-cells-check.patch new file mode 100644 index 0000000000..75f63728ec --- /dev/null +++ b/target/linux/generic/pending-5.15/050-dtc-checks-Drop-interrupt-provider-address-cells-check.patch @@ -0,0 +1,28 @@ +From d8d1a9a77863a8c7031ae82a1d461aa78eb72a7b Mon Sep 17 00:00:00 2001 +From: Rob Herring <robh@kernel.org> +Date: Mon, 11 Oct 2021 14:12:43 -0500 +Subject: [PATCH] checks: Drop interrupt provider '#address-cells' check + +'#address-cells' is only needed when parsing 'interrupt-map' properties, so +remove it from the common interrupt-provider test. + +Cc: Andre Przywara <andre.przywara@arm.com> +Reviewed-by: David Gibson <david@gibson.dropbear.id.au> +Signed-off-by: Rob Herring <robh@kernel.org> +Message-Id: <20211011191245.1009682-3-robh@kernel.org> +Signed-off-by: David Gibson <david@gibson.dropbear.id.au> +--- +--- a/scripts/dtc/checks.c ++++ b/scripts/dtc/checks.c +@@ -1569,11 +1569,6 @@ static void check_interrupt_provider(str + if (!prop) + FAIL(c, dti, node, + "Missing #interrupt-cells in interrupt provider"); +- +- prop = get_property(node, "#address-cells"); +- if (!prop) +- FAIL(c, dti, node, +- "Missing #address-cells in interrupt provider"); + } + WARNING(interrupt_provider, check_interrupt_provider, NULL); + diff --git a/target/linux/generic/pending-5.15/100-compiler.h-only-include-asm-rwonce.h-for-kernel-code.patch b/target/linux/generic/pending-5.15/100-compiler.h-only-include-asm-rwonce.h-for-kernel-code.patch new file mode 100644 index 0000000000..282c8196e5 --- /dev/null +++ b/target/linux/generic/pending-5.15/100-compiler.h-only-include-asm-rwonce.h-for-kernel-code.patch @@ -0,0 +1,29 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Thu, 22 Oct 2020 22:00:03 +0200 +Subject: [PATCH] compiler.h: only include asm/rwonce.h for kernel code + +This header file is not in uapi, which makes any user space code that includes +linux/compiler.h to fail with the error 'asm/rwonce.h: No such file or directory' + +Fixes: e506ea451254 ("compiler.h: Split {READ,WRITE}_ONCE definitions out into rwonce.h") +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + +--- a/include/linux/compiler.h ++++ b/include/linux/compiler.h +@@ -213,6 +213,8 @@ void ftrace_likely_update(struct ftrace_ + __v; \ + }) + ++#include <asm/rwonce.h> ++ + #endif /* __KERNEL__ */ + + /* +@@ -245,6 +247,4 @@ static inline void *offset_to_ptr(const + */ + #define prevent_tail_call_optimization() mb() + +-#include <asm/rwonce.h> +- + #endif /* __LINUX_COMPILER_H */ diff --git a/target/linux/generic/pending-5.15/101-Use-stddefs.h-instead-of-compiler.h.patch b/target/linux/generic/pending-5.15/101-Use-stddefs.h-instead-of-compiler.h.patch new file mode 100644 index 0000000000..824b9444ee --- /dev/null +++ b/target/linux/generic/pending-5.15/101-Use-stddefs.h-instead-of-compiler.h.patch @@ -0,0 +1,11 @@ +--- a/include/uapi/linux/swab.h ++++ b/include/uapi/linux/swab.h +@@ -3,7 +3,7 @@ + #define _UAPI_LINUX_SWAB_H + + #include <linux/types.h> +-#include <linux/compiler.h> ++#include <linux/stddef.h> + #include <asm/bitsperlong.h> + #include <asm/swab.h> + diff --git a/target/linux/generic/pending-5.15/102-MIPS-only-process-negative-stack-offsets-on-stack-tr.patch b/target/linux/generic/pending-5.15/102-MIPS-only-process-negative-stack-offsets-on-stack-tr.patch new file mode 100644 index 0000000000..cc9f99e8b0 --- /dev/null +++ 
b/target/linux/generic/pending-5.15/102-MIPS-only-process-negative-stack-offsets-on-stack-tr.patch @@ -0,0 +1,57 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Wed, 18 Apr 2018 10:50:05 +0200 +Subject: [PATCH] MIPS: only process negative stack offsets on stack traces + +Fixes endless back traces in cases where the compiler emits a stack +pointer increase in a branch delay slot (probably for some form of +function return). + +[ 3.475442] BUG: MAX_STACK_TRACE_ENTRIES too low! +[ 3.480070] turning off the locking correctness validator. +[ 3.485521] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.14.34 #0 +[ 3.491475] Stack : 00000000 00000000 00000000 00000000 80e0fce2 00000034 00000000 00000000 +[ 3.499764] 87c3838c 80696377 8061047c 00000000 00000001 00000001 87c2d850 6534689f +[ 3.508059] 00000000 00000000 80e10000 00000000 00000000 000000cf 0000000f 00000000 +[ 3.516353] 00000000 806a0000 00076891 00000000 00000000 00000000 ffffffff 00000000 +[ 3.524648] 806c0000 00000004 80e10000 806a0000 00000003 80690000 00000000 80700000 +[ 3.532942] ... +[ 3.535362] Call Trace: +[ 3.537818] [<80010a48>] show_stack+0x58/0x100 +[ 3.542207] [<804c2f78>] dump_stack+0xe8/0x170 +[ 3.546613] [<80079f90>] save_trace+0xf0/0x110 +[ 3.551010] [<8007b1ec>] mark_lock+0x33c/0x78c +[ 3.555413] [<8007bf48>] __lock_acquire+0x2ac/0x1a08 +[ 3.560337] [<8007de60>] lock_acquire+0x64/0x8c +[ 3.564846] [<804e1570>] _raw_spin_lock_irqsave+0x54/0x78 +[ 3.570186] [<801b618c>] kernfs_notify+0x94/0xac +[ 3.574770] [<801b7b10>] sysfs_notify+0x74/0xa0 +[ 3.579257] [<801b618c>] kernfs_notify+0x94/0xac +[ 3.583839] [<801b7b10>] sysfs_notify+0x74/0xa0 +[ 3.588329] [<801b618c>] kernfs_notify+0x94/0xac +[ 3.592911] [<801b7b10>] sysfs_notify+0x74/0xa0 +[ 3.597401] [<801b618c>] kernfs_notify+0x94/0xac +[ 3.601983] [<801b7b10>] sysfs_notify+0x74/0xa0 +[ 3.606473] [<801b618c>] kernfs_notify+0x94/0xac +[ 3.611055] [<801b7b10>] sysfs_notify+0x74/0xa0 +[ 3.615545] [<801b618c>] kernfs_notify+0x94/0xac +[ 3.620125] [<801b7b10>] sysfs_notify+0x74/0xa0 +[ 3.624619] [<801b618c>] kernfs_notify+0x94/0xac +[ 3.629197] [<801b7b10>] sysfs_notify+0x74/0xa0 +[ 3.633691] [<801b618c>] kernfs_notify+0x94/0xac +[ 3.638269] [<801b7b10>] sysfs_notify+0x74/0xa0 +[ 3.642763] [<801b618c>] kernfs_notify+0x94/0xac + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + +--- a/arch/mips/kernel/process.c ++++ b/arch/mips/kernel/process.c +@@ -380,6 +380,8 @@ static inline int is_sp_move_ins(union m + + if (ip->i_format.opcode == addiu_op || + ip->i_format.opcode == daddiu_op) { ++ if (ip->i_format.simmediate > 0) ++ return 0; + *frame_size = -ip->i_format.simmediate; + return 1; + } diff --git a/target/linux/generic/pending-5.15/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch b/target/linux/generic/pending-5.15/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch new file mode 100644 index 0000000000..0d1d3e1137 --- /dev/null +++ b/target/linux/generic/pending-5.15/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch @@ -0,0 +1,82 @@ +From: Tobias Wolf <dev-NTEO@vplace.de> +Subject: mm: Fix alloc_node_mem_map with ARCH_PFN_OFFSET calculation + +An rt288x (ralink) based router (Belkin F5D8235 v1) does not boot with any +kernel beyond version 4.3 resulting in: + +BUG: Bad page state in process swapper pfn:086ac + +bisect resulted in: + +a1c34a3bf00af2cede839879502e12dc68491ad5 is the first bad commit +commit a1c34a3bf00af2cede839879502e12dc68491ad5 +Author: Laura Abbott <laura@labbott.name> +Date: Thu Nov 5 18:48:46 2015 -0800 + + mm: Don't offset 
memmap for flatmem + + Srinivas Kandagatla reported bad page messages when trying to remove the + bottom 2MB on an ARM based IFC6410 board + + BUG: Bad page state in process swapper pfn:fffa8 + page:ef7fb500 count:0 mapcount:0 mapping: (null) index:0x0 + flags: 0x96640253(locked|error|dirty|active|arch_1|reclaim|mlocked) + page dumped because: PAGE_FLAGS_CHECK_AT_FREE flag(s) set + bad because of flags: + flags: 0x200041(locked|active|mlocked) + Modules linked in: + CPU: 0 PID: 0 Comm: swapper Not tainted 3.19.0-rc3-00007-g412f9ba-dirty +#816 + Hardware name: Qualcomm (Flattened Device Tree) + unwind_backtrace + show_stack + dump_stack + bad_page + free_pages_prepare + free_hot_cold_page + __free_pages + free_highmem_page + mem_init + start_kernel + Disabling lock debugging due to kernel taint + [...] +:040000 040000 2de013c372345fd471cd58f0553c9b38b0ef1cc4 +0a8156f848733dfa21e16c196dfb6c0a76290709 M mm + +This fix for ARM does not account ARCH_PFN_OFFSET for mem_map as later used by +page_to_pfn anymore. + +The following output was generated with two hacked in printk statements: + +printk("before %p vs. %p or %p\n", mem_map, mem_map - offset, mem_map - +(pgdat->node_start_pfn - ARCH_PFN_OFFSET)); + if (page_to_pfn(mem_map) != pgdat->node_start_pfn) + mem_map -= offset + (pgdat->node_start_pfn - ARCH_PFN_OFFSET); +printk("after %p\n", mem_map); + +Output: + +[ 0.000000] before 8861b280 vs. 8861b280 or 8851b280 +[ 0.000000] after 8851b280 + +As seen in the first line mem_map with subtraction of offset does not equal the +mem_map after subtraction of ARCH_PFN_OFFSET. + +After adding the offset of ARCH_PFN_OFFSET as well to mem_map as the +previously calculated offset is zero for the named platform it is able to boot +4.4 and 4.9-rc7 again. + +Signed-off-by: Tobias Wolf <dev-NTEO@vplace.de> +--- + +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -7055,7 +7055,7 @@ static void __ref alloc_node_mem_map(str + if (pgdat == NODE_DATA(0)) { + mem_map = NODE_DATA(0)->node_mem_map; + if (page_to_pfn(mem_map) != pgdat->node_start_pfn) +- mem_map -= offset; ++ mem_map -= offset + (pgdat->node_start_pfn - ARCH_PFN_OFFSET); + } + #endif + } diff --git a/target/linux/generic/pending-5.15/130-add-linux-spidev-compatible-si3210.patch b/target/linux/generic/pending-5.15/130-add-linux-spidev-compatible-si3210.patch new file mode 100644 index 0000000000..355e900a3b --- /dev/null +++ b/target/linux/generic/pending-5.15/130-add-linux-spidev-compatible-si3210.patch @@ -0,0 +1,18 @@ +From: Giuseppe Lippolis <giu.lippolis@gmail.com> +Subject: Add the linux,spidev compatible in spidev Several device in ramips have this binding in the dts + +Signed-off-by: Giuseppe Lippolis <giu.lippolis@gmail.com> +--- + drivers/spi/spidev.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/spi/spidev.c ++++ b/drivers/spi/spidev.c +@@ -682,6 +682,7 @@ static const struct of_device_id spidev_ + { .compatible = "lwn,bk4" }, + { .compatible = "dh,dhcom-board" }, + { .compatible = "menlo,m53cpld" }, ++ { .compatible = "siliconlabs,si3210" }, + {}, + }; + MODULE_DEVICE_TABLE(of, spidev_dt_ids); diff --git a/target/linux/generic/pending-5.15/140-jffs2-use-.rename2-and-add-RENAME_WHITEOUT-support.patch b/target/linux/generic/pending-5.15/140-jffs2-use-.rename2-and-add-RENAME_WHITEOUT-support.patch new file mode 100644 index 0000000000..e48da41fae --- /dev/null +++ b/target/linux/generic/pending-5.15/140-jffs2-use-.rename2-and-add-RENAME_WHITEOUT-support.patch @@ -0,0 +1,78 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: jffs2: use 
.rename2 and add RENAME_WHITEOUT support + +It is required for renames on overlayfs + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + +--- a/fs/jffs2/dir.c ++++ b/fs/jffs2/dir.c +@@ -609,7 +609,8 @@ static int jffs2_rmdir (struct inode *di + return ret; + } + +-static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode, dev_t rdev) ++static int __jffs2_mknod (struct inode *dir_i, struct dentry *dentry, ++ umode_t mode, dev_t rdev, bool whiteout) + { + struct jffs2_inode_info *f, *dir_f; + struct jffs2_sb_info *c; +@@ -748,7 +749,11 @@ static int jffs2_mknod (struct inode *di + mutex_unlock(&dir_f->sem); + jffs2_complete_reservation(c); + +- d_instantiate_new(dentry, inode); ++ if (!whiteout) ++ d_instantiate_new(dentry, inode); ++ else ++ unlock_new_inode(inode); ++ + return 0; + + fail: +@@ -756,6 +761,17 @@ static int jffs2_mknod (struct inode *di + return ret; + } + ++static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode, dev_t rdev) ++{ ++ return __jffs2_mknod(dir_i, dentry, mode, rdev, false); ++} ++ ++static int jffs2_whiteout (struct inode *old_dir, struct dentry *old_dentry) ++{ ++ return __jffs2_mknod(old_dir, old_dentry, S_IFCHR | WHITEOUT_MODE, ++ WHITEOUT_DEV, true); ++} ++ + static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, + struct inode *new_dir_i, struct dentry *new_dentry, + unsigned int flags) +@@ -766,7 +782,7 @@ static int jffs2_rename (struct inode *o + uint8_t type; + uint32_t now; + +- if (flags & ~RENAME_NOREPLACE) ++ if (flags & ~(RENAME_NOREPLACE|RENAME_WHITEOUT)) + return -EINVAL; + + /* The VFS will check for us and prevent trying to rename a +@@ -832,9 +848,14 @@ static int jffs2_rename (struct inode *o + if (d_is_dir(old_dentry) && !victim_f) + inc_nlink(new_dir_i); + +- /* Unlink the original */ +- ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i), +- old_dentry->d_name.name, old_dentry->d_name.len, NULL, now); ++ if (flags & RENAME_WHITEOUT) ++ /* Replace with whiteout */ ++ ret = jffs2_whiteout(old_dir_i, old_dentry); ++ else ++ /* Unlink the original */ ++ ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i), ++ old_dentry->d_name.name, ++ old_dentry->d_name.len, NULL, now); + + /* We don't touch inode->i_nlink */ + diff --git a/target/linux/generic/pending-5.15/141-jffs2-add-RENAME_EXCHANGE-support.patch b/target/linux/generic/pending-5.15/141-jffs2-add-RENAME_EXCHANGE-support.patch new file mode 100644 index 0000000000..dbc72339c6 --- /dev/null +++ b/target/linux/generic/pending-5.15/141-jffs2-add-RENAME_EXCHANGE-support.patch @@ -0,0 +1,73 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: jffs2: add RENAME_EXCHANGE support + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + +--- a/fs/jffs2/dir.c ++++ b/fs/jffs2/dir.c +@@ -779,18 +779,31 @@ static int jffs2_rename (struct inode *o + int ret; + struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb); + struct jffs2_inode_info *victim_f = NULL; ++ struct inode *fst_inode = d_inode(old_dentry); ++ struct inode *snd_inode = d_inode(new_dentry); + uint8_t type; + uint32_t now; + +- if (flags & ~(RENAME_NOREPLACE|RENAME_WHITEOUT)) ++ if (flags & ~(RENAME_NOREPLACE|RENAME_WHITEOUT|RENAME_EXCHANGE)) + return -EINVAL; + ++ if ((flags & RENAME_EXCHANGE) && (old_dir_i != new_dir_i)) { ++ if (S_ISDIR(fst_inode->i_mode) && !S_ISDIR(snd_inode->i_mode)) { ++ inc_nlink(new_dir_i); ++ drop_nlink(old_dir_i); ++ } ++ else if (!S_ISDIR(fst_inode->i_mode) && S_ISDIR(snd_inode->i_mode)) { ++ drop_nlink(new_dir_i); ++ inc_nlink(old_dir_i); ++ 
} ++ } ++ + /* The VFS will check for us and prevent trying to rename a + * file over a directory and vice versa, but if it's a directory, + * the VFS can't check whether the victim is empty. The filesystem + * needs to do that for itself. + */ +- if (d_really_is_positive(new_dentry)) { ++ if (d_really_is_positive(new_dentry) && !(flags & RENAME_EXCHANGE)) { + victim_f = JFFS2_INODE_INFO(d_inode(new_dentry)); + if (d_is_dir(new_dentry)) { + struct jffs2_full_dirent *fd; +@@ -825,7 +838,7 @@ static int jffs2_rename (struct inode *o + if (ret) + return ret; + +- if (victim_f) { ++ if (victim_f && !(flags & RENAME_EXCHANGE)) { + /* There was a victim. Kill it off nicely */ + if (d_is_dir(new_dentry)) + clear_nlink(d_inode(new_dentry)); +@@ -851,6 +864,12 @@ static int jffs2_rename (struct inode *o + if (flags & RENAME_WHITEOUT) + /* Replace with whiteout */ + ret = jffs2_whiteout(old_dir_i, old_dentry); ++ else if (flags & RENAME_EXCHANGE) ++ /* Replace the original */ ++ ret = jffs2_do_link(c, JFFS2_INODE_INFO(old_dir_i), ++ d_inode(new_dentry)->i_ino, type, ++ old_dentry->d_name.name, old_dentry->d_name.len, ++ now); + else + /* Unlink the original */ + ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i), +@@ -882,7 +901,7 @@ static int jffs2_rename (struct inode *o + return ret; + } + +- if (d_is_dir(old_dentry)) ++ if (d_is_dir(old_dentry) && !(flags & RENAME_EXCHANGE)) + drop_nlink(old_dir_i); + + new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now); diff --git a/target/linux/generic/pending-5.15/142-jffs2-add-splice-ops.patch b/target/linux/generic/pending-5.15/142-jffs2-add-splice-ops.patch new file mode 100644 index 0000000000..de847a1f5c --- /dev/null +++ b/target/linux/generic/pending-5.15/142-jffs2-add-splice-ops.patch @@ -0,0 +1,20 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: jffs2: add splice ops + +Add splice_read using generic_file_splice_read. +Add splice_write using iter_file_splice_write + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + +--- a/fs/jffs2/file.c ++++ b/fs/jffs2/file.c +@@ -53,6 +53,8 @@ const struct file_operations jffs2_file_ + .open = generic_file_open, + .read_iter = generic_file_read_iter, + .write_iter = generic_file_write_iter, ++ .splice_read = generic_file_splice_read, ++ .splice_write = iter_file_splice_write, + .unlocked_ioctl=jffs2_ioctl, + .mmap = generic_file_readonly_mmap, + .fsync = jffs2_fsync, diff --git a/target/linux/generic/pending-5.15/150-bridge_allow_receiption_on_disabled_port.patch b/target/linux/generic/pending-5.15/150-bridge_allow_receiption_on_disabled_port.patch new file mode 100644 index 0000000000..6de22b1c7d --- /dev/null +++ b/target/linux/generic/pending-5.15/150-bridge_allow_receiption_on_disabled_port.patch @@ -0,0 +1,45 @@ +From: Stephen Hemminger <stephen@networkplumber.org> +Subject: bridge: allow receiption on disabled port + +When an ethernet device is enslaved to a bridge, and the bridge STP +detects loss of carrier (or operational state down), then normally +packet receiption is blocked. + +This breaks control applications like WPA which maybe expecting to +receive packets to negotiate to bring link up. The bridge needs to +block forwarding packets from these disabled ports, but there is no +hard requirement to not allow local packet delivery. 
+ +Signed-off-by: Stephen Hemminger <stephen@networkplumber.org> +Signed-off-by: Felix Fietkau <nbd@nbd.name> + +--- a/net/bridge/br_input.c ++++ b/net/bridge/br_input.c +@@ -195,6 +195,9 @@ static void __br_handle_local_finish(str + /* note: already called with rcu_read_lock */ + static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb) + { ++ struct net_bridge_port *p = br_port_get_rcu(skb->dev); ++ ++ if (p->state != BR_STATE_DISABLED) + __br_handle_local_finish(skb); + + /* return 1 to signal the okfn() was called so it's ok to use the skb */ +@@ -348,6 +351,17 @@ static rx_handler_result_t br_handle_fra + + forward: + switch (p->state) { ++ case BR_STATE_DISABLED: ++ if (ether_addr_equal(p->br->dev->dev_addr, dest)) ++ skb->pkt_type = PACKET_HOST; ++ ++ if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, ++ dev_net(skb->dev), NULL, skb, skb->dev, NULL, ++ br_handle_local_finish) == 1) { ++ return RX_HANDLER_PASS; ++ } ++ break; ++ + case BR_STATE_FORWARDING: + case BR_STATE_LEARNING: + if (ether_addr_equal(p->br->dev->dev_addr, dest)) diff --git a/target/linux/generic/pending-5.15/190-rtc-rs5c372-support_alarms_up_to_1_week.patch b/target/linux/generic/pending-5.15/190-rtc-rs5c372-support_alarms_up_to_1_week.patch new file mode 100644 index 0000000000..13b79b5c09 --- /dev/null +++ b/target/linux/generic/pending-5.15/190-rtc-rs5c372-support_alarms_up_to_1_week.patch @@ -0,0 +1,94 @@ +From: Daniel González Cabanelas <dgcbueu@gmail.com> +Subject: [PATCH 1/2] rtc: rs5c372: support alarms up to 1 week + +The Ricoh R2221x, R2223x, RS5C372, RV5C387A chips can handle 1 week +alarms. + +Read the "wday" alarm register and convert it to a date to support up 1 +week in our driver. + +Signed-off-by: Daniel González Cabanelas <dgcbueu@gmail.com> +--- + drivers/rtc/rtc-rs5c372.c | 48 ++++++++++++++++++++++++++++++++++----- + 1 file changed, 42 insertions(+), 6 deletions(-) + +--- a/drivers/rtc/rtc-rs5c372.c ++++ b/drivers/rtc/rtc-rs5c372.c +@@ -393,7 +393,9 @@ static int rs5c_read_alarm(struct device + { + struct i2c_client *client = to_i2c_client(dev); + struct rs5c372 *rs5c = i2c_get_clientdata(client); +- int status; ++ int status, wday_offs; ++ struct rtc_time rtc; ++ unsigned long alarm_secs; + + status = rs5c_get_regs(rs5c); + if (status < 0) +@@ -403,6 +405,30 @@ static int rs5c_read_alarm(struct device + t->time.tm_sec = 0; + t->time.tm_min = bcd2bin(rs5c->regs[RS5C_REG_ALARM_A_MIN] & 0x7f); + t->time.tm_hour = rs5c_reg2hr(rs5c, rs5c->regs[RS5C_REG_ALARM_A_HOURS]); ++ t->time.tm_wday = ffs(rs5c->regs[RS5C_REG_ALARM_A_WDAY] & 0x7f) - 1; ++ ++ /* determine the day, month and year based on alarm wday, taking as a ++ * reference the current time from the rtc ++ */ ++ status = rs5c372_rtc_read_time(dev, &rtc); ++ if (status < 0) ++ return status; ++ ++ wday_offs = t->time.tm_wday - rtc.tm_wday; ++ alarm_secs = mktime64(rtc.tm_year + 1900, ++ rtc.tm_mon + 1, ++ rtc.tm_mday + wday_offs, ++ t->time.tm_hour, ++ t->time.tm_min, ++ t->time.tm_sec); ++ ++ if (wday_offs < 0 || (wday_offs == 0 && ++ (t->time.tm_hour < rtc.tm_hour || ++ (t->time.tm_hour == rtc.tm_hour && ++ t->time.tm_min <= rtc.tm_min)))) ++ alarm_secs += 7 * 86400; ++ ++ rtc_time64_to_tm(alarm_secs, &t->time); + + /* ... 
and status */ + t->enabled = !!(rs5c->regs[RS5C_REG_CTRL1] & RS5C_CTRL1_AALE); +@@ -417,12 +443,20 @@ static int rs5c_set_alarm(struct device + struct rs5c372 *rs5c = i2c_get_clientdata(client); + int status, addr, i; + unsigned char buf[3]; ++ struct rtc_time rtc_tm; ++ unsigned long rtc_secs, alarm_secs; + +- /* only handle up to 24 hours in the future, like RTC_ALM_SET */ +- if (t->time.tm_mday != -1 +- || t->time.tm_mon != -1 +- || t->time.tm_year != -1) ++ /* chip only can handle alarms up to one week in the future*/ ++ status = rs5c372_rtc_read_time(dev, &rtc_tm); ++ if (status) ++ return status; ++ rtc_secs = rtc_tm_to_time64(&rtc_tm); ++ alarm_secs = rtc_tm_to_time64(&t->time); ++ if (alarm_secs >= rtc_secs + 7 * 86400) { ++ dev_err(dev, "%s: alarm maximum is one week in the future (%d)\n", ++ __func__, status); + return -EINVAL; ++ } + + /* REVISIT: round up tm_sec */ + +@@ -443,7 +477,9 @@ static int rs5c_set_alarm(struct device + /* set alarm */ + buf[0] = bin2bcd(t->time.tm_min); + buf[1] = rs5c_hr2reg(rs5c, t->time.tm_hour); +- buf[2] = 0x7f; /* any/all days */ ++ /* each bit is the day of the week, 0x7f means all days */ ++ buf[2] = (t->time.tm_wday >= 0 && t->time.tm_wday < 7) ? ++ BIT(t->time.tm_wday) : 0x7f; + + for (i = 0; i < sizeof(buf); i++) { + addr = RS5C_ADDR(RS5C_REG_ALARM_A_MIN + i); diff --git a/target/linux/generic/pending-5.15/191-rtc-rs5c372-let_the_alarm_to_be_used_as_wakeup_source.patch b/target/linux/generic/pending-5.15/191-rtc-rs5c372-let_the_alarm_to_be_used_as_wakeup_source.patch new file mode 100644 index 0000000000..7e9d0e66c0 --- /dev/null +++ b/target/linux/generic/pending-5.15/191-rtc-rs5c372-let_the_alarm_to_be_used_as_wakeup_source.patch @@ -0,0 +1,70 @@ +From: Daniel González Cabanelas <dgcbueu@gmail.com> +Subject: [PATCH 2/2] rtc: rs5c372: let the alarm to be used as wakeup source + +Currently there is no use for the interrupts on the rs5c372 RTC and the +wakealarm isn't enabled. There are some devices like NASes which use this +RTC to wake up from the power off state when the INTR pin is activated by +the alarm clock. + +Enable the alarm and let to be used as a wakeup source. + +Tested on a Buffalo LS421DE NAS. + +Signed-off-by: Daniel González Cabanelas <dgcbueu@gmail.com> +--- + drivers/rtc/rtc-rs5c372.c | 16 ++++++++++++++++ + 1 file changed, 16 insertions(+) + +--- a/drivers/rtc/rtc-rs5c372.c ++++ b/drivers/rtc/rtc-rs5c372.c +@@ -654,6 +654,7 @@ static int rs5c372_probe(struct i2c_clie + int err = 0; + int smbus_mode = 0; + struct rs5c372 *rs5c372; ++ bool rs5c372_can_wakeup_device = false; + + dev_dbg(&client->dev, "%s\n", __func__); + +@@ -689,6 +690,12 @@ static int rs5c372_probe(struct i2c_clie + else + rs5c372->type = id->driver_data; + ++#ifdef CONFIG_OF ++ if(of_property_read_bool(client->dev.of_node, ++ "wakeup-source")) ++ rs5c372_can_wakeup_device = true; ++#endif ++ + /* we read registers 0x0f then 0x00-0x0f; skip the first one */ + rs5c372->regs = &rs5c372->buf[1]; + rs5c372->smbus = smbus_mode; +@@ -722,6 +729,8 @@ static int rs5c372_probe(struct i2c_clie + goto exit; + } + ++ rs5c372->has_irq = 1; ++ + /* if the oscillator lost power and no other software (like + * the bootloader) set it up, do it here. + * +@@ -748,6 +757,10 @@ static int rs5c372_probe(struct i2c_clie + ); + + /* REVISIT use client->irq to register alarm irq ... 
*/ ++ if (rs5c372_can_wakeup_device) { ++ device_init_wakeup(&client->dev, true); ++ } ++ + rs5c372->rtc = devm_rtc_device_register(&client->dev, + rs5c372_driver.driver.name, + &rs5c372_rtc_ops, THIS_MODULE); +@@ -761,6 +774,9 @@ static int rs5c372_probe(struct i2c_clie + if (err) + goto exit; + ++ /* the rs5c372 alarm only supports a minute accuracy */ ++ rs5c372->rtc->uie_unsupported = 1; ++ + return 0; + + exit: diff --git a/target/linux/generic/pending-5.15/201-extra_optimization.patch b/target/linux/generic/pending-5.15/201-extra_optimization.patch new file mode 100644 index 0000000000..cb1de80764 --- /dev/null +++ b/target/linux/generic/pending-5.15/201-extra_optimization.patch @@ -0,0 +1,31 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: Upgrade to Linux 2.6.19 + +- Includes large parts of the patch from #1021 by dpalffy +- Includes RB532 NAND driver changes by n0-1 + +[john@phrozen.org: feix will add this to his upstream queue] + +lede-commit: bff468813f78f81e36ebb2a3f4354de7365e640f +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + Makefile | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +--- a/Makefile ++++ b/Makefile +@@ -735,11 +735,11 @@ KBUILD_CFLAGS += $(call cc-disable-warni + KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) + + ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE +-KBUILD_CFLAGS += -O2 ++KBUILD_CFLAGS += -O2 $(EXTRA_OPTIMIZATION) + else ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3 +-KBUILD_CFLAGS += -O3 ++KBUILD_CFLAGS += -O3 $(EXTRA_OPTIMIZATION) + else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE +-KBUILD_CFLAGS += -Os ++KBUILD_CFLAGS += -Os -fno-reorder-blocks -fno-tree-ch $(EXTRA_OPTIMIZATION) + endif + + # Tell gcc to never replace conditional load with a non-conditional one diff --git a/target/linux/generic/pending-5.15/203-kallsyms_uncompressed.patch b/target/linux/generic/pending-5.15/203-kallsyms_uncompressed.patch new file mode 100644 index 0000000000..348a5afa3d --- /dev/null +++ b/target/linux/generic/pending-5.15/203-kallsyms_uncompressed.patch @@ -0,0 +1,119 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: kernel: add a config option for keeping the kallsyms table uncompressed, saving ~9kb kernel size after lzma on ar71xx + +[john@phrozen.org: added to my upstream queue 30.12.2016] +lede-commit: e0e3509b5ce2ccf93d4d67ea907613f5f7ec2eed +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + init/Kconfig | 11 +++++++++++ + kernel/kallsyms.c | 8 ++++++++ + scripts/kallsyms.c | 12 ++++++++++++ + scripts/link-vmlinux.sh | 4 ++++ + 4 files changed, 35 insertions(+) + +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1384,6 +1384,17 @@ config SYSCTL_ARCH_UNALIGN_ALLOW + the unaligned access emulation. + see arch/parisc/kernel/unaligned.c for reference + ++config KALLSYMS_UNCOMPRESSED ++ bool "Keep kallsyms uncompressed" ++ depends on KALLSYMS ++ help ++ Normally kallsyms contains compressed symbols (using a token table), ++ reducing the uncompressed kernel image size. Keeping the symbol table ++ uncompressed significantly improves the size of this part in compressed ++ kernel images. ++ ++ Say N unless you need compressed kernel images to be small. ++ + config HAVE_PCSPKR_PLATFORM + bool + +--- a/kernel/kallsyms.c ++++ b/kernel/kallsyms.c +@@ -77,6 +77,11 @@ static unsigned int kallsyms_expand_symb + * For every byte on the compressed symbol data, copy the table + * entry for that byte. 
+ */ ++#ifdef CONFIG_KALLSYMS_UNCOMPRESSED ++ memcpy(result, data + 1, len - 1); ++ result += len - 1; ++ len = 0; ++#endif + while (len) { + tptr = &kallsyms_token_table[kallsyms_token_index[*data]]; + data++; +@@ -109,6 +114,9 @@ tail: + */ + static char kallsyms_get_symbol_type(unsigned int off) + { ++#ifdef CONFIG_KALLSYMS_UNCOMPRESSED ++ return kallsyms_names[off + 1]; ++#endif + /* + * Get just the first code, look it up in the token table, + * and return the first char from this token. +--- a/scripts/kallsyms.c ++++ b/scripts/kallsyms.c +@@ -58,6 +58,7 @@ static struct addr_range percpu_range = + static struct sym_entry **table; + static unsigned int table_size, table_cnt; + static int all_symbols; ++static int uncompressed; + static int absolute_percpu; + static int base_relative; + +@@ -486,6 +487,9 @@ static void write_src(void) + + free(markers); + ++ if (uncompressed) ++ return; ++ + output_label("kallsyms_token_table"); + off = 0; + for (i = 0; i < 256; i++) { +@@ -537,6 +541,9 @@ static unsigned char *find_token(unsigne + { + int i; + ++ if (uncompressed) ++ return NULL; ++ + for (i = 0; i < len - 1; i++) { + if (str[i] == token[0] && str[i+1] == token[1]) + return &str[i]; +@@ -609,6 +616,9 @@ static void optimize_result(void) + { + int i, best; + ++ if (uncompressed) ++ return; ++ + /* using the '\0' symbol last allows compress_symbols to use standard + * fast string functions */ + for (i = 255; i >= 0; i--) { +@@ -773,6 +783,8 @@ int main(int argc, char **argv) + absolute_percpu = 1; + else if (strcmp(argv[i], "--base-relative") == 0) + base_relative = 1; ++ else if (strcmp(argv[i], "--uncompressed") == 0) ++ uncompressed = 1; + else + usage(); + } +--- a/scripts/link-vmlinux.sh ++++ b/scripts/link-vmlinux.sh +@@ -186,6 +186,10 @@ kallsyms() + kallsymopt="${kallsymopt} --base-relative" + fi + ++ if [ -n "${CONFIG_KALLSYMS_UNCOMPRESSED}" ]; then ++ kallsymopt="${kallsymopt} --uncompressed" ++ fi ++ + info KSYMS ${2} + ${NM} -n ${1} | scripts/kallsyms ${kallsymopt} > ${2} + } diff --git a/target/linux/generic/pending-5.15/205-backtrace_module_info.patch b/target/linux/generic/pending-5.15/205-backtrace_module_info.patch new file mode 100644 index 0000000000..f46877f254 --- /dev/null +++ b/target/linux/generic/pending-5.15/205-backtrace_module_info.patch @@ -0,0 +1,41 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: kernel: when KALLSYMS is disabled, print module address + size for matching backtrace entries + +[john@phrozen.org: felix will add this to his upstream queue] + +lede-commit 53827cdc824556cda910b23ce5030c363b8f1461 +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + lib/vsprintf.c | 15 +++++++++++---- + 1 file changed, 11 insertions(+), 4 deletions(-) + +--- a/lib/vsprintf.c ++++ b/lib/vsprintf.c +@@ -983,8 +983,10 @@ char *symbol_string(char *buf, char *end + struct printf_spec spec, const char *fmt) + { + unsigned long value; +-#ifdef CONFIG_KALLSYMS + char sym[KSYM_SYMBOL_LEN]; ++#ifndef CONFIG_KALLSYMS ++ struct module *mod; ++ int len; + #endif + + if (fmt[1] == 'R') +@@ -1001,8 +1003,14 @@ char *symbol_string(char *buf, char *end + + return string_nocheck(buf, end, sym, spec); + #else +- return special_hex_number(buf, end, value, sizeof(void *)); ++ len = snprintf(sym, sizeof(sym), "0x%lx", value); ++ mod = __module_address(value); ++ if (mod) ++ snprintf(sym + len, sizeof(sym) - len, " [%s@%p+0x%x]", ++ mod->name, mod->core_layout.base, ++ mod->core_layout.size); + #endif ++ return string(buf, end, sym, spec); + } + + static const struct printf_spec 
default_str_spec = { diff --git a/target/linux/generic/pending-5.15/240-remove-unsane-filenames-from-deps_initramfs-list.patch b/target/linux/generic/pending-5.15/240-remove-unsane-filenames-from-deps_initramfs-list.patch new file mode 100644 index 0000000000..29cfade716 --- /dev/null +++ b/target/linux/generic/pending-5.15/240-remove-unsane-filenames-from-deps_initramfs-list.patch @@ -0,0 +1,30 @@ +From: Gabor Juhos <juhosg@openwrt.org> +Subject: usr: sanitize deps_initramfs list + +If any filename in the intramfs dependency +list contains a colon, that causes a kernel +build error like this: + +/devel/openwrt/build_dir/linux-ar71xx_generic/linux-3.6.6/usr/Makefile:58: *** multiple target patterns. Stop. +make[5]: *** [usr] Error 2 + +Fix it by removing such filenames from the +deps_initramfs list. + +Signed-off-by: Gabor Juhos <juhosg@openwrt.org> +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + usr/Makefile | 8 +++++--- + 1 file changed, 5 insertions(+), 3 deletions(-) + +--- a/usr/Makefile ++++ b/usr/Makefile +@@ -61,6 +61,8 @@ hostprogs := gen_init_cpio + # The dependency list is generated by gen_initramfs.sh -l + -include $(obj)/.initramfs_data.cpio.d + ++deps_initramfs := $(foreach v,$(deps_initramfs),$(if $(findstring :,$(v)),,$(v))) ++ + # do not try to update files included in initramfs + $(deps_initramfs): ; + diff --git a/target/linux/generic/pending-5.15/261-enable_wilink_platform_without_drivers.patch b/target/linux/generic/pending-5.15/261-enable_wilink_platform_without_drivers.patch new file mode 100644 index 0000000000..cd31f9d934 --- /dev/null +++ b/target/linux/generic/pending-5.15/261-enable_wilink_platform_without_drivers.patch @@ -0,0 +1,20 @@ +From: Imre Kaloz <kaloz@openwrt.org> +Subject: [PATCH] hack: net: wireless: make the wl12xx glue code available with + compat-wireless, too + +Signed-off-by: Imre Kaloz <kaloz@openwrt.org> +--- + drivers/net/wireless/ti/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/wireless/ti/Kconfig ++++ b/drivers/net/wireless/ti/Kconfig +@@ -20,7 +20,7 @@ source "drivers/net/wireless/ti/wlcore/K + + config WILINK_PLATFORM_DATA + bool "TI WiLink platform data" +- depends on WLCORE_SDIO || WL1251_SDIO ++ depends on WLCORE_SDIO || WL1251_SDIO || ARCH_OMAP2PLUS + default y + help + Small platform data bit needed to pass data to the sdio modules. 
diff --git a/target/linux/generic/pending-5.15/270-platform-mikrotik-build-bits.patch b/target/linux/generic/pending-5.15/270-platform-mikrotik-build-bits.patch new file mode 100644 index 0000000000..df738ef97b --- /dev/null +++ b/target/linux/generic/pending-5.15/270-platform-mikrotik-build-bits.patch @@ -0,0 +1,31 @@ +From c2deb5ef01a0ef09088832744cbace9e239a6ee0 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Thibaut=20VAR=C3=88NE?= <hacks@slashdirt.org> +Date: Sat, 28 Mar 2020 12:11:50 +0100 +Subject: [PATCH] generic: platform/mikrotik build bits (5.4) +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This patch adds platform/mikrotik kernel build bits + +Signed-off-by: Thibaut VARÈNE <hacks@slashdirt.org> +--- + drivers/platform/Kconfig | 2 ++ + drivers/platform/Makefile | 1 + + 2 files changed, 3 insertions(+) + +--- a/drivers/platform/Kconfig ++++ b/drivers/platform/Kconfig +@@ -13,3 +13,5 @@ source "drivers/platform/chrome/Kconfig" + source "drivers/platform/mellanox/Kconfig" + + source "drivers/platform/olpc/Kconfig" ++ ++source "drivers/platform/mikrotik/Kconfig" +--- a/drivers/platform/Makefile ++++ b/drivers/platform/Makefile +@@ -9,3 +9,4 @@ obj-$(CONFIG_MIPS) += mips/ + obj-$(CONFIG_OLPC_EC) += olpc/ + obj-$(CONFIG_GOLDFISH) += goldfish/ + obj-$(CONFIG_CHROME_PLATFORMS) += chrome/ ++obj-$(CONFIG_MIKROTIK) += mikrotik/ diff --git a/target/linux/generic/pending-5.15/300-mips_expose_boot_raw.patch b/target/linux/generic/pending-5.15/300-mips_expose_boot_raw.patch new file mode 100644 index 0000000000..706c5dee22 --- /dev/null +++ b/target/linux/generic/pending-5.15/300-mips_expose_boot_raw.patch @@ -0,0 +1,40 @@ +From: Mark Miller <mark@mirell.org> +Subject: mips: expose CONFIG_BOOT_RAW + +This exposes the CONFIG_BOOT_RAW symbol in Kconfig. This is needed on +certain Broadcom chipsets running CFE in order to load the kernel. + +Signed-off-by: Mark Miller <mark@mirell.org> +Acked-by: Rob Landley <rob@landley.net> +--- +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -1085,9 +1085,6 @@ config FW_ARC + config ARCH_MAY_HAVE_PC_FDC + bool + +-config BOOT_RAW +- bool +- + config CEVT_BCM1480 + bool + +@@ -3182,6 +3179,18 @@ choice + bool "Extend builtin kernel arguments with bootloader arguments" + endchoice + ++config BOOT_RAW ++ bool "Enable the kernel to be executed from the load address" ++ default n ++ help ++ Allow the kernel to be executed from the load address for ++ bootloaders which cannot read the ELF format. This places ++ a jump to start_kernel at the load address. ++ ++ If unsure, say N. ++ ++ ++ + endmenu + + config LOCKDEP_SUPPORT diff --git a/target/linux/generic/pending-5.15/302-mips_no_branch_likely.patch b/target/linux/generic/pending-5.15/302-mips_no_branch_likely.patch new file mode 100644 index 0000000000..271923fca8 --- /dev/null +++ b/target/linux/generic/pending-5.15/302-mips_no_branch_likely.patch @@ -0,0 +1,22 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: mips: use -mno-branch-likely for kernel and userspace + +saves ~11k kernel size after lzma and ~12k squashfs size in the + +lede-commit: 41a039f46450ffae9483d6216422098669da2900 +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + arch/mips/Makefile | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/mips/Makefile ++++ b/arch/mips/Makefile +@@ -95,7 +95,7 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin + # machines may also. Since BFD is incredibly buggy with respect to + # crossformat linking we rely on the elf2ecoff tool for format conversion. 
+ # +-cflags-y += -G 0 -mno-abicalls -fno-pic -pipe ++cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely + cflags-y += -msoft-float + LDFLAGS_vmlinux += -G 0 -static -n -nostdlib + KBUILD_AFLAGS_MODULE += -mlong-calls diff --git a/target/linux/generic/pending-5.15/305-mips_module_reloc.patch b/target/linux/generic/pending-5.15/305-mips_module_reloc.patch new file mode 100644 index 0000000000..839ba24293 --- /dev/null +++ b/target/linux/generic/pending-5.15/305-mips_module_reloc.patch @@ -0,0 +1,371 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: mips: replace -mlong-calls with -mno-long-calls to make function calls faster in kernel modules to achieve this, try to + +lede-commit: 3b3d64743ba2a874df9d70cd19e242205b0a788c +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + arch/mips/Makefile | 5 + + arch/mips/include/asm/module.h | 5 + + arch/mips/kernel/module.c | 279 ++++++++++++++++++++++++++++++++++++++++- + 3 files changed, 284 insertions(+), 5 deletions(-) + +--- a/arch/mips/Makefile ++++ b/arch/mips/Makefile +@@ -98,8 +98,18 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin + cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely + cflags-y += -msoft-float + LDFLAGS_vmlinux += -G 0 -static -n -nostdlib ++ifdef CONFIG_64BIT + KBUILD_AFLAGS_MODULE += -mlong-calls + KBUILD_CFLAGS_MODULE += -mlong-calls ++else ++ ifdef CONFIG_DYNAMIC_FTRACE ++ KBUILD_AFLAGS_MODULE += -mlong-calls ++ KBUILD_CFLAGS_MODULE += -mlong-calls ++ else ++ KBUILD_AFLAGS_MODULE += -mno-long-calls ++ KBUILD_CFLAGS_MODULE += -mno-long-calls ++ endif ++endif + + ifeq ($(CONFIG_RELOCATABLE),y) + LDFLAGS_vmlinux += --emit-relocs +--- a/arch/mips/include/asm/module.h ++++ b/arch/mips/include/asm/module.h +@@ -12,6 +12,11 @@ struct mod_arch_specific { + const struct exception_table_entry *dbe_start; + const struct exception_table_entry *dbe_end; + struct mips_hi16 *r_mips_hi16_list; ++ ++ void *phys_plt_tbl; ++ void *virt_plt_tbl; ++ unsigned int phys_plt_offset; ++ unsigned int virt_plt_offset; + }; + + typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */ +--- a/arch/mips/kernel/module.c ++++ b/arch/mips/kernel/module.c +@@ -31,14 +31,221 @@ struct mips_hi16 { + static LIST_HEAD(dbe_list); + static DEFINE_SPINLOCK(dbe_lock); + +-#ifdef MODULE_START ++/* ++ * Get the potential max trampolines size required of the init and ++ * non-init sections. Only used if we cannot find enough contiguous ++ * physically mapped memory to put the module into. ++ */ ++static unsigned int ++get_plt_size(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, ++ const char *secstrings, unsigned int symindex, bool is_init) ++{ ++ unsigned long ret = 0; ++ unsigned int i, j; ++ Elf_Sym *syms; ++ ++ /* Everything marked ALLOC (this includes the exported symbols) */ ++ for (i = 1; i < hdr->e_shnum; ++i) { ++ unsigned int info = sechdrs[i].sh_info; ++ ++ if (sechdrs[i].sh_type != SHT_REL ++ && sechdrs[i].sh_type != SHT_RELA) ++ continue; ++ ++ /* Not a valid relocation section? 
*/ ++ if (info >= hdr->e_shnum) ++ continue; ++ ++ /* Don't bother with non-allocated sections */ ++ if (!(sechdrs[info].sh_flags & SHF_ALLOC)) ++ continue; ++ ++ /* If it's called *.init*, and we're not init, we're ++ not interested */ ++ if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0) ++ != is_init) ++ continue; ++ ++ syms = (Elf_Sym *) sechdrs[symindex].sh_addr; ++ if (sechdrs[i].sh_type == SHT_REL) { ++ Elf_Mips_Rel *rel = (void *) sechdrs[i].sh_addr; ++ unsigned int size = sechdrs[i].sh_size / sizeof(*rel); ++ ++ for (j = 0; j < size; ++j) { ++ Elf_Sym *sym; ++ ++ if (ELF_MIPS_R_TYPE(rel[j]) != R_MIPS_26) ++ continue; ++ ++ sym = syms + ELF_MIPS_R_SYM(rel[j]); ++ if (!is_init && sym->st_shndx != SHN_UNDEF) ++ continue; ++ ++ ret += 4 * sizeof(int); ++ } ++ } else { ++ Elf_Mips_Rela *rela = (void *) sechdrs[i].sh_addr; ++ unsigned int size = sechdrs[i].sh_size / sizeof(*rela); ++ ++ for (j = 0; j < size; ++j) { ++ Elf_Sym *sym; ++ ++ if (ELF_MIPS_R_TYPE(rela[j]) != R_MIPS_26) ++ continue; ++ ++ sym = syms + ELF_MIPS_R_SYM(rela[j]); ++ if (!is_init && sym->st_shndx != SHN_UNDEF) ++ continue; ++ ++ ret += 4 * sizeof(int); ++ } ++ } ++ } ++ ++ return ret; ++} ++ ++#ifndef MODULE_START ++static void *alloc_phys(unsigned long size) ++{ ++ unsigned order; ++ struct page *page; ++ struct page *p; ++ ++ size = PAGE_ALIGN(size); ++ order = get_order(size); ++ ++ page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN | ++ __GFP_THISNODE, order); ++ if (!page) ++ return NULL; ++ ++ split_page(page, order); ++ ++ /* mark all pages except for the last one */ ++ for (p = page; p + 1 < page + (size >> PAGE_SHIFT); ++p) ++ set_bit(PG_owner_priv_1, &p->flags); ++ ++ for (p = page + (size >> PAGE_SHIFT); p < page + (1 << order); ++p) ++ __free_page(p); ++ ++ return page_address(page); ++} ++#endif ++ ++static void free_phys(void *ptr) ++{ ++ struct page *page; ++ bool free; ++ ++ page = virt_to_page(ptr); ++ do { ++ free = test_and_clear_bit(PG_owner_priv_1, &page->flags); ++ __free_page(page); ++ page++; ++ } while (free); ++} ++ ++ + void *module_alloc(unsigned long size) + { ++#ifdef MODULE_START + return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END, + GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, + __builtin_return_address(0)); ++#else ++ void *ptr; ++ ++ if (size == 0) ++ return NULL; ++ ++ ptr = alloc_phys(size); ++ ++ /* If we failed to allocate physically contiguous memory, ++ * fall back to regular vmalloc. 
The module loader code will ++ * create jump tables to handle long jumps */ ++ if (!ptr) ++ return vmalloc(size); ++ ++ return ptr; ++#endif + } ++ ++static inline bool is_phys_addr(void *ptr) ++{ ++#ifdef CONFIG_64BIT ++ return (KSEGX((unsigned long)ptr) == CKSEG0); ++#else ++ return (KSEGX(ptr) == KSEG0); + #endif ++} ++ ++/* Free memory returned from module_alloc */ ++void module_memfree(void *module_region) ++{ ++ if (is_phys_addr(module_region)) ++ free_phys(module_region); ++ else ++ vfree(module_region); ++} ++ ++static void *__module_alloc(int size, bool phys) ++{ ++ void *ptr; ++ ++ if (phys) ++ ptr = kmalloc(size, GFP_KERNEL); ++ else ++ ptr = vmalloc(size); ++ return ptr; ++} ++ ++static void __module_free(void *ptr) ++{ ++ if (is_phys_addr(ptr)) ++ kfree(ptr); ++ else ++ vfree(ptr); ++} ++ ++int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, ++ char *secstrings, struct module *mod) ++{ ++ unsigned int symindex = 0; ++ unsigned int core_size, init_size; ++ int i; ++ ++ mod->arch.phys_plt_offset = 0; ++ mod->arch.virt_plt_offset = 0; ++ mod->arch.phys_plt_tbl = NULL; ++ mod->arch.virt_plt_tbl = NULL; ++ ++ if (IS_ENABLED(CONFIG_64BIT)) ++ return 0; ++ ++ for (i = 1; i < hdr->e_shnum; i++) ++ if (sechdrs[i].sh_type == SHT_SYMTAB) ++ symindex = i; ++ ++ core_size = get_plt_size(hdr, sechdrs, secstrings, symindex, false); ++ init_size = get_plt_size(hdr, sechdrs, secstrings, symindex, true); ++ ++ if ((core_size + init_size) == 0) ++ return 0; ++ ++ mod->arch.phys_plt_tbl = __module_alloc(core_size + init_size, 1); ++ if (!mod->arch.phys_plt_tbl) ++ return -ENOMEM; ++ ++ mod->arch.virt_plt_tbl = __module_alloc(core_size + init_size, 0); ++ if (!mod->arch.virt_plt_tbl) { ++ __module_free(mod->arch.phys_plt_tbl); ++ mod->arch.phys_plt_tbl = NULL; ++ return -ENOMEM; ++ } ++ ++ return 0; ++} + + static int apply_r_mips_none(struct module *me, u32 *location, + u32 base, Elf_Addr v, bool rela) +@@ -54,9 +261,40 @@ static int apply_r_mips_32(struct module + return 0; + } + ++static Elf_Addr add_plt_entry_to(unsigned *plt_offset, ++ void *start, Elf_Addr v) ++{ ++ unsigned *tramp = start + *plt_offset; ++ *plt_offset += 4 * sizeof(int); ++ ++ /* adjust carry for addiu */ ++ if (v & 0x00008000) ++ v += 0x10000; ++ ++ tramp[0] = 0x3c190000 | (v >> 16); /* lui t9, hi16 */ ++ tramp[1] = 0x27390000 | (v & 0xffff); /* addiu t9, t9, lo16 */ ++ tramp[2] = 0x03200008; /* jr t9 */ ++ tramp[3] = 0x00000000; /* nop */ ++ ++ return (Elf_Addr) tramp; ++} ++ ++static Elf_Addr add_plt_entry(struct module *me, void *location, Elf_Addr v) ++{ ++ if (is_phys_addr(location)) ++ return add_plt_entry_to(&me->arch.phys_plt_offset, ++ me->arch.phys_plt_tbl, v); ++ else ++ return add_plt_entry_to(&me->arch.virt_plt_offset, ++ me->arch.virt_plt_tbl, v); ++ ++} ++ + static int apply_r_mips_26(struct module *me, u32 *location, + u32 base, Elf_Addr v, bool rela) + { ++ u32 ofs = base & 0x03ffffff; ++ + if (v % 4) { + pr_err("module %s: dangerous R_MIPS_26 relocation\n", + me->name); +@@ -64,13 +302,17 @@ static int apply_r_mips_26(struct module + } + + if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { +- pr_err("module %s: relocation overflow\n", +- me->name); +- return -ENOEXEC; ++ v = add_plt_entry(me, location, v + (ofs << 2)); ++ if (!v) { ++ pr_err("module %s: relocation overflow\n", ++ me->name); ++ return -ENOEXEC; ++ } ++ ofs = 0; + } + + *location = (*location & ~0x03ffffff) | +- ((base + (v >> 2)) & 0x03ffffff); ++ ((ofs + (v >> 2)) & 0x03ffffff); + + return 0; + } +@@ 
-446,9 +688,36 @@ int module_finalize(const Elf_Ehdr *hdr, + list_add(&me->arch.dbe_list, &dbe_list); + spin_unlock_irq(&dbe_lock); + } ++ ++ /* Get rid of the fixup trampoline if we're running the module ++ * from physically mapped address space */ ++ if (me->arch.phys_plt_offset == 0) { ++ __module_free(me->arch.phys_plt_tbl); ++ me->arch.phys_plt_tbl = NULL; ++ } ++ if (me->arch.virt_plt_offset == 0) { ++ __module_free(me->arch.virt_plt_tbl); ++ me->arch.virt_plt_tbl = NULL; ++ } ++ + return 0; + } + ++void module_arch_freeing_init(struct module *mod) ++{ ++ if (mod->state == MODULE_STATE_LIVE) ++ return; ++ ++ if (mod->arch.phys_plt_tbl) { ++ __module_free(mod->arch.phys_plt_tbl); ++ mod->arch.phys_plt_tbl = NULL; ++ } ++ if (mod->arch.virt_plt_tbl) { ++ __module_free(mod->arch.virt_plt_tbl); ++ mod->arch.virt_plt_tbl = NULL; ++ } ++} ++ + void module_arch_cleanup(struct module *mod) + { + spin_lock_irq(&dbe_lock); diff --git a/target/linux/generic/pending-5.15/307-mips_highmem_offset.patch b/target/linux/generic/pending-5.15/307-mips_highmem_offset.patch new file mode 100644 index 0000000000..e1ada22f34 --- /dev/null +++ b/target/linux/generic/pending-5.15/307-mips_highmem_offset.patch @@ -0,0 +1,19 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: kernel: adjust mips highmem offset to avoid the need for -mlong-calls on systems with >256M RAM + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + arch/mips/include/asm/mach-generic/spaces.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/mips/include/asm/mach-generic/spaces.h ++++ b/arch/mips/include/asm/mach-generic/spaces.h +@@ -54,7 +54,7 @@ + * Memory above this physical address will be considered highmem. + */ + #ifndef HIGHMEM_START +-#define HIGHMEM_START _AC(0x20000000, UL) ++#define HIGHMEM_START _AC(0x10000000, UL) + #endif + + #endif /* CONFIG_32BIT */ diff --git a/target/linux/generic/pending-5.15/308-mips32r2_tune.patch b/target/linux/generic/pending-5.15/308-mips32r2_tune.patch new file mode 100644 index 0000000000..bbea947382 --- /dev/null +++ b/target/linux/generic/pending-5.15/308-mips32r2_tune.patch @@ -0,0 +1,22 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: kernel: add -mtune=34kc to MIPS CFLAGS when building for mips32r2 + +This provides a good tradeoff across at least 24Kc-74Kc, while also +producing smaller code. 
+ +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + arch/mips/Makefile | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/mips/Makefile ++++ b/arch/mips/Makefile +@@ -174,7 +174,7 @@ cflags-$(CONFIG_CPU_VR41XX) += -march=r4 + cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap + cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap + cflags-$(CONFIG_CPU_MIPS32_R1) += -march=mips32 -Wa,--trap +-cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -Wa,--trap ++cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -mtune=34kc -Wa,--trap + cflags-$(CONFIG_CPU_MIPS32_R5) += -march=mips32r5 -Wa,--trap -modd-spreg + cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap -modd-spreg + cflags-$(CONFIG_CPU_MIPS64_R1) += -march=mips64 -Wa,--trap diff --git a/target/linux/generic/pending-5.15/309-MIPS-Add-CPU-option-reporting-to-proc-cpuinfo.patch b/target/linux/generic/pending-5.15/309-MIPS-Add-CPU-option-reporting-to-proc-cpuinfo.patch new file mode 100644 index 0000000000..794f027f18 --- /dev/null +++ b/target/linux/generic/pending-5.15/309-MIPS-Add-CPU-option-reporting-to-proc-cpuinfo.patch @@ -0,0 +1,140 @@ +From 87ec87c2ad615c1a177cd08ef5fa29fc739f6e50 Mon Sep 17 00:00:00 2001 +From: Hauke Mehrtens <hauke@hauke-m.de> +Date: Sun, 23 Dec 2018 18:06:53 +0100 +Subject: [PATCH] MIPS: Add CPU option reporting to /proc/cpuinfo + +Many MIPS CPUs have optional CPU features which are not activates for +all CPU cores. Print the CPU options which are implemented in the core +in /proc/cpuinfo. This makes it possible to see what features are +supported and which are not supported. This should cover all standard +MIPS extensions, before it only printed information about the main MIPS +ASEs. + +Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de> +--- + arch/mips/kernel/proc.c | 116 ++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 116 insertions(+) + +--- a/arch/mips/kernel/proc.c ++++ b/arch/mips/kernel/proc.c +@@ -138,6 +138,120 @@ static int show_cpuinfo(struct seq_file + seq_printf(m, "micromips kernel\t: %s\n", + (read_c0_config3() & MIPS_CONF3_ISA_OE) ? 
"yes" : "no"); + } ++ ++ seq_printf(m, "Options implemented\t:"); ++ if (cpu_has_tlb) ++ seq_printf(m, "%s", " tlb"); ++ if (cpu_has_ftlb) ++ seq_printf(m, "%s", " ftlb"); ++ if (cpu_has_tlbinv) ++ seq_printf(m, "%s", " tlbinv"); ++ if (cpu_has_segments) ++ seq_printf(m, "%s", " segments"); ++ if (cpu_has_rixiex) ++ seq_printf(m, "%s", " rixiex"); ++ if (cpu_has_ldpte) ++ seq_printf(m, "%s", " ldpte"); ++ if (cpu_has_maar) ++ seq_printf(m, "%s", " maar"); ++ if (cpu_has_rw_llb) ++ seq_printf(m, "%s", " rw_llb"); ++ if (cpu_has_4kex) ++ seq_printf(m, "%s", " 4kex"); ++ if (cpu_has_3k_cache) ++ seq_printf(m, "%s", " 3k_cache"); ++ if (cpu_has_4k_cache) ++ seq_printf(m, "%s", " 4k_cache"); ++ if (cpu_has_6k_cache) ++ seq_printf(m, "%s", " 6k_cache"); ++ if (cpu_has_8k_cache) ++ seq_printf(m, "%s", " 8k_cache"); ++ if (cpu_has_tx39_cache) ++ seq_printf(m, "%s", " tx39_cache"); ++ if (cpu_has_octeon_cache) ++ seq_printf(m, "%s", " octeon_cache"); ++ if (cpu_has_fpu) ++ seq_printf(m, "%s", " fpu"); ++ if (cpu_has_32fpr) ++ seq_printf(m, "%s", " 32fpr"); ++ if (cpu_has_cache_cdex_p) ++ seq_printf(m, "%s", " cache_cdex_p"); ++ if (cpu_has_cache_cdex_s) ++ seq_printf(m, "%s", " cache_cdex_s"); ++ if (cpu_has_prefetch) ++ seq_printf(m, "%s", " prefetch"); ++ if (cpu_has_mcheck) ++ seq_printf(m, "%s", " mcheck"); ++ if (cpu_has_ejtag) ++ seq_printf(m, "%s", " ejtag"); ++ if (cpu_has_llsc) ++ seq_printf(m, "%s", " llsc"); ++ if (cpu_has_guestctl0ext) ++ seq_printf(m, "%s", " guestctl0ext"); ++ if (cpu_has_guestctl1) ++ seq_printf(m, "%s", " guestctl1"); ++ if (cpu_has_guestctl2) ++ seq_printf(m, "%s", " guestctl2"); ++ if (cpu_has_guestid) ++ seq_printf(m, "%s", " guestid"); ++ if (cpu_has_drg) ++ seq_printf(m, "%s", " drg"); ++ if (cpu_has_rixi) ++ seq_printf(m, "%s", " rixi"); ++ if (cpu_has_lpa) ++ seq_printf(m, "%s", " lpa"); ++ if (cpu_has_mvh) ++ seq_printf(m, "%s", " mvh"); ++ if (cpu_has_vtag_icache) ++ seq_printf(m, "%s", " vtag_icache"); ++ if (cpu_has_dc_aliases) ++ seq_printf(m, "%s", " dc_aliases"); ++ if (cpu_has_ic_fills_f_dc) ++ seq_printf(m, "%s", " ic_fills_f_dc"); ++ if (cpu_has_pindexed_dcache) ++ seq_printf(m, "%s", " pindexed_dcache"); ++ if (cpu_has_userlocal) ++ seq_printf(m, "%s", " userlocal"); ++ if (cpu_has_nofpuex) ++ seq_printf(m, "%s", " nofpuex"); ++ if (cpu_has_vint) ++ seq_printf(m, "%s", " vint"); ++ if (cpu_has_veic) ++ seq_printf(m, "%s", " veic"); ++ if (cpu_has_inclusive_pcaches) ++ seq_printf(m, "%s", " inclusive_pcaches"); ++ if (cpu_has_perf_cntr_intr_bit) ++ seq_printf(m, "%s", " perf_cntr_intr_bit"); ++ if (cpu_has_ufr) ++ seq_printf(m, "%s", " ufr"); ++ if (cpu_has_fre) ++ seq_printf(m, "%s", " fre"); ++ if (cpu_has_cdmm) ++ seq_printf(m, "%s", " cdmm"); ++ if (cpu_has_small_pages) ++ seq_printf(m, "%s", " small_pages"); ++ if (cpu_has_nan_legacy) ++ seq_printf(m, "%s", " nan_legacy"); ++ if (cpu_has_nan_2008) ++ seq_printf(m, "%s", " nan_2008"); ++ if (cpu_has_ebase_wg) ++ seq_printf(m, "%s", " ebase_wg"); ++ if (cpu_has_badinstr) ++ seq_printf(m, "%s", " badinstr"); ++ if (cpu_has_badinstrp) ++ seq_printf(m, "%s", " badinstrp"); ++ if (cpu_has_contextconfig) ++ seq_printf(m, "%s", " contextconfig"); ++ if (cpu_has_perf) ++ seq_printf(m, "%s", " perf"); ++ if (cpu_has_shared_ftlb_ram) ++ seq_printf(m, "%s", " shared_ftlb_ram"); ++ if (cpu_has_shared_ftlb_entries) ++ seq_printf(m, "%s", " shared_ftlb_entries"); ++ if (cpu_has_mipsmt_pertccounters) ++ seq_printf(m, "%s", " mipsmt_pertccounters"); ++ seq_printf(m, "\n"); ++ + seq_printf(m, "shadow register 
sets\t: %d\n", + cpu_data[n].srsets); + seq_printf(m, "kscratch registers\t: %d\n", diff --git a/target/linux/generic/pending-5.15/310-arm_module_unresolved_weak_sym.patch b/target/linux/generic/pending-5.15/310-arm_module_unresolved_weak_sym.patch new file mode 100644 index 0000000000..191dc6ac3c --- /dev/null +++ b/target/linux/generic/pending-5.15/310-arm_module_unresolved_weak_sym.patch @@ -0,0 +1,22 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: fix errors in unresolved weak symbols on arm + +lede-commit: 570699d4838a907c3ef9f2819bf19eb72997b32f +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + arch/arm/kernel/module.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/arch/arm/kernel/module.c ++++ b/arch/arm/kernel/module.c +@@ -105,6 +105,10 @@ apply_relocate(Elf32_Shdr *sechdrs, cons + return -ENOEXEC; + } + ++ if ((IS_ERR_VALUE(sym->st_value) || !sym->st_value) && ++ ELF_ST_BIND(sym->st_info) == STB_WEAK) ++ continue; ++ + loc = dstsec->sh_addr + rel->r_offset; + + switch (ELF32_R_TYPE(rel->r_info)) { diff --git a/target/linux/generic/pending-5.15/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch b/target/linux/generic/pending-5.15/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch new file mode 100644 index 0000000000..2808c95322 --- /dev/null +++ b/target/linux/generic/pending-5.15/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch @@ -0,0 +1,281 @@ +From: Yousong Zhou <yszhou4tech@gmail.com> +Subject: MIPS: kexec: Accept command line parameters from userspace. + +Signed-off-by: Yousong Zhou <yszhou4tech@gmail.com> +--- + arch/mips/kernel/machine_kexec.c | 153 +++++++++++++++++++++++++++++++----- + arch/mips/kernel/machine_kexec.h | 20 +++++ + arch/mips/kernel/relocate_kernel.S | 21 +++-- + 3 files changed, 167 insertions(+), 27 deletions(-) + create mode 100644 arch/mips/kernel/machine_kexec.h + +--- a/arch/mips/kernel/machine_kexec.c ++++ b/arch/mips/kernel/machine_kexec.c +@@ -9,14 +9,11 @@ + #include <linux/delay.h> + #include <linux/libfdt.h> + ++#include <asm/bootinfo.h> + #include <asm/cacheflush.h> + #include <asm/page.h> +- +-extern const unsigned char relocate_new_kernel[]; +-extern const size_t relocate_new_kernel_size; +- +-extern unsigned long kexec_start_address; +-extern unsigned long kexec_indirection_page; ++#include <linux/uaccess.h> ++#include "machine_kexec.h" + + static unsigned long reboot_code_buffer; + +@@ -30,6 +27,101 @@ void (*_crash_smp_send_stop)(void) = NUL + void (*_machine_kexec_shutdown)(void) = NULL; + void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL; + ++static void machine_kexec_print_args(void) ++{ ++ unsigned long argc = (int)kexec_args[0]; ++ int i; ++ ++ pr_info("kexec_args[0] (argc): %lu\n", argc); ++ pr_info("kexec_args[1] (argv): %p\n", (void *)kexec_args[1]); ++ pr_info("kexec_args[2] (env ): %p\n", (void *)kexec_args[2]); ++ pr_info("kexec_args[3] (desc): %p\n", (void *)kexec_args[3]); ++ ++ for (i = 0; i < argc; i++) { ++ pr_info("kexec_argv[%d] = %p, %s\n", ++ i, kexec_argv[i], kexec_argv[i]); ++ } ++} ++ ++static void machine_kexec_init_argv(struct kimage *image) ++{ ++ void __user *buf = NULL; ++ size_t bufsz; ++ size_t size; ++ int i; ++ ++ bufsz = 0; ++ for (i = 0; i < image->nr_segments; i++) { ++ struct kexec_segment *seg; ++ ++ seg = &image->segment[i]; ++ if (seg->bufsz < 6) ++ continue; ++ ++ if (strncmp((char *) seg->buf, "kexec ", 6)) ++ continue; ++ ++ buf = seg->buf; ++ bufsz = seg->bufsz; ++ break; ++ } ++ ++ if (!buf) ++ return; ++ ++ size = 
KEXEC_COMMAND_LINE_SIZE; ++ size = min(size, bufsz); ++ if (size < bufsz) ++ pr_warn("kexec command line truncated to %zd bytes\n", size); ++ ++ /* Copy to kernel space */ ++ if (copy_from_user(kexec_argv_buf, buf, size)) ++ pr_warn("kexec command line copy to kernel space failed\n"); ++ ++ kexec_argv_buf[size - 1] = 0; ++} ++ ++static void machine_kexec_parse_argv(struct kimage *image) ++{ ++ char *reboot_code_buffer; ++ int reloc_delta; ++ char *ptr; ++ int argc; ++ int i; ++ ++ ptr = kexec_argv_buf; ++ argc = 0; ++ ++ /* ++ * convert command line string to array of parameters ++ * (as bootloader does). ++ */ ++ while (ptr && *ptr && (KEXEC_MAX_ARGC > argc)) { ++ if (*ptr == ' ') { ++ *ptr++ = '\0'; ++ continue; ++ } ++ ++ kexec_argv[argc++] = ptr; ++ ptr = strchr(ptr, ' '); ++ } ++ ++ if (!argc) ++ return; ++ ++ kexec_args[0] = argc; ++ kexec_args[1] = (unsigned long)kexec_argv; ++ kexec_args[2] = 0; ++ kexec_args[3] = 0; ++ ++ reboot_code_buffer = page_address(image->control_code_page); ++ reloc_delta = reboot_code_buffer - (char *)kexec_relocate_new_kernel; ++ ++ kexec_args[1] += reloc_delta; ++ for (i = 0; i < argc; i++) ++ kexec_argv[i] += reloc_delta; ++} ++ + static void kexec_image_info(const struct kimage *kimage) + { + unsigned long i; +@@ -99,6 +191,18 @@ machine_kexec_prepare(struct kimage *kim + #endif + + kexec_image_info(kimage); ++ /* ++ * Whenever arguments passed from kexec-tools, Init the arguments as ++ * the original ones to try avoiding booting failure. ++ */ ++ ++ kexec_args[0] = fw_arg0; ++ kexec_args[1] = fw_arg1; ++ kexec_args[2] = fw_arg2; ++ kexec_args[3] = fw_arg3; ++ ++ machine_kexec_init_argv(kimage); ++ machine_kexec_parse_argv(kimage); + + if (_machine_kexec_prepare) + return _machine_kexec_prepare(kimage); +@@ -161,7 +265,7 @@ machine_crash_shutdown(struct pt_regs *r + void kexec_nonboot_cpu_jump(void) + { + local_flush_icache_range((unsigned long)relocated_kexec_smp_wait, +- reboot_code_buffer + relocate_new_kernel_size); ++ reboot_code_buffer + KEXEC_RELOCATE_NEW_KERNEL_SIZE); + + relocated_kexec_smp_wait(NULL); + } +@@ -199,7 +303,7 @@ void kexec_reboot(void) + * machine_kexec() CPU. 
+ */ + local_flush_icache_range(reboot_code_buffer, +- reboot_code_buffer + relocate_new_kernel_size); ++ reboot_code_buffer + KEXEC_RELOCATE_NEW_KERNEL_SIZE); + + do_kexec = (void *)reboot_code_buffer; + do_kexec(); +@@ -212,10 +316,12 @@ machine_kexec(struct kimage *image) + unsigned long *ptr; + + reboot_code_buffer = +- (unsigned long)page_address(image->control_code_page); ++ (unsigned long)page_address(image->control_code_page); ++ pr_info("reboot_code_buffer = %p\n", (void *)reboot_code_buffer); + + kexec_start_address = + (unsigned long) phys_to_virt(image->start); ++ pr_info("kexec_start_address = %p\n", (void *)kexec_start_address); + + if (image->type == KEXEC_TYPE_DEFAULT) { + kexec_indirection_page = +@@ -223,9 +329,19 @@ machine_kexec(struct kimage *image) + } else { + kexec_indirection_page = (unsigned long)&image->head; + } ++ pr_info("kexec_indirection_page = %p\n", (void *)kexec_indirection_page); + +- memcpy((void*)reboot_code_buffer, relocate_new_kernel, +- relocate_new_kernel_size); ++ pr_info("Where is memcpy: %p\n", memcpy); ++ pr_info("kexec_relocate_new_kernel = %p, kexec_relocate_new_kernel_end = %p\n", ++ (void *)kexec_relocate_new_kernel, &kexec_relocate_new_kernel_end); ++ pr_info("Copy %lu bytes from %p to %p\n", KEXEC_RELOCATE_NEW_KERNEL_SIZE, ++ (void *)kexec_relocate_new_kernel, (void *)reboot_code_buffer); ++ memcpy((void*)reboot_code_buffer, kexec_relocate_new_kernel, ++ KEXEC_RELOCATE_NEW_KERNEL_SIZE); ++ ++ pr_info("Before _print_args().\n"); ++ machine_kexec_print_args(); ++ pr_info("Before eval loop.\n"); + + /* + * The generic kexec code builds a page list with physical +@@ -256,7 +372,7 @@ machine_kexec(struct kimage *image) + #ifdef CONFIG_SMP + /* All secondary cpus now may jump to kexec_wait cycle */ + relocated_kexec_smp_wait = reboot_code_buffer + +- (void *)(kexec_smp_wait - relocate_new_kernel); ++ (void *)(kexec_smp_wait - kexec_relocate_new_kernel); + smp_wmb(); + atomic_set(&kexec_ready_to_reboot, 1); + #endif +--- /dev/null ++++ b/arch/mips/kernel/machine_kexec.h +@@ -0,0 +1,20 @@ ++#ifndef _MACHINE_KEXEC_H ++#define _MACHINE_KEXEC_H ++ ++#ifndef __ASSEMBLY__ ++extern const unsigned char kexec_relocate_new_kernel[]; ++extern unsigned long kexec_relocate_new_kernel_end; ++extern unsigned long kexec_start_address; ++extern unsigned long kexec_indirection_page; ++ ++extern char kexec_argv_buf[]; ++extern char *kexec_argv[]; ++ ++#define KEXEC_RELOCATE_NEW_KERNEL_SIZE ((unsigned long)&kexec_relocate_new_kernel_end - (unsigned long)kexec_relocate_new_kernel) ++#endif /* !__ASSEMBLY__ */ ++ ++#define KEXEC_COMMAND_LINE_SIZE 256 ++#define KEXEC_ARGV_SIZE (KEXEC_COMMAND_LINE_SIZE / 16) ++#define KEXEC_MAX_ARGC (KEXEC_ARGV_SIZE / sizeof(long)) ++ ++#endif +--- a/arch/mips/kernel/relocate_kernel.S ++++ b/arch/mips/kernel/relocate_kernel.S +@@ -10,8 +10,9 @@ + #include <asm/mipsregs.h> + #include <asm/stackframe.h> + #include <asm/addrspace.h> ++#include "machine_kexec.h" + +-LEAF(relocate_new_kernel) ++LEAF(kexec_relocate_new_kernel) + PTR_L a0, arg0 + PTR_L a1, arg1 + PTR_L a2, arg2 +@@ -96,7 +97,7 @@ done: + #endif + /* jump to kexec_start_address */ + j s1 +- END(relocate_new_kernel) ++ END(kexec_relocate_new_kernel) + + #ifdef CONFIG_SMP + /* +@@ -182,9 +183,15 @@ kexec_indirection_page: + PTR 0 + .size kexec_indirection_page, PTRSIZE + +-relocate_new_kernel_end: ++kexec_argv_buf: ++ EXPORT(kexec_argv_buf) ++ .skip KEXEC_COMMAND_LINE_SIZE ++ .size kexec_argv_buf, KEXEC_COMMAND_LINE_SIZE ++ ++kexec_argv: ++ EXPORT(kexec_argv) ++ .skip 
KEXEC_ARGV_SIZE ++ .size kexec_argv, KEXEC_ARGV_SIZE + -relocate_new_kernel_size: +- EXPORT(relocate_new_kernel_size) +- PTR relocate_new_kernel_end - relocate_new_kernel +- .size relocate_new_kernel_size, PTRSIZE ++kexec_relocate_new_kernel_end: ++ EXPORT(kexec_relocate_new_kernel_end) diff --git a/target/linux/generic/pending-5.15/332-arc-add-OWRTDTB-section.patch b/target/linux/generic/pending-5.15/332-arc-add-OWRTDTB-section.patch new file mode 100644 index 0000000000..30158cf399 --- /dev/null +++ b/target/linux/generic/pending-5.15/332-arc-add-OWRTDTB-section.patch @@ -0,0 +1,84 @@ +From bb0c3b0175240bf152fd7c644821a0cf9f77c37c Mon Sep 17 00:00:00 2001 +From: Evgeniy Didin <Evgeniy.Didin@synopsys.com> +Date: Fri, 15 Mar 2019 18:53:38 +0300 +Subject: [PATCH] arc add OWRTDTB section + +This change allows OpenWRT to patch resulting kernel binary with +external .dtb. + +That allows us to re-use exactly the same vmlinux on different boards +given its ARC core configurations match (at least cache line sizes etc). + +"patch-dtb" searches for ASCII "OWRTDTB:" string and copies external +.dtb right after it, keeping the string in place. + +Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com> +Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com> +Signed-off-by: Evgeniy Didin <Evgeniy.Didin@synopsys.com> +--- + arch/arc/kernel/head.S | 10 ++++++++++ + arch/arc/kernel/setup.c | 4 +++- + arch/arc/kernel/vmlinux.lds.S | 13 +++++++++++++ + 3 files changed, 26 insertions(+), 1 deletion(-) + +--- a/arch/arc/kernel/head.S ++++ b/arch/arc/kernel/head.S +@@ -88,6 +88,16 @@ + DSP_EARLY_INIT + .endm + ++ ; Here "patch-dtb" will embed external .dtb ++ ; Note "patch-dtb" searches for ASCII "OWRTDTB:" string ++ ; and pastes .dtb right after it, hence the string precedes ++ ; __image_dtb symbol. ++ .section .owrt, "aw",@progbits ++ .ascii "OWRTDTB:" ++ENTRY(__image_dtb) ++ .fill 0x4000 ++END(__image_dtb) ++ + .section .init.text, "ax",@progbits + + ;---------------------------------------------------------------- +--- a/arch/arc/kernel/setup.c ++++ b/arch/arc/kernel/setup.c +@@ -495,6 +495,8 @@ static inline bool uboot_arg_invalid(uns + /* We always pass 0 as magic from U-boot */ + #define UBOOT_MAGIC_VALUE 0 + ++extern struct boot_param_header __image_dtb; ++ + void __init handle_uboot_args(void) + { + bool use_embedded_dtb = true; +@@ -533,7 +535,7 @@ void __init handle_uboot_args(void) + ignore_uboot_args: + + if (use_embedded_dtb) { +- machine_desc = setup_machine_fdt(__dtb_start); ++ machine_desc = setup_machine_fdt(&__image_dtb); + if (!machine_desc) + panic("Embedded DT invalid\n"); + } +--- a/arch/arc/kernel/vmlinux.lds.S ++++ b/arch/arc/kernel/vmlinux.lds.S +@@ -27,6 +27,19 @@ SECTIONS + + . = CONFIG_LINUX_LINK_BASE; + ++ /* ++ * In OpenWRT we want to patch built binary embedding .dtb of choice. ++ * This is implemented with "patch-dtb" utility which searches for ++ * "OWRTDTB:" string in first 16k of image and if it is found ++ * copies .dtb right after mentioned string. ++ * ++ * Note: "OWRTDTB:" won't be overwritten with .dtb, .dtb will follow it. ++ */ ++ .owrt : { ++ *(.owrt) ++ . 
= ALIGN(PAGE_SIZE); ++ } ++ + _int_vec_base_lds = .; + .vector : { + *(.vector) diff --git a/target/linux/generic/pending-5.15/333-arc-enable-unaligned-access-in-kernel-mode.patch b/target/linux/generic/pending-5.15/333-arc-enable-unaligned-access-in-kernel-mode.patch new file mode 100644 index 0000000000..1848a84cc4 --- /dev/null +++ b/target/linux/generic/pending-5.15/333-arc-enable-unaligned-access-in-kernel-mode.patch @@ -0,0 +1,24 @@ +From: Alexey Brodkin <abrodkin@synopsys.com> +Subject: arc: enable unaligned access in kernel mode + +This enables misaligned access handling even in kernel mode. +Some wireless drivers (ath9k-htc and mt7601u) use misaligned accesses +here and there and to cope with that without fixing stuff in the drivers +we're just gracefully handling it on ARC. + +Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com> +--- + arch/arc/kernel/unaligned.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/arc/kernel/unaligned.c ++++ b/arch/arc/kernel/unaligned.c +@@ -202,7 +202,7 @@ int misaligned_fixup(unsigned long addre + char buf[TASK_COMM_LEN]; + + /* handle user mode only and only if enabled by sysadmin */ +- if (!user_mode(regs) || !unaligned_enabled) ++ if (!unaligned_enabled) + return 1; + + if (no_unaligned_warning) { diff --git a/target/linux/generic/pending-5.15/342-powerpc-Enable-kernel-XZ-compression-option-on-PPC_8.patch b/target/linux/generic/pending-5.15/342-powerpc-Enable-kernel-XZ-compression-option-on-PPC_8.patch new file mode 100644 index 0000000000..6792a66f8a --- /dev/null +++ b/target/linux/generic/pending-5.15/342-powerpc-Enable-kernel-XZ-compression-option-on-PPC_8.patch @@ -0,0 +1,25 @@ +From 66770a004afe10df11d3902e16eaa0c2c39436bb Mon Sep 17 00:00:00 2001 +From: Pawel Dembicki <paweldembicki@gmail.com> +Date: Fri, 24 May 2019 17:56:19 +0200 +Subject: [PATCH] powerpc: Enable kernel XZ compression option on PPC_85xx + +Enable kernel XZ compression option on PPC_85xx. Tested with +simpleImage on TP-Link TL-WDR4900 (Freescale P1014 processor). 
+ +Suggested-by: Christian Lamparter <chunkeey@gmail.com> +Signed-off-by: Pawel Dembicki <paweldembicki@gmail.com> +--- + arch/powerpc/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/powerpc/Kconfig ++++ b/arch/powerpc/Kconfig +@@ -214,7 +214,7 @@ config PPC + select HAVE_KERNEL_GZIP + select HAVE_KERNEL_LZMA if DEFAULT_UIMAGE + select HAVE_KERNEL_LZO if DEFAULT_UIMAGE +- select HAVE_KERNEL_XZ if PPC_BOOK3S || 44x ++ select HAVE_KERNEL_XZ if PPC_BOOK3S || 44x || PPC_85xx + select HAVE_KPROBES + select HAVE_KPROBES_ON_FTRACE + select HAVE_KRETPROBES diff --git a/target/linux/generic/pending-5.15/400-mtd-mtdsplit-support.patch b/target/linux/generic/pending-5.15/400-mtd-mtdsplit-support.patch new file mode 100644 index 0000000000..6f816a6ad3 --- /dev/null +++ b/target/linux/generic/pending-5.15/400-mtd-mtdsplit-support.patch @@ -0,0 +1,313 @@ +--- a/drivers/mtd/Kconfig ++++ b/drivers/mtd/Kconfig +@@ -12,6 +12,25 @@ menuconfig MTD + + if MTD + ++menu "OpenWrt specific MTD options" ++ ++config MTD_ROOTFS_ROOT_DEV ++ bool "Automatically set 'rootfs' partition to be root filesystem" ++ default y ++ ++config MTD_SPLIT_FIRMWARE ++ bool "Automatically split firmware partition for kernel+rootfs" ++ default y ++ ++config MTD_SPLIT_FIRMWARE_NAME ++ string "Firmware partition name" ++ depends on MTD_SPLIT_FIRMWARE ++ default "firmware" ++ ++source "drivers/mtd/mtdsplit/Kconfig" ++ ++endmenu ++ + config MTD_TESTS + tristate "MTD tests support (DANGEROUS)" + depends on m +--- a/drivers/mtd/mtdpart.c ++++ b/drivers/mtd/mtdpart.c +@@ -15,10 +15,12 @@ + #include <linux/kmod.h> + #include <linux/mtd/mtd.h> + #include <linux/mtd/partitions.h> ++#include <linux/magic.h> + #include <linux/err.h> + #include <linux/of.h> + + #include "mtdcore.h" ++#include "mtdsplit/mtdsplit.h" + + /* + * MTD methods which simply translate the effective address and pass through +@@ -236,6 +238,146 @@ static int mtd_add_partition_attrs(struc + return ret; + } + ++static DEFINE_SPINLOCK(part_parser_lock); ++static LIST_HEAD(part_parsers); ++ ++static struct mtd_part_parser *mtd_part_parser_get(const char *name) ++{ ++ struct mtd_part_parser *p, *ret = NULL; ++ ++ spin_lock(&part_parser_lock); ++ ++ list_for_each_entry(p, &part_parsers, list) ++ if (!strcmp(p->name, name) && try_module_get(p->owner)) { ++ ret = p; ++ break; ++ } ++ ++ spin_unlock(&part_parser_lock); ++ ++ return ret; ++} ++ ++static inline void mtd_part_parser_put(const struct mtd_part_parser *p) ++{ ++ module_put(p->owner); ++} ++ ++static struct mtd_part_parser * ++get_partition_parser_by_type(enum mtd_parser_type type, ++ struct mtd_part_parser *start) ++{ ++ struct mtd_part_parser *p, *ret = NULL; ++ ++ spin_lock(&part_parser_lock); ++ ++ p = list_prepare_entry(start, &part_parsers, list); ++ if (start) ++ mtd_part_parser_put(start); ++ ++ list_for_each_entry_continue(p, &part_parsers, list) { ++ if (p->type == type && try_module_get(p->owner)) { ++ ret = p; ++ break; ++ } ++ } ++ ++ spin_unlock(&part_parser_lock); ++ ++ return ret; ++} ++ ++static int parse_mtd_partitions_by_type(struct mtd_info *master, ++ enum mtd_parser_type type, ++ const struct mtd_partition **pparts, ++ struct mtd_part_parser_data *data) ++{ ++ struct mtd_part_parser *prev = NULL; ++ int ret = 0; ++ ++ while (1) { ++ struct mtd_part_parser *parser; ++ ++ parser = get_partition_parser_by_type(type, prev); ++ if (!parser) ++ break; ++ ++ ret = (*parser->parse_fn)(master, pparts, data); ++ ++ if (ret > 0) { ++ mtd_part_parser_put(parser); ++ printk(KERN_NOTICE ++ 
"%d %s partitions found on MTD device %s\n", ++ ret, parser->name, master->name); ++ break; ++ } ++ ++ prev = parser; ++ } ++ ++ return ret; ++} ++ ++static int ++run_parsers_by_type(struct mtd_info *child, enum mtd_parser_type type) ++{ ++ struct mtd_partition *parts; ++ int nr_parts; ++ int i; ++ ++ nr_parts = parse_mtd_partitions_by_type(child, type, (const struct mtd_partition **)&parts, ++ NULL); ++ if (nr_parts <= 0) ++ return nr_parts; ++ ++ if (WARN_ON(!parts)) ++ return 0; ++ ++ for (i = 0; i < nr_parts; i++) { ++ /* adjust partition offsets */ ++ parts[i].offset += child->part.offset; ++ ++ mtd_add_partition(child->parent, ++ parts[i].name, ++ parts[i].offset, ++ parts[i].size); ++ } ++ ++ kfree(parts); ++ ++ return nr_parts; ++} ++ ++#ifdef CONFIG_MTD_SPLIT_FIRMWARE_NAME ++#define SPLIT_FIRMWARE_NAME CONFIG_MTD_SPLIT_FIRMWARE_NAME ++#else ++#define SPLIT_FIRMWARE_NAME "unused" ++#endif ++ ++static void split_firmware(struct mtd_info *master, struct mtd_info *part) ++{ ++ run_parsers_by_type(part, MTD_PARSER_TYPE_FIRMWARE); ++} ++ ++static void mtd_partition_split(struct mtd_info *master, struct mtd_info *part) ++{ ++ static int rootfs_found = 0; ++ ++ if (rootfs_found) ++ return; ++ ++ if (!strcmp(part->name, "rootfs")) { ++ run_parsers_by_type(part, MTD_PARSER_TYPE_ROOTFS); ++ ++ rootfs_found = 1; ++ } ++ ++ if (IS_ENABLED(CONFIG_MTD_SPLIT_FIRMWARE) && ++ !strcmp(part->name, SPLIT_FIRMWARE_NAME) && ++ !of_find_property(mtd_get_of_node(part), "compatible", NULL)) ++ split_firmware(master, part); ++} ++ + int mtd_add_partition(struct mtd_info *parent, const char *name, + long long offset, long long length) + { +@@ -274,6 +416,7 @@ int mtd_add_partition(struct mtd_info *p + if (ret) + goto err_remove_part; + ++ mtd_partition_split(parent, child); + mtd_add_partition_attrs(child); + + return 0; +@@ -422,6 +565,7 @@ int add_mtd_partitions(struct mtd_info * + goto err_del_partitions; + } + ++ mtd_partition_split(master, child); + mtd_add_partition_attrs(child); + + /* Look for subpartitions */ +@@ -438,31 +582,6 @@ err_del_partitions: + return ret; + } + +-static DEFINE_SPINLOCK(part_parser_lock); +-static LIST_HEAD(part_parsers); +- +-static struct mtd_part_parser *mtd_part_parser_get(const char *name) +-{ +- struct mtd_part_parser *p, *ret = NULL; +- +- spin_lock(&part_parser_lock); +- +- list_for_each_entry(p, &part_parsers, list) +- if (!strcmp(p->name, name) && try_module_get(p->owner)) { +- ret = p; +- break; +- } +- +- spin_unlock(&part_parser_lock); +- +- return ret; +-} +- +-static inline void mtd_part_parser_put(const struct mtd_part_parser *p) +-{ +- module_put(p->owner); +-} +- + /* + * Many partition parsers just expected the core to kfree() all their data in + * one chunk. Do that by default. 
+--- a/include/linux/mtd/partitions.h ++++ b/include/linux/mtd/partitions.h +@@ -75,6 +75,12 @@ struct mtd_part_parser_data { + * Functions dealing with the various ways of partitioning the space + */ + ++enum mtd_parser_type { ++ MTD_PARSER_TYPE_DEVICE = 0, ++ MTD_PARSER_TYPE_ROOTFS, ++ MTD_PARSER_TYPE_FIRMWARE, ++}; ++ + struct mtd_part_parser { + struct list_head list; + struct module *owner; +@@ -83,6 +89,7 @@ struct mtd_part_parser { + int (*parse_fn)(struct mtd_info *, const struct mtd_partition **, + struct mtd_part_parser_data *); + void (*cleanup)(const struct mtd_partition *pparts, int nr_parts); ++ enum mtd_parser_type type; + }; + + /* Container for passing around a set of parsed partitions */ +--- a/drivers/mtd/Makefile ++++ b/drivers/mtd/Makefile +@@ -9,6 +9,8 @@ mtd-y := mtdcore.o mtdsuper.o mtdconc + + obj-y += parsers/ + ++obj-$(CONFIG_MTD_SPLIT) += mtdsplit/ ++ + # 'Users' - code which presents functionality to userspace. + obj-$(CONFIG_MTD_BLKDEVS) += mtd_blkdevs.o + obj-$(CONFIG_MTD_BLOCK) += mtdblock.o +--- a/include/linux/mtd/mtd.h ++++ b/include/linux/mtd/mtd.h +@@ -608,6 +608,24 @@ static inline void mtd_align_erase_req(s + req->len += mtd->erasesize - mod; + } + ++static inline uint64_t mtd_roundup_to_eb(uint64_t sz, struct mtd_info *mtd) ++{ ++ if (mtd_mod_by_eb(sz, mtd) == 0) ++ return sz; ++ ++ /* Round up to next erase block */ ++ return (mtd_div_by_eb(sz, mtd) + 1) * mtd->erasesize; ++} ++ ++static inline uint64_t mtd_rounddown_to_eb(uint64_t sz, struct mtd_info *mtd) ++{ ++ if (mtd_mod_by_eb(sz, mtd) == 0) ++ return sz; ++ ++ /* Round down to the start of the current erase block */ ++ return (mtd_div_by_eb(sz, mtd)) * mtd->erasesize; ++} ++ + static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd) + { + if (mtd->writesize_shift) +@@ -680,6 +698,13 @@ extern void __put_mtd_device(struct mtd_ + extern struct mtd_info *get_mtd_device_nm(const char *name); + extern void put_mtd_device(struct mtd_info *mtd); + ++static inline uint64_t mtdpart_get_offset(const struct mtd_info *mtd) ++{ ++ if (!mtd_is_partition(mtd)) ++ return 0; ++ ++ return mtd->part.offset; ++} + + struct mtd_notifier { + void (*add)(struct mtd_info *mtd); diff --git a/target/linux/generic/pending-5.15/402-mtd-spi-nor-write-support-for-minor-aligned-partitions.patch b/target/linux/generic/pending-5.15/402-mtd-spi-nor-write-support-for-minor-aligned-partitions.patch new file mode 100644 index 0000000000..eaf96e9534 --- /dev/null +++ b/target/linux/generic/pending-5.15/402-mtd-spi-nor-write-support-for-minor-aligned-partitions.patch @@ -0,0 +1,389 @@ +From patchwork Tue Jun 8 04:07:19 2021 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +X-Patchwork-Submitter: John Thomson <git@johnthomson.fastmail.com.au> +X-Patchwork-Id: 1489105 +X-Patchwork-Delegate: tudor.ambarus@gmail.com +Return-Path: + <linux-mtd-bounces+incoming=patchwork.ozlabs.org@lists.infradead.org> +X-Original-To: incoming@patchwork.ozlabs.org +Delivered-To: patchwork-incoming@bilbo.ozlabs.org +Authentication-Results: ozlabs.org; + spf=none (no SPF record) smtp.mailfrom=lists.infradead.org + (client-ip=2607:7c80:54:e::133; helo=bombadil.infradead.org; + envelope-from=linux-mtd-bounces+incoming=patchwork.ozlabs.org@lists.infradead.org; + receiver=<UNKNOWN>) +Authentication-Results: ozlabs.org; + dkim=pass (2048-bit key; + secure) header.d=lists.infradead.org header.i=@lists.infradead.org + header.a=rsa-sha256 header.s=bombadil.20210309 header.b=EMabhVoR; + dkim=fail 
reason="signature verification failed" (2048-bit key; + unprotected) header.d=fastmail.com.au header.i=@fastmail.com.au + header.a=rsa-sha256 header.s=fm3 header.b=dLzuZ6dB; + dkim=fail reason="signature verification failed" (2048-bit key; + unprotected) header.d=messagingengine.com header.i=@messagingengine.com + header.a=rsa-sha256 header.s=fm3 header.b=nSRGsW+C; + dkim-atps=neutral +Received: from bombadil.infradead.org (bombadil.infradead.org + [IPv6:2607:7c80:54:e::133]) + (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) + key-exchange X25519 server-signature RSA-PSS (4096 bits) server-digest + SHA256) + (No client certificate requested) + by ozlabs.org (Postfix) with ESMTPS id 4FzcFN1j1nz9sW8 + for <incoming@patchwork.ozlabs.org>; Tue, 8 Jun 2021 14:09:28 +1000 (AEST) +DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed; + d=lists.infradead.org; s=bombadil.20210309; h=Sender: + Content-Transfer-Encoding:Content-Type:List-Subscribe:List-Help:List-Post: + List-Archive:List-Unsubscribe:List-Id:MIME-Version:Message-Id:Date:Subject:Cc + :To:From:Reply-To:Content-ID:Content-Description:Resent-Date:Resent-From: + Resent-Sender:Resent-To:Resent-Cc:Resent-Message-ID:In-Reply-To:References: + List-Owner; bh=6mUWQd71FwsINycGYY1qOhKz+ecWJVNtwDkTebG3XkA=; b=EMabhVoRE3ad89 + o3L2AgyKrs+blSofUC3hoSsQe7gi3m4si8S9HW8Z+8SsS5TufUsvGwDl80qSYGlQOytQF+1yRUWvE + 6FJ/+bqv+TwjqZFibgJ6+9OVsQN9dZ/no1R0bBXIpmrf8ORUmv58QK4ZQquaFKbyXKpFeWOC2MSv4 + H2MAhyhTU8a3gtooH6G8+KvsJEfVgh6C+aDbwxyh2UY3chHKuw1kvL6AktbfUE2xl4zxi3x3kc70B + Wi3LiJBFokxVdgnROXxTU5tI0XboWYkQV64gLuQNV4XKClcuhVpzloDK8Iok6NTd7b32a7TdEFlCS + lGKsEKmxtUlW2FpfoduA==; +Received: from localhost ([::1] helo=bombadil.infradead.org) + by bombadil.infradead.org with esmtp (Exim 4.94.2 #2 (Red Hat Linux)) + id 1lqT1r-006OAW-DX; Tue, 08 Jun 2021 04:07:51 +0000 +Received: from new1-smtp.messagingengine.com ([66.111.4.221]) + by bombadil.infradead.org with esmtps (Exim 4.94.2 #2 (Red Hat Linux)) + id 1lqT1l-006O9b-Fq + for linux-mtd@lists.infradead.org; Tue, 08 Jun 2021 04:07:50 +0000 +Received: from compute2.internal (compute2.nyi.internal [10.202.2.42]) + by mailnew.nyi.internal (Postfix) with ESMTP id 4456B580622; + Tue, 8 Jun 2021 00:07:42 -0400 (EDT) +Received: from mailfrontend2 ([10.202.2.163]) + by compute2.internal (MEProxy); Tue, 08 Jun 2021 00:07:42 -0400 +DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=fastmail.com.au; + h=from:to:cc:subject:date:message-id:mime-version + :content-transfer-encoding; s=fm3; bh=ZXRH+YluM1mHCS1EWUiCY/Sg8O + LccfHe1oW5iAay6y8=; b=dLzuZ6dBYf7ZA8tWLOBFZYLi7ERsGe/4vnMXG+ovvb + dNBO0+SaFGwoqYSFrfq/TeyHfKyvxrA7+LCdopIuT4abpLHxtRwtRiafQcDYCPat + qJIqOZO+wCZC5S9Jc1OP7+t1FviGpgevqIMotci37P+RWc5u3AweMzFljZk90E8C + uorV6rXagD+OssJQzllRnAIK88+rOAC9ZyXv2gWxy4d1HSCwSWgzx2vnV9CNp918 + YC/3tiHas9krbrPIaAsdBROr7Bvoe/ShRRzruKRuvZVgg5NN90vX+/5ZjI8u04GM + p2bWCbC62CP6wlcgDaz+c/Sgr5ITd2GPENJsHfqmLRBA== +DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d= + messagingengine.com; h=cc:content-transfer-encoding:date:from + :message-id:mime-version:subject:to:x-me-proxy:x-me-proxy + :x-me-sender:x-me-sender:x-sasl-enc; s=fm3; bh=ZXRH+YluM1mHCS1EW + UiCY/Sg8OLccfHe1oW5iAay6y8=; b=nSRGsW+CQ2Zx1RVpIUu8W/VD/k5P+32BW + 5k2ltd+UhI3dfldBPzHrYiOP/IJqGkNW+V+rHASacW/vFygnaZoxNjRYKnOsu+26 + wb2yK3jpl6lsNTg3N1Z4XJrYY2lf9H29DMFbhC67l0PTc050rcZk4XsKTLAlv14Q + VA4WREYSaX/4IN4O+ES4TMq0a/3gKZh6nvbbJXbsXfK0WlSHTGZtZmW3fyrqvbXa + t+R7L8vvqWvwls0pV+Sn8LeQqb7+A69w0UOnuznjkcA3sCc2YehcHbxcUEnMH+9N + 
bxOjmIDeg9/4X/829tUWUJiLhE5SFmQZ1P6oFtmbWoLrDz0ZJIVBw== +X-ME-Sender: <xms:C-2-YD2uka4HsA6gcdsV2Ia7vebY4Yjp9E8q7KBMb54jnAzGL7-67Q> + <xme:C-2-YCEaxASy5VlcrvNO_jLFpMDGkFCRsuVNuZGEQsiRZygk8jPHWq7unPjeT6uYS + 2pUP6PrTQ2rggjEIg> +X-ME-Received: + <xmr:C-2-YD4exeK49N_YZWWf2BWDhVyCbCY3wwvjTyDOFxeugx7Jg08pzMUToo9oJjrBpcVTaA3kbfk> +X-ME-Proxy-Cause: + gggruggvucftvghtrhhoucdtuddrgeduledrfedtkedgjeduucetufdoteggodetrfdotf + fvucfrrhhofhhilhgvmecuhfgrshhtofgrihhlpdfqfgfvpdfurfetoffkrfgpnffqhgen + uceurghilhhouhhtmecufedttdenucesvcftvggtihhpihgvnhhtshculddquddttddmne + cujfgurhephffvufffkffoggfgsedtkeertdertddtnecuhfhrohhmpeflohhhnhcuvfhh + ohhmshhonhcuoehgihhtsehjohhhnhhthhhomhhsohhnrdhfrghsthhmrghilhdrtghomh + drrghuqeenucggtffrrghtthgvrhhnpefffeeihfdukedtuedufeetieeuudfhhefhkefh + tefgtdeuffekffelleetveduieenucevlhhushhtvghrufhiiigvpedtnecurfgrrhgrmh + epmhgrihhlfhhrohhmpehgihhtsehjohhhnhhthhhomhhsohhnrdhfrghsthhmrghilhdr + tghomhdrrghu +X-ME-Proxy: <xmx:C-2-YI0AJZGjcB3wIbI9BoC9X8VNl4i9A7cQnBkvwZ25czWJlkKCLw> + <xmx:C-2-YGGufw99T-O81-FeiSyEruv6_Pr0IHFhspQdxjv5k1VFTZ0lzQ> + <xmx:C-2-YJ8BW7DhSDSCEAPSJWrwh_hHP79qreTZtWh_kOUwSh1c0MMlAg> + <xmx:Du2-YJBeX2Fg9oFZVXGwEJ1ZrZnXHiAqNON8tbpzquYgcm2o_LM48g> +Received: by mail.messagingengine.com (Postfix) with ESMTPA; Tue, + 8 Jun 2021 00:07:35 -0400 (EDT) +From: John Thomson <git@johnthomson.fastmail.com.au> +To: Miquel Raynal <miquel.raynal@bootlin.com>, + Richard Weinberger <richard@nod.at>, Vignesh Raghavendra <vigneshr@ti.com>, + Tudor Ambarus <tudor.ambarus@microchip.com>, + Michael Walle <michael@walle.cc>, Pratyush Yadav <p.yadav@ti.com>, + linux-mtd@lists.infradead.org +Cc: linux-kernel@vger.kernel.org, + John Thomson <git@johnthomson.fastmail.com.au>, + kernel test robot <lkp@intel.com>, Dan Carpenter <dan.carpenter@oracle.com> +Subject: [PATCH] mtd: spi-nor: write support for minor aligned partitions +Date: Tue, 8 Jun 2021 14:07:19 +1000 +Message-Id: <20210608040719.14431-1-git@johnthomson.fastmail.com.au> +X-Mailer: git-send-email 2.31.1 +MIME-Version: 1.0 +X-CRM114-Version: 20100106-BlameMichelson ( TRE 0.8.0 (BSD) ) MR-646709E3 +X-CRM114-CacheID: sfid-20210607_210745_712053_67A7D864 +X-CRM114-Status: GOOD ( 26.99 ) +X-Spam-Score: -0.8 (/) +X-Spam-Report: Spam detection software, + running on the system "bombadil.infradead.org", + has NOT identified this incoming email as spam. The original + message has been attached to this so you can view it or label + similar future email. If you have any questions, see + the administrator of that system for details. + Content preview: Do not prevent writing to mtd partitions where a partition + boundary sits on a minor erasesize boundary. This addresses a FIXME that + has been present since the start of the linux git history: /* Doesn' [...] 
+ Content analysis details: (-0.8 points, 5.0 required) + pts rule name description + ---- ---------------------- + -------------------------------------------------- + -0.7 RCVD_IN_DNSWL_LOW RBL: Sender listed at https://www.dnswl.org/, + low trust [66.111.4.221 listed in list.dnswl.org] + -0.0 SPF_PASS SPF: sender matches SPF record + -0.0 SPF_HELO_PASS SPF: HELO matches SPF record + 0.0 RCVD_IN_MSPIKE_H3 RBL: Good reputation (+3) + [66.111.4.221 listed in wl.mailspike.net] + -0.1 DKIM_VALID Message has at least one valid DKIM or DK signature + 0.1 DKIM_SIGNED Message has a DKIM or DK signature, + not necessarily + valid + -0.1 DKIM_VALID_EF Message has a valid DKIM or DK signature from + envelope-from domain + 0.0 RCVD_IN_MSPIKE_WL Mailspike good senders +X-BeenThere: linux-mtd@lists.infradead.org +X-Mailman-Version: 2.1.34 +Precedence: list +List-Id: Linux MTD discussion mailing list <linux-mtd.lists.infradead.org> +List-Unsubscribe: <http://lists.infradead.org/mailman/options/linux-mtd>, + <mailto:linux-mtd-request@lists.infradead.org?subject=unsubscribe> +List-Archive: <http://lists.infradead.org/pipermail/linux-mtd/> +List-Post: <mailto:linux-mtd@lists.infradead.org> +List-Help: <mailto:linux-mtd-request@lists.infradead.org?subject=help> +List-Subscribe: <http://lists.infradead.org/mailman/listinfo/linux-mtd>, + <mailto:linux-mtd-request@lists.infradead.org?subject=subscribe> +Sender: "linux-mtd" <linux-mtd-bounces@lists.infradead.org> +Errors-To: linux-mtd-bounces+incoming=patchwork.ozlabs.org@lists.infradead.org + +Do not prevent writing to mtd partitions where a partition boundary sits +on a minor erasesize boundary. +This addresses a FIXME that has been present since the start of the +linux git history: +/* Doesn't start on a boundary of major erase size */ +/* FIXME: Let it be writable if it is on a boundary of + * _minor_ erase size though */ + +Allow a uniform erase region spi-nor device to be configured +to use the non-uniform erase regions code path for an erase with: +CONFIG_MTD_SPI_NOR_USE_VARIABLE_ERASE=y + +On supporting hardware (SECT_4K: majority of current SPI-NOR device) +provide the facility for an erase to use the least number +of SPI-NOR operations, as well as access to 4K erase without +requiring CONFIG_MTD_SPI_NOR_USE_4K_SECTORS + +Introduce erasesize_minor to the mtd struct, +the smallest erasesize supported by the device + +On existing devices, this is useful where write support is wanted +for data on a 4K partition, such as some u-boot-env partitions, +or RouterBoot soft_config, while still netting the performance +benefits of using 64K sectors + +Performance: +time mtd erase firmware +OpenWrt 5.10 ramips MT7621 w25q128jv 0xfc0000 partition length + +Without this patch +MTD_SPI_NOR_USE_4K_SECTORS=y |n +real 2m 11.66s |0m 50.86s +user 0m 0.00s |0m 0.00s +sys 1m 56.20s |0m 50.80s + +With this patch +MTD_SPI_NOR_USE_VARIABLE_ERASE=n|y |4K_SECTORS=y +real 0m 51.68s |0m 50.85s |2m 12.89s +user 0m 0.00s |0m 0.00s |0m 0.01s +sys 0m 46.94s |0m 50.38s |2m 12.46s + +Signed-off-by: John Thomson <git@johnthomson.fastmail.com.au> +--- +Have not tested on variable erase regions device. + +checkpatch does not like the printk(KERN_WARNING +these should be changed separately beforehand? 
+ +Changes RFC -> v1: +Fix uninitialized variable smatch warning +Reported-by: kernel test robot <lkp@intel.com> +Reported-by: Dan Carpenter <dan.carpenter@oracle.com> +--- + drivers/mtd/mtdpart.c | 52 ++++++++++++++++++++++++++++--------- + drivers/mtd/spi-nor/Kconfig | 10 +++++++ + drivers/mtd/spi-nor/core.c | 10 +++++-- + include/linux/mtd/mtd.h | 2 ++ + 4 files changed, 60 insertions(+), 14 deletions(-) + +--- a/drivers/mtd/mtdpart.c ++++ b/drivers/mtd/mtdpart.c +@@ -40,10 +40,11 @@ static struct mtd_info *allocate_partiti + struct mtd_info *master = mtd_get_master(parent); + int wr_alignment = (parent->flags & MTD_NO_ERASE) ? + master->writesize : master->erasesize; ++ int wr_alignment_minor = 0; + u64 parent_size = mtd_is_partition(parent) ? + parent->part.size : parent->size; + struct mtd_info *child; +- u32 remainder; ++ u32 remainder, remainder_minor; + char *name; + u64 tmp; + +@@ -145,6 +146,7 @@ static struct mtd_info *allocate_partiti + int i, max = parent->numeraseregions; + u64 end = child->part.offset + child->part.size; + struct mtd_erase_region_info *regions = parent->eraseregions; ++ uint32_t erasesize_minor = child->erasesize; + + /* Find the first erase regions which is part of this + * partition. */ +@@ -155,15 +157,24 @@ static struct mtd_info *allocate_partiti + if (i > 0) + i--; + +- /* Pick biggest erasesize */ + for (; i < max && regions[i].offset < end; i++) { ++ /* Pick biggest erasesize */ + if (child->erasesize < regions[i].erasesize) + child->erasesize = regions[i].erasesize; ++ /* Pick smallest non-zero erasesize */ ++ if ((erasesize_minor > regions[i].erasesize) && (regions[i].erasesize > 0)) ++ erasesize_minor = regions[i].erasesize; + } ++ ++ if (erasesize_minor < child->erasesize) ++ child->erasesize_minor = erasesize_minor; ++ + BUG_ON(child->erasesize == 0); + } else { + /* Single erase size */ + child->erasesize = master->erasesize; ++ if (master->erasesize_minor) ++ child->erasesize_minor = master->erasesize_minor; + } + + /* +@@ -171,26 +182,43 @@ static struct mtd_info *allocate_partiti + * exposes several regions with different erasesize. Adjust + * wr_alignment accordingly. 
+ */ +- if (!(child->flags & MTD_NO_ERASE)) ++ if (!(child->flags & MTD_NO_ERASE)) { + wr_alignment = child->erasesize; ++ if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_VARIABLE_ERASE) && child->erasesize_minor) ++ wr_alignment_minor = child->erasesize_minor; ++ } + + tmp = mtd_get_master_ofs(child, 0); + remainder = do_div(tmp, wr_alignment); + if ((child->flags & MTD_WRITEABLE) && remainder) { +- /* Doesn't start on a boundary of major erase size */ +- /* FIXME: Let it be writable if it is on a boundary of +- * _minor_ erase size though */ +- child->flags &= ~MTD_WRITEABLE; +- printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n", +- part->name); ++ if (wr_alignment_minor) { ++ tmp = mtd_get_master_ofs(child, 0); ++ remainder_minor = do_div(tmp, wr_alignment_minor); ++ if (remainder_minor == 0) ++ child->erasesize = child->erasesize_minor; ++ } ++ ++ if ((!wr_alignment_minor) || (wr_alignment_minor && remainder_minor != 0)) { ++ child->flags &= ~MTD_WRITEABLE; ++ printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n", ++ part->name); ++ } + } + + tmp = mtd_get_master_ofs(child, 0) + child->part.size; + remainder = do_div(tmp, wr_alignment); + if ((child->flags & MTD_WRITEABLE) && remainder) { +- child->flags &= ~MTD_WRITEABLE; +- printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n", +- part->name); ++ if (wr_alignment_minor) { ++ tmp = mtd_get_master_ofs(child, 0) + child->part.size; ++ remainder_minor = do_div(tmp, wr_alignment_minor); ++ if (remainder_minor == 0) ++ child->erasesize = child->erasesize_minor; ++ } ++ if ((!wr_alignment_minor) || (wr_alignment_minor && remainder_minor != 0)) { ++ child->flags &= ~MTD_WRITEABLE; ++ printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n", ++ part->name); ++ } + } + + child->size = child->part.size; +--- a/drivers/mtd/spi-nor/Kconfig ++++ b/drivers/mtd/spi-nor/Kconfig +@@ -10,6 +10,16 @@ menuconfig MTD_SPI_NOR + + if MTD_SPI_NOR + ++config MTD_SPI_NOR_USE_VARIABLE_ERASE ++ bool "Disable uniform_erase to allow use of all hardware supported erasesizes" ++ depends on !MTD_SPI_NOR_USE_4K_SECTORS ++ default n ++ help ++ Allow mixed use of all hardware supported erasesizes, ++ by forcing spi_nor to use the multiple eraseregions code path. ++ For example: A 68K erase will use one 64K erase, and one 4K erase ++ on supporting hardware. 
++ + config MTD_SPI_NOR_USE_4K_SECTORS + bool "Use small 4096 B erase sectors" + default y +--- a/drivers/mtd/spi-nor/core.c ++++ b/drivers/mtd/spi-nor/core.c +@@ -1075,6 +1075,8 @@ static u8 spi_nor_convert_3to4_erase(u8 + + static bool spi_nor_has_uniform_erase(const struct spi_nor *nor) + { ++ if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_VARIABLE_ERASE)) ++ return false; + return !!nor->params->erase_map.uniform_erase_type; + } + +@@ -2560,6 +2562,7 @@ static int spi_nor_select_erase(struct s + { + struct spi_nor_erase_map *map = &nor->params->erase_map; + const struct spi_nor_erase_type *erase = NULL; ++ const struct spi_nor_erase_type *erase_minor = NULL; + struct mtd_info *mtd = &nor->mtd; + u32 wanted_size = nor->info->sector_size; + int i; +@@ -2592,8 +2595,9 @@ static int spi_nor_select_erase(struct s + */ + for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) { + if (map->erase_type[i].size) { +- erase = &map->erase_type[i]; +- break; ++ if (!erase) ++ erase = &map->erase_type[i]; ++ erase_minor = &map->erase_type[i]; + } + } + +@@ -2601,6 +2605,8 @@ static int spi_nor_select_erase(struct s + return -EINVAL; + + mtd->erasesize = erase->size; ++ if (erase_minor && erase_minor->size < erase->size) ++ mtd->erasesize_minor = erase_minor->size; + return 0; + } + +--- a/include/linux/mtd/mtd.h ++++ b/include/linux/mtd/mtd.h +@@ -242,6 +242,8 @@ struct mtd_info { + * information below if they desire + */ + uint32_t erasesize; ++ /* "Minor" (smallest) erase size supported by the whole device */ ++ uint32_t erasesize_minor; + /* Minimal writable flash unit size. In case of NOR flash it is 1 (even + * though individual bits can be cleared), in case of NAND flash it is + * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR diff --git a/target/linux/generic/pending-5.15/410-mtd-parsers-ofpart-fix-parsing-subpartitions.patch b/target/linux/generic/pending-5.15/410-mtd-parsers-ofpart-fix-parsing-subpartitions.patch new file mode 100644 index 0000000000..353fa96748 --- /dev/null +++ b/target/linux/generic/pending-5.15/410-mtd-parsers-ofpart-fix-parsing-subpartitions.patch @@ -0,0 +1,76 @@ +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Thu, 6 May 2021 12:33:58 +0200 +Subject: [PATCH] mtd: parsers: ofpart: fix parsing subpartitions +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +ofpart was recently patched to not scan random partition nodes as +subpartitions. That change unfortunately broke scanning valid +subpartitions like: + +partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + compatible = "fixed-partitions"; + label = "bootloader"; + reg = <0x0 0x100000>; + + partition@0 { + label = "config"; + reg = <0x80000 0x80000>; + }; + }; +}; + +Fix that regression by adding 1 more code path. We actually need 3 +conditional blocks to support 3 possible cases. This change also makes +code easier to understand & follow. 
+ +Reported-by: David Bauer <mail@david-bauer.net> +Fixes: 2d751203aacf ("mtd: parsers: ofpart: limit parsing of deprecated DT syntax") +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +--- + drivers/mtd/parsers/ofpart_core.c | 26 ++++++++++++++------------ + 1 file changed, 14 insertions(+), 12 deletions(-) + +--- a/drivers/mtd/parsers/ofpart_core.c ++++ b/drivers/mtd/parsers/ofpart_core.c +@@ -57,20 +57,22 @@ static int parse_fixed_partitions(struct + if (!mtd_node) + return 0; + +- ofpart_node = of_get_child_by_name(mtd_node, "partitions"); +- if (!ofpart_node && !master->parent) { +- /* +- * We might get here even when ofpart isn't used at all (e.g., +- * when using another parser), so don't be louder than +- * KERN_DEBUG +- */ +- pr_debug("%s: 'partitions' subnode not found on %pOF. Trying to parse direct subnodes as partitions.\n", +- master->name, mtd_node); ++ if (!master->parent) { /* Master */ ++ ofpart_node = of_get_child_by_name(mtd_node, "partitions"); ++ if (!ofpart_node) { ++ /* ++ * We might get here even when ofpart isn't used at all (e.g., ++ * when using another parser), so don't be louder than ++ * KERN_DEBUG ++ */ ++ pr_debug("%s: 'partitions' subnode not found on %pOF. Trying to parse direct subnodes as partitions.\n", ++ master->name, mtd_node); ++ ofpart_node = mtd_node; ++ dedicated = false; ++ } ++ } else { /* Partition */ + ofpart_node = mtd_node; +- dedicated = false; + } +- if (!ofpart_node) +- return 0; + + of_id = of_match_node(parse_ofpart_match_table, ofpart_node); + if (dedicated && !of_id) { diff --git a/target/linux/generic/pending-5.15/419-mtd-redboot-add-of_match_table-with-DT-binding.patch b/target/linux/generic/pending-5.15/419-mtd-redboot-add-of_match_table-with-DT-binding.patch new file mode 100644 index 0000000000..7692f484ae --- /dev/null +++ b/target/linux/generic/pending-5.15/419-mtd-redboot-add-of_match_table-with-DT-binding.patch @@ -0,0 +1,22 @@ +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Subject: [PATCH] mtd: redboot: add of_match_table with DT binding +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This allows parsing RedBoot compatible partitions for a properly described +flash device in DT. 
+ +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +--- + +--- a/drivers/mtd/parsers/redboot.c ++++ b/drivers/mtd/parsers/redboot.c +@@ -305,6 +305,7 @@ static int parse_redboot_partitions(stru + + static const struct of_device_id mtd_parser_redboot_of_match_table[] = { + { .compatible = "redboot-fis" }, ++ { .compatible = "ecoscentric,redboot-fis-partitions" }, + {}, + }; + MODULE_DEVICE_TABLE(of, mtd_parser_redboot_of_match_table); diff --git a/target/linux/generic/pending-5.15/420-mtd-redboot_space.patch b/target/linux/generic/pending-5.15/420-mtd-redboot_space.patch new file mode 100644 index 0000000000..a3cd4ecf1f --- /dev/null +++ b/target/linux/generic/pending-5.15/420-mtd-redboot_space.patch @@ -0,0 +1,41 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: add patch for including unpartitioned space in the rootfs partition for redboot devices (if applicable) + +[john@phrozen.org: used by ixp and others] + +lede-commit: 394918851f84e4d00fa16eb900e7700e95091f00 +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + drivers/mtd/redboot.c | 19 +++++++++++++------ + 1 file changed, 13 insertions(+), 6 deletions(-) + +--- a/drivers/mtd/parsers/redboot.c ++++ b/drivers/mtd/parsers/redboot.c +@@ -279,14 +279,21 @@ static int parse_redboot_partitions(stru + #endif + names += strlen(names)+1; + +-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED + if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) { +- i++; +- parts[i].offset = parts[i-1].size + parts[i-1].offset; +- parts[i].size = fl->next->img->flash_base - parts[i].offset; +- parts[i].name = nullname; +- } ++ if (!strcmp(parts[i].name, "rootfs")) { ++ parts[i].size = fl->next->img->flash_base; ++ parts[i].size &= ~(master->erasesize - 1); ++ parts[i].size -= parts[i].offset; ++#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED ++ nrparts--; ++ } else { ++ i++; ++ parts[i].offset = parts[i-1].size + parts[i-1].offset; ++ parts[i].size = fl->next->img->flash_base - parts[i].offset; ++ parts[i].name = nullname; + #endif ++ } ++ } + tmp_fl = fl; + fl = fl->next; + kfree(tmp_fl); diff --git a/target/linux/generic/pending-5.15/430-mtd-add-myloader-partition-parser.patch b/target/linux/generic/pending-5.15/430-mtd-add-myloader-partition-parser.patch new file mode 100644 index 0000000000..0889c9a343 --- /dev/null +++ b/target/linux/generic/pending-5.15/430-mtd-add-myloader-partition-parser.patch @@ -0,0 +1,229 @@ +From: Florian Fainelli <f.fainelli@gmail.com> +Subject: Add myloader partition table parser + +[john@phrozen.org: should be upstreamable] + +lede-commit: d8bf22859b51faa09d22c056fe221a45d2f7a3b8 +Signed-off-by: Florian Fainelli <f.fainelli@gmail.com> +[adjust for kernel 5.4, add myloader.c to patch] +Signed-off-by: Adrian Schmutzler <freifunk@adrianschmutzler.de> + +--- a/drivers/mtd/parsers/Kconfig ++++ b/drivers/mtd/parsers/Kconfig +@@ -57,6 +57,22 @@ config MTD_CMDLINE_PARTS + + If unsure, say 'N'. + ++config MTD_MYLOADER_PARTS ++ tristate "MyLoader partition parsing" ++ depends on ADM5120 || ATH25 || ATH79 ++ help ++ MyLoader is a bootloader which allows the user to define partitions ++ in flash devices, by putting a table in the second erase block ++ on the device, similar to a partition table. This table gives the ++ offsets and lengths of the user defined partitions. ++ ++ If you need code which can detect and parse these tables, and ++ register MTD 'partitions' corresponding to each image detected, ++ enable this option. 
++ ++ You will still need the parsing functions to be called by the driver ++ for your particular device. It won't happen automatically. ++ + config MTD_OF_PARTS + tristate "OpenFirmware (device tree) partitioning parser" + default y +--- a/drivers/mtd/parsers/Makefile ++++ b/drivers/mtd/parsers/Makefile +@@ -3,6 +3,7 @@ obj-$(CONFIG_MTD_AR7_PARTS) += ar7part. + obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o + obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o + obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o ++obj-$(CONFIG_MTD_MYLOADER_PARTS) += myloader.o + obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o + ofpart-y += ofpart_core.o + ofpart-$(CONFIG_MTD_OF_PARTS_BCM4908) += ofpart_bcm4908.o +--- /dev/null ++++ b/drivers/mtd/parsers/myloader.c +@@ -0,0 +1,181 @@ ++/* ++ * Parse MyLoader-style flash partition tables and produce a Linux partition ++ * array to match. ++ * ++ * Copyright (C) 2007-2009 Gabor Juhos <juhosg@openwrt.org> ++ * ++ * This file was based on drivers/mtd/redboot.c ++ * Author: Red Hat, Inc. - David Woodhouse <dwmw2@cambridge.redhat.com> ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ */ ++ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/version.h> ++#include <linux/slab.h> ++#include <linux/init.h> ++#include <linux/vmalloc.h> ++#include <linux/mtd/mtd.h> ++#include <linux/mtd/partitions.h> ++#include <linux/byteorder/generic.h> ++#include <linux/myloader.h> ++ ++#define BLOCK_LEN_MIN 0x10000 ++#define PART_NAME_LEN 32 ++ ++struct part_data { ++ struct mylo_partition_table tab; ++ char names[MYLO_MAX_PARTITIONS][PART_NAME_LEN]; ++}; ++ ++static int myloader_parse_partitions(struct mtd_info *master, ++ const struct mtd_partition **pparts, ++ struct mtd_part_parser_data *data) ++{ ++ struct part_data *buf; ++ struct mylo_partition_table *tab; ++ struct mylo_partition *part; ++ struct mtd_partition *mtd_parts; ++ struct mtd_partition *mtd_part; ++ int num_parts; ++ int ret, i; ++ size_t retlen; ++ char *names; ++ unsigned long offset; ++ unsigned long blocklen; ++ ++ buf = vmalloc(sizeof(*buf)); ++ if (!buf) { ++ return -ENOMEM; ++ goto out; ++ } ++ tab = &buf->tab; ++ ++ blocklen = master->erasesize; ++ if (blocklen < BLOCK_LEN_MIN) ++ blocklen = BLOCK_LEN_MIN; ++ ++ offset = blocklen; ++ ++ /* Find the partition table */ ++ for (i = 0; i < 4; i++, offset += blocklen) { ++ printk(KERN_DEBUG "%s: searching for MyLoader partition table" ++ " at offset 0x%lx\n", master->name, offset); ++ ++ ret = mtd_read(master, offset, sizeof(*buf), &retlen, ++ (void *)buf); ++ if (ret) ++ goto out_free_buf; ++ ++ if (retlen != sizeof(*buf)) { ++ ret = -EIO; ++ goto out_free_buf; ++ } ++ ++ /* Check for Partition Table magic number */ ++ if (tab->magic == le32_to_cpu(MYLO_MAGIC_PARTITIONS)) ++ break; ++ ++ } ++ ++ if (tab->magic != le32_to_cpu(MYLO_MAGIC_PARTITIONS)) { ++ printk(KERN_DEBUG "%s: no MyLoader partition table found\n", ++ master->name); ++ ret = 0; ++ goto out_free_buf; ++ } ++ ++ /* The MyLoader and the Partition Table is always present */ ++ num_parts = 2; ++ ++ /* Detect number of used partitions */ ++ for (i = 0; i < MYLO_MAX_PARTITIONS; i++) { ++ part = &tab->partitions[i]; ++ ++ if (le16_to_cpu(part->type) == PARTITION_TYPE_FREE) ++ continue; ++ ++ num_parts++; ++ } ++ ++ mtd_parts = kzalloc((num_parts * sizeof(*mtd_part) + ++ num_parts * PART_NAME_LEN), GFP_KERNEL); ++ ++ if (!mtd_parts) { ++ ret = 
-ENOMEM; ++ goto out_free_buf; ++ } ++ ++ mtd_part = mtd_parts; ++ names = (char *)&mtd_parts[num_parts]; ++ ++ strncpy(names, "myloader", PART_NAME_LEN); ++ mtd_part->name = names; ++ mtd_part->offset = 0; ++ mtd_part->size = offset; ++ mtd_part->mask_flags = MTD_WRITEABLE; ++ mtd_part++; ++ names += PART_NAME_LEN; ++ ++ strncpy(names, "partition_table", PART_NAME_LEN); ++ mtd_part->name = names; ++ mtd_part->offset = offset; ++ mtd_part->size = blocklen; ++ mtd_part->mask_flags = MTD_WRITEABLE; ++ mtd_part++; ++ names += PART_NAME_LEN; ++ ++ for (i = 0; i < MYLO_MAX_PARTITIONS; i++) { ++ part = &tab->partitions[i]; ++ ++ if (le16_to_cpu(part->type) == PARTITION_TYPE_FREE) ++ continue; ++ ++ if ((buf->names[i][0]) && (buf->names[i][0] != '\xff')) ++ strncpy(names, buf->names[i], PART_NAME_LEN); ++ else ++ snprintf(names, PART_NAME_LEN, "partition%d", i); ++ ++ mtd_part->offset = le32_to_cpu(part->addr); ++ mtd_part->size = le32_to_cpu(part->size); ++ mtd_part->name = names; ++ mtd_part++; ++ names += PART_NAME_LEN; ++ } ++ ++ *pparts = mtd_parts; ++ ret = num_parts; ++ ++ out_free_buf: ++ vfree(buf); ++ out: ++ return ret; ++} ++ ++static struct mtd_part_parser myloader_mtd_parser = { ++ .owner = THIS_MODULE, ++ .parse_fn = myloader_parse_partitions, ++ .name = "MyLoader", ++}; ++ ++static int __init myloader_mtd_parser_init(void) ++{ ++ register_mtd_parser(&myloader_mtd_parser); ++ ++ return 0; ++} ++ ++static void __exit myloader_mtd_parser_exit(void) ++{ ++ deregister_mtd_parser(&myloader_mtd_parser); ++} ++ ++module_init(myloader_mtd_parser_init); ++module_exit(myloader_mtd_parser_exit); ++ ++MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>"); ++MODULE_DESCRIPTION("Parsing code for MyLoader partition tables"); ++MODULE_LICENSE("GPL v2"); diff --git a/target/linux/generic/pending-5.15/431-mtd-bcm47xxpart-check-for-bad-blocks-when-calculatin.patch b/target/linux/generic/pending-5.15/431-mtd-bcm47xxpart-check-for-bad-blocks-when-calculatin.patch new file mode 100644 index 0000000000..bcea45d009 --- /dev/null +++ b/target/linux/generic/pending-5.15/431-mtd-bcm47xxpart-check-for-bad-blocks-when-calculatin.patch @@ -0,0 +1,68 @@ +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <zajec5@gmail.com> +Subject: [PATCH] mtd: bcm47xxpart: check for bad blocks when calculating offsets + +Signed-off-by: Rafał Miłecki <zajec5@gmail.com> +--- + +--- a/drivers/mtd/parsers/parser_trx.c ++++ b/drivers/mtd/parsers/parser_trx.c +@@ -25,6 +25,33 @@ struct trx_header { + uint32_t offset[3]; + } __packed; + ++/* ++ * Calculate real end offset (address) for a given amount of data. It checks ++ * all blocks skipping bad ones. 
++ */ ++static size_t parser_trx_real_offset(struct mtd_info *mtd, size_t bytes) ++{ ++ size_t real_offset = 0; ++ ++ if (mtd_block_isbad(mtd, real_offset)) ++ pr_warn("Base offset shouldn't be at bad block"); ++ ++ while (bytes >= mtd->erasesize) { ++ bytes -= mtd->erasesize; ++ real_offset += mtd->erasesize; ++ while (mtd_block_isbad(mtd, real_offset)) { ++ real_offset += mtd->erasesize; ++ ++ if (real_offset >= mtd->size) ++ return real_offset - mtd->erasesize; ++ } ++ } ++ ++ real_offset += bytes; ++ ++ return real_offset; ++} ++ + static const char *parser_trx_data_part_name(struct mtd_info *master, + size_t offset) + { +@@ -86,21 +113,21 @@ static int parser_trx_parse(struct mtd_i + if (trx.offset[2]) { + part = &parts[curr_part++]; + part->name = "loader"; +- part->offset = trx.offset[i]; ++ part->offset = parser_trx_real_offset(mtd, trx.offset[i]); + i++; + } + + if (trx.offset[i]) { + part = &parts[curr_part++]; + part->name = "linux"; +- part->offset = trx.offset[i]; ++ part->offset = parser_trx_real_offset(mtd, trx.offset[i]); + i++; + } + + if (trx.offset[i]) { + part = &parts[curr_part++]; +- part->name = parser_trx_data_part_name(mtd, trx.offset[i]); +- part->offset = trx.offset[i]; ++ part->offset = parser_trx_real_offset(mtd, trx.offset[i]); ++ part->name = parser_trx_data_part_name(mtd, part->offset); + i++; + } + diff --git a/target/linux/generic/pending-5.15/432-mtd-bcm47xxpart-detect-T_Meter-partition.patch b/target/linux/generic/pending-5.15/432-mtd-bcm47xxpart-detect-T_Meter-partition.patch new file mode 100644 index 0000000000..852654d924 --- /dev/null +++ b/target/linux/generic/pending-5.15/432-mtd-bcm47xxpart-detect-T_Meter-partition.patch @@ -0,0 +1,37 @@ +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <zajec5@gmail.com> +Subject: mtd: bcm47xxpart: detect T_Meter partition + +It can be found on many Netgear devices. It consists of many 0x30 blocks +starting with 4D 54. 
+ +Signed-off-by: Rafał Miłecki <zajec5@gmail.com> +--- + drivers/mtd/bcm47xxpart.c | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +--- a/drivers/mtd/parsers/bcm47xxpart.c ++++ b/drivers/mtd/parsers/bcm47xxpart.c +@@ -35,6 +35,7 @@ + #define NVRAM_HEADER 0x48534C46 /* FLSH */ + #define POT_MAGIC1 0x54544f50 /* POTT */ + #define POT_MAGIC2 0x504f /* OP */ ++#define T_METER_MAGIC 0x4D540000 /* MT */ + #define ML_MAGIC1 0x39685a42 + #define ML_MAGIC2 0x26594131 + #define TRX_MAGIC 0x30524448 +@@ -178,6 +179,15 @@ static int bcm47xxpart_parse(struct mtd_ + MTD_WRITEABLE); + continue; + } ++ ++ /* T_Meter */ ++ if ((le32_to_cpu(buf[0x000 / 4]) & 0xFFFF0000) == T_METER_MAGIC && ++ (le32_to_cpu(buf[0x030 / 4]) & 0xFFFF0000) == T_METER_MAGIC && ++ (le32_to_cpu(buf[0x060 / 4]) & 0xFFFF0000) == T_METER_MAGIC) { ++ bcm47xxpart_add_part(&parts[curr_part++], "T_Meter", offset, ++ MTD_WRITEABLE); ++ continue; ++ } + + /* TRX */ + if (buf[0x000 / 4] == TRX_MAGIC) { diff --git a/target/linux/generic/pending-5.15/435-mtd-add-routerbootpart-parser-config.patch b/target/linux/generic/pending-5.15/435-mtd-add-routerbootpart-parser-config.patch new file mode 100644 index 0000000000..ab1e09a5f1 --- /dev/null +++ b/target/linux/generic/pending-5.15/435-mtd-add-routerbootpart-parser-config.patch @@ -0,0 +1,38 @@ +From 4437e01fb6bca63fccdba5d6c44888b0935885c2 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Thibaut=20VAR=C3=88NE?= <hacks@slashdirt.org> +Date: Tue, 24 Mar 2020 11:45:07 +0100 +Subject: [PATCH] generic: routerboot partition build bits (5.4) +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This patch adds routerbootpart kernel build bits + +Signed-off-by: Thibaut VARÈNE <hacks@slashdirt.org> +--- + drivers/mtd/parsers/Kconfig | 9 +++++++++ + drivers/mtd/parsers/Makefile | 1 + + 2 files changed, 10 insertions(+) + +--- a/drivers/mtd/parsers/Kconfig ++++ b/drivers/mtd/parsers/Kconfig +@@ -195,3 +195,12 @@ config MTD_REDBOOT_PARTS_READONLY + 'FIS directory' images, enable this option. + + endif # MTD_REDBOOT_PARTS ++ ++config MTD_ROUTERBOOT_PARTS ++ tristate "RouterBoot flash partition parser" ++ depends on MTD && OF ++ help ++ MikroTik RouterBoot is implemented as a multi segment system on the ++ flash, some of which are fixed and some of which are located at ++ variable offsets. This parser handles both cases via properly ++ formatted DTS. +--- a/drivers/mtd/parsers/Makefile ++++ b/drivers/mtd/parsers/Makefile +@@ -13,3 +13,4 @@ obj-$(CONFIG_MTD_AFS_PARTS) += afs.o + obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o + obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o + obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o ++obj-$(CONFIG_MTD_ROUTERBOOT_PARTS) += routerbootpart.o diff --git a/target/linux/generic/pending-5.15/460-mtd-cfi_cmdset_0002-no-erase_suspend.patch b/target/linux/generic/pending-5.15/460-mtd-cfi_cmdset_0002-no-erase_suspend.patch new file mode 100644 index 0000000000..9983971af5 --- /dev/null +++ b/target/linux/generic/pending-5.15/460-mtd-cfi_cmdset_0002-no-erase_suspend.patch @@ -0,0 +1,25 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: kernel: disable cfi cmdset 0002 erase suspend + +on some platforms, erase suspend leads to data corruption and lockups when write +ops collide with erase ops. this has been observed on the buffalo wzr-hp-g300nh. 
+rather than play whack-a-mole with a hard to reproduce issue on a variety of devices, +simply disable erase suspend, as it will usually not produce any useful gain on +the small filesystems used on embedded hardware. + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + drivers/mtd/chips/cfi_cmdset_0002.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/mtd/chips/cfi_cmdset_0002.c ++++ b/drivers/mtd/chips/cfi_cmdset_0002.c +@@ -913,7 +913,7 @@ static int get_chip(struct map_info *map + return 0; + + case FL_ERASING: +- if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) || ++ if (1 /* no suspend */ || !cfip || !(cfip->EraseSuspend & (0x1|0x2)) || + !(mode == FL_READY || mode == FL_POINT || + (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) + goto sleep; diff --git a/target/linux/generic/pending-5.15/461-mtd-cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch b/target/linux/generic/pending-5.15/461-mtd-cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch new file mode 100644 index 0000000000..0bda2ed971 --- /dev/null +++ b/target/linux/generic/pending-5.15/461-mtd-cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch @@ -0,0 +1,17 @@ +From: George Kashperko <george@znau.edu.ua> +Subject: Issue map read after Write Buffer Load command to ensure chip is ready to receive data. + +Signed-off-by: George Kashperko <george@znau.edu.ua> +--- + drivers/mtd/chips/cfi_cmdset_0002.c | 1 + + 1 file changed, 1 insertion(+) +--- a/drivers/mtd/chips/cfi_cmdset_0002.c ++++ b/drivers/mtd/chips/cfi_cmdset_0002.c +@@ -2057,6 +2057,7 @@ static int __xipram do_write_buffer(stru + + /* Write Buffer Load */ + map_write(map, CMD(0x25), cmd_adr); ++ (void) map_read(map, cmd_adr); + + chip->state = FL_WRITING_TO_BUFFER; + diff --git a/target/linux/generic/pending-5.15/465-m25p80-mx-disable-software-protection.patch b/target/linux/generic/pending-5.15/465-m25p80-mx-disable-software-protection.patch new file mode 100644 index 0000000000..f58d5452ab --- /dev/null +++ b/target/linux/generic/pending-5.15/465-m25p80-mx-disable-software-protection.patch @@ -0,0 +1,18 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: Disable software protection bits for Macronix flashes. + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + drivers/mtd/spi-nor/spi-nor.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/mtd/spi-nor/macronix.c ++++ b/drivers/mtd/spi-nor/macronix.c +@@ -93,6 +93,7 @@ static void macronix_default_init(struct + { + nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable; + nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode; ++ nor->flags |= SNOR_F_HAS_LOCK; + } + + static const struct spi_nor_fixups macronix_fixups = { diff --git a/target/linux/generic/pending-5.15/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch b/target/linux/generic/pending-5.15/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch new file mode 100644 index 0000000000..2748192dd2 --- /dev/null +++ b/target/linux/generic/pending-5.15/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch @@ -0,0 +1,71 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Sat, 4 Nov 2017 07:40:23 +0100 +Subject: [PATCH] mtd: spi-nor: support limiting 4K sectors support based on + flash size + +Some devices need 4K sectors to be able to deal with small flash chips. +For instance, w25x05 is 64 KiB in size, and without 4K sectors, the +entire chip is just one erase block. +On bigger flash chip sizes, using 4K sectors can significantly slow down +many operations, including using a writable filesystem. 
There are several +platforms where it makes sense to use a single kernel on both kinds of +devices. + +To support this properly, allow configuring an upper flash chip size +limit for 4K sectors support. + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + +--- a/drivers/mtd/spi-nor/Kconfig ++++ b/drivers/mtd/spi-nor/Kconfig +@@ -34,6 +34,17 @@ config MTD_SPI_NOR_USE_4K_SECTORS + Please note that some tools/drivers/filesystems may not work with + 4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum). + ++config MTD_SPI_NOR_USE_4K_SECTORS_LIMIT ++ int "Maximum flash chip size to use 4K sectors on (in KiB)" ++ depends on MTD_SPI_NOR_USE_4K_SECTORS ++ default "4096" ++ help ++ There are many flash chips that support 4K sectors, but are so large ++ that using them significantly slows down writing large amounts of ++ data or using a writable filesystem. ++ Any flash chip larger than the size specified in this option will ++ not use 4K sectors. ++ + source "drivers/mtd/spi-nor/controllers/Kconfig" + + endif # MTD_SPI_NOR +--- a/drivers/mtd/spi-nor/core.c ++++ b/drivers/mtd/spi-nor/core.c +@@ -2792,6 +2792,21 @@ static void spi_nor_info_init_params(str + */ + erase_mask = 0; + i = 0; ++#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS ++ if ((info->flags & SECT_4K_PMC) && (params->size <= ++ CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT * 1024)) { ++ erase_mask |= BIT(i); ++ spi_nor_set_erase_type(&map->erase_type[i], 4096u, ++ SPINOR_OP_BE_4K_PMC); ++ i++; ++ } else if ((info->flags & SECT_4K) && (params->size <= ++ CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT * 1024)) { ++ erase_mask |= BIT(i); ++ spi_nor_set_erase_type(&map->erase_type[i], 4096u, ++ SPINOR_OP_BE_4K); ++ i++; ++ } ++#else + if (info->flags & SECT_4K_PMC) { + erase_mask |= BIT(i); + spi_nor_set_erase_type(&map->erase_type[i], 4096u, +@@ -2803,6 +2818,7 @@ static void spi_nor_info_init_params(str + SPINOR_OP_BE_4K); + i++; + } ++#endif + erase_mask |= BIT(i); + spi_nor_set_erase_type(&map->erase_type[i], info->sector_size, + SPINOR_OP_SE); diff --git a/target/linux/generic/pending-5.15/476-mtd-spi-nor-add-eon-en25q128.patch b/target/linux/generic/pending-5.15/476-mtd-spi-nor-add-eon-en25q128.patch new file mode 100644 index 0000000000..325fca62f3 --- /dev/null +++ b/target/linux/generic/pending-5.15/476-mtd-spi-nor-add-eon-en25q128.patch @@ -0,0 +1,18 @@ +From: Piotr Dymacz <pepe2k@gmail.com> +Subject: kernel/mtd: add support for EON EN25Q128 + +Signed-off-by: Piotr Dymacz <pepe2k@gmail.com> +--- + drivers/mtd/spi-nor/spi-nor.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/mtd/spi-nor/eon.c ++++ b/drivers/mtd/spi-nor/eon.c +@@ -15,6 +15,7 @@ static const struct flash_info eon_parts + { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, + { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, + { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) }, ++ { "en25q128", INFO(0x1c3018, 0, 64 * 1024, 256, SECT_4K) }, + { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16, + SECT_4K | SPI_NOR_DUAL_READ) }, + { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32, diff --git a/target/linux/generic/pending-5.15/479-mtd-spi-nor-add-xtx-xt25f128b.patch b/target/linux/generic/pending-5.15/479-mtd-spi-nor-add-xtx-xt25f128b.patch new file mode 100644 index 0000000000..796bf6becb --- /dev/null +++ b/target/linux/generic/pending-5.15/479-mtd-spi-nor-add-xtx-xt25f128b.patch @@ -0,0 +1,79 @@ +From patchwork Thu Feb 6 17:19:41 2020 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +X-Patchwork-Submitter: Daniel Golle 
<daniel@makrotopia.org> +X-Patchwork-Id: 1234465 +Date: Thu, 6 Feb 2020 19:19:41 +0200 +From: Daniel Golle <daniel@makrotopia.org> +To: linux-mtd@lists.infradead.org +Subject: [PATCH v2] mtd: spi-nor: Add support for xt25f128b chip +Message-ID: <20200206171941.GA2398@makrotopia.org> +MIME-Version: 1.0 +Content-Disposition: inline +List-Subscribe: <http://lists.infradead.org/mailman/listinfo/linux-mtd>, + <mailto:linux-mtd-request@lists.infradead.org?subject=subscribe> +Cc: Eitan Cohen <eitan@neot-semadar.com>, Piotr Dymacz <pepe2k@gmail.com>, + Tudor Ambarus <tudor.ambarus@microchip.com> +Sender: "linux-mtd" <linux-mtd-bounces@lists.infradead.org> +Errors-To: linux-mtd-bounces+incoming=patchwork.ozlabs.org@lists.infradead.org + +Add XT25F128B made by XTX Technology (Shenzhen) Limited. +This chip supports dual and quad read and uniform 4K-byte erase. +Verified on Teltonika RUT955 which comes with XT25F128B in recent +versions of the device. + +Signed-off-by: Daniel Golle <daniel@makrotopia.org> +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + drivers/mtd/spi-nor/spi-nor.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/drivers/mtd/spi-nor/Makefile ++++ b/drivers/mtd/spi-nor/Makefile +@@ -17,6 +17,7 @@ spi-nor-objs += sst.o + spi-nor-objs += winbond.o + spi-nor-objs += xilinx.o + spi-nor-objs += xmc.o ++spi-nor-objs += xtx.o + obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o + + obj-$(CONFIG_MTD_SPI_NOR) += controllers/ +--- /dev/null ++++ b/drivers/mtd/spi-nor/xtx.c +@@ -0,0 +1,15 @@ ++// SPDX-License-Identifier: GPL-2.0 ++#include <linux/mtd/spi-nor.h> ++ ++#include "core.h" ++ ++static const struct flash_info xtx_parts[] = { ++ /* XTX Technology (Shenzhen) Limited */ ++ { "xt25f128b", INFO(0x0B4018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++}; ++ ++const struct spi_nor_manufacturer spi_nor_xtx = { ++ .name = "xtx", ++ .parts = xtx_parts, ++ .nparts = ARRAY_SIZE(xtx_parts), ++}; +--- a/drivers/mtd/spi-nor/core.c ++++ b/drivers/mtd/spi-nor/core.c +@@ -2028,6 +2028,7 @@ static const struct spi_nor_manufacturer + &spi_nor_winbond, + &spi_nor_xilinx, + &spi_nor_xmc, ++ &spi_nor_xtx, + }; + + static const struct flash_info * +--- a/drivers/mtd/spi-nor/core.h ++++ b/drivers/mtd/spi-nor/core.h +@@ -398,6 +398,7 @@ extern const struct spi_nor_manufacturer + extern const struct spi_nor_manufacturer spi_nor_winbond; + extern const struct spi_nor_manufacturer spi_nor_xilinx; + extern const struct spi_nor_manufacturer spi_nor_xmc; ++extern const struct spi_nor_manufacturer spi_nor_xtx; + + int spi_nor_write_enable(struct spi_nor *nor); + int spi_nor_write_disable(struct spi_nor *nor); diff --git a/target/linux/generic/pending-5.15/482-mtd-spi-nor-add-support-for-Gigadevice-GD25D05.patch b/target/linux/generic/pending-5.15/482-mtd-spi-nor-add-support-for-Gigadevice-GD25D05.patch new file mode 100644 index 0000000000..c32cde559d --- /dev/null +++ b/target/linux/generic/pending-5.15/482-mtd-spi-nor-add-support-for-Gigadevice-GD25D05.patch @@ -0,0 +1,22 @@ +From d68b4aa22e8c625685bfad642dd7337948dc0ad1 Mon Sep 17 00:00:00 2001 +From: Koen Vandeputte <koen.vandeputte@ncentric.com> +Date: Mon, 6 Jan 2020 13:07:56 +0100 +Subject: [PATCH] mtd: spi-nor: add support for Gigadevice GD25D05 + +Signed-off-by: Koen Vandeputte <koen.vandeputte@ncentric.com> +--- + drivers/mtd/spi-nor/spi-nor.c | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/drivers/mtd/spi-nor/gigadevice.c ++++ b/drivers/mtd/spi-nor/gigadevice.c +@@ -24,6 +24,9 @@ static struct spi_nor_fixups gd25q256_fi + }; + + 
static const struct flash_info gigadevice_parts[] = { ++ { "gd25q05", INFO(0xc84010, 0, 64 * 1024, 1, ++ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | ++ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, diff --git a/target/linux/generic/pending-5.15/483-mtd-spi-nor-add-gd25q512.patch b/target/linux/generic/pending-5.15/483-mtd-spi-nor-add-gd25q512.patch new file mode 100644 index 0000000000..6f41546964 --- /dev/null +++ b/target/linux/generic/pending-5.15/483-mtd-spi-nor-add-gd25q512.patch @@ -0,0 +1,12 @@ +--- a/drivers/mtd/spi-nor/gigadevice.c ++++ b/drivers/mtd/spi-nor/gigadevice.c +@@ -53,6 +53,9 @@ static const struct flash_info gigadevic + SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | + SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6) + .fixups = &gd25q256_fixups }, ++ { "gd25q512", INFO(0xc84020, 0, 64 * 1024, 1024, ++ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | ++ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4B_OPCODES) }, + }; + + const struct spi_nor_manufacturer spi_nor_gigadevice = { diff --git a/target/linux/generic/pending-5.15/483-mtd-spinand-add-support-for-xtx-xt26g0xa.patch b/target/linux/generic/pending-5.15/483-mtd-spinand-add-support-for-xtx-xt26g0xa.patch new file mode 100644 index 0000000000..11590770cd --- /dev/null +++ b/target/linux/generic/pending-5.15/483-mtd-spinand-add-support-for-xtx-xt26g0xa.patch @@ -0,0 +1,178 @@ +From a07e31adf2753cad2fd9790db5bfc047c81e8152 Mon Sep 17 00:00:00 2001 +From: Felix Matouschek <felix@matouschek.org> +Date: Fri, 2 Jul 2021 20:31:23 +0200 +Subject: [PATCH] mtd: spinand: Add support for XTX XT26G0xA + +Add support for XTX Technology XT26G01AXXXXX, XTX26G02AXXXXX and +XTX26G04AXXXXX SPI NAND. + +These are 3V, 1G/2G/4Gbit serial SLC NAND flash devices with on-die ECC +(8bit strength per 512bytes). + +Tested on Teltonika RUTX10 flashed with OpenWrt. 
+ +Datasheets available at +http://www.xtxtech.com/download/?AId=225 +https://datasheet.lcsc.com/szlcsc/2005251034_XTX-XT26G01AWSEGA_C558841.pdf + +Signed-off-by: Felix Matouschek <felix@matouschek.org> +--- + drivers/mtd/nand/spi/Makefile | 2 +- + drivers/mtd/nand/spi/core.c | 1 + + drivers/mtd/nand/spi/xtx.c | 122 ++++++++++++++++++++++++++++++++++ + include/linux/mtd/spinand.h | 1 + + 4 files changed, 125 insertions(+), 1 deletion(-) + create mode 100644 drivers/mtd/nand/spi/xtx.c + +--- a/drivers/mtd/nand/spi/Makefile ++++ b/drivers/mtd/nand/spi/Makefile +@@ -1,3 +1,3 @@ + # SPDX-License-Identifier: GPL-2.0 +-spinand-objs := core.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o ++spinand-objs := core.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o xtx.o + obj-$(CONFIG_MTD_SPI_NAND) += spinand.o +--- a/drivers/mtd/nand/spi/core.c ++++ b/drivers/mtd/nand/spi/core.c +@@ -760,6 +760,7 @@ static const struct spinand_manufacturer + ¶gon_spinand_manufacturer, + &toshiba_spinand_manufacturer, + &winbond_spinand_manufacturer, ++ &xtx_spinand_manufacturer, + }; + + static int spinand_manufacturer_match(struct spinand_device *spinand, +--- /dev/null ++++ b/drivers/mtd/nand/spi/xtx.c +@@ -0,0 +1,122 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Author: ++ * Felix Matouschek <felix@matouschek.org> ++ */ ++ ++#include <linux/device.h> ++#include <linux/kernel.h> ++#include <linux/mtd/spinand.h> ++ ++#define SPINAND_MFR_XTX 0x0B ++ ++#define XT26G0XA_STATUS_ECC_MASK GENMASK(5, 2) ++#define XT26G0XA_STATUS_ECC_NO_DETECTED (0 << 2) ++#define XT26G0XA_STATUS_ECC_8_CORRECTED (3 << 4) ++#define XT26G0XA_STATUS_ECC_UNCOR_ERROR (2 << 4) ++ ++static SPINAND_OP_VARIANTS(read_cache_variants, ++ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), ++ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), ++ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), ++ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), ++ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), ++ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); ++ ++static SPINAND_OP_VARIANTS(write_cache_variants, ++ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), ++ SPINAND_PROG_LOAD(true, 0, NULL, 0)); ++ ++static SPINAND_OP_VARIANTS(update_cache_variants, ++ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), ++ SPINAND_PROG_LOAD(false, 0, NULL, 0)); ++ ++static int xt26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section, ++ struct mtd_oob_region *region) ++{ ++ if (section) ++ return -ERANGE; ++ ++ region->offset = 8; ++ region->length = 40; ++ ++ return 0; ++} ++ ++static int xt26g0xa_ooblayout_free(struct mtd_info *mtd, int section, ++ struct mtd_oob_region *region) ++{ ++ if (section) ++ return -ERANGE; ++ ++ region->offset = 1; ++ region->length = 7; ++ ++ return 0; ++} ++ ++static const struct mtd_ooblayout_ops xt26g0xa_ooblayout = { ++ .ecc = xt26g0xa_ooblayout_ecc, ++ .free = xt26g0xa_ooblayout_free, ++}; ++ ++static int xt26g0xa_ecc_get_status(struct spinand_device *spinand, ++ u8 status) ++{ ++ switch (status & XT26G0XA_STATUS_ECC_MASK) { ++ case XT26G0XA_STATUS_ECC_NO_DETECTED: ++ return 0; ++ case XT26G0XA_STATUS_ECC_8_CORRECTED: ++ return 8; ++ case XT26G0XA_STATUS_ECC_UNCOR_ERROR: ++ return -EBADMSG; ++ default: /* (1 << 2) through (7 << 2) are 1-7 corrected errors */ ++ return (status & XT26G0XA_STATUS_ECC_MASK) >> 2; ++ } ++ ++ return -EINVAL; ++} ++ ++static const struct spinand_info xtx_spinand_table[] = { ++ SPINAND_INFO("XT26G01A", ++ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE1), ++ 
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), ++ NAND_ECCREQ(8, 512), ++ SPINAND_INFO_OP_VARIANTS(&read_cache_variants, ++ &write_cache_variants, ++ &update_cache_variants), ++ SPINAND_HAS_QE_BIT, ++ SPINAND_ECCINFO(&xt26g0xa_ooblayout, ++ xt26g0xa_ecc_get_status)), ++ SPINAND_INFO("XT26G02A", ++ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE2), ++ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), ++ NAND_ECCREQ(8, 512), ++ SPINAND_INFO_OP_VARIANTS(&read_cache_variants, ++ &write_cache_variants, ++ &update_cache_variants), ++ SPINAND_HAS_QE_BIT, ++ SPINAND_ECCINFO(&xt26g0xa_ooblayout, ++ xt26g0xa_ecc_get_status)), ++ SPINAND_INFO("XT26G04A", ++ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE3), ++ NAND_MEMORG(1, 2048, 64, 128, 2048, 40, 1, 1, 1), ++ NAND_ECCREQ(8, 512), ++ SPINAND_INFO_OP_VARIANTS(&read_cache_variants, ++ &write_cache_variants, ++ &update_cache_variants), ++ SPINAND_HAS_QE_BIT, ++ SPINAND_ECCINFO(&xt26g0xa_ooblayout, ++ xt26g0xa_ecc_get_status)), ++}; ++ ++static const struct spinand_manufacturer_ops xtx_spinand_manuf_ops = { ++}; ++ ++const struct spinand_manufacturer xtx_spinand_manufacturer = { ++ .id = SPINAND_MFR_XTX, ++ .name = "XTX", ++ .chips = xtx_spinand_table, ++ .nchips = ARRAY_SIZE(xtx_spinand_table), ++ .ops = &xtx_spinand_manuf_ops, ++}; +--- a/include/linux/mtd/spinand.h ++++ b/include/linux/mtd/spinand.h +@@ -244,6 +244,7 @@ extern const struct spinand_manufacturer + extern const struct spinand_manufacturer paragon_spinand_manufacturer; + extern const struct spinand_manufacturer toshiba_spinand_manufacturer; + extern const struct spinand_manufacturer winbond_spinand_manufacturer; ++extern const struct spinand_manufacturer xtx_spinand_manufacturer; + + /** + * struct spinand_op_variants - SPI NAND operation variants diff --git a/target/linux/generic/pending-5.15/484-mtd-spi-nor-add-esmt-f25l16pa.patch b/target/linux/generic/pending-5.15/484-mtd-spi-nor-add-esmt-f25l16pa.patch new file mode 100644 index 0000000000..ce3857d511 --- /dev/null +++ b/target/linux/generic/pending-5.15/484-mtd-spi-nor-add-esmt-f25l16pa.patch @@ -0,0 +1,11 @@ +--- a/drivers/mtd/spi-nor/esmt.c ++++ b/drivers/mtd/spi-nor/esmt.c +@@ -10,6 +10,8 @@ + + static const struct flash_info esmt_parts[] = { + /* ESMT */ ++ { "f25l16pa-2s", INFO(0x8c2115, 0, 64 * 1024, 32, ++ SECT_4K | SPI_NOR_HAS_LOCK) }, + { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_HAS_LOCK) }, + { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, diff --git a/target/linux/generic/pending-5.15/485-mtd-spi-nor-add-xmc-xm25qh128c.patch b/target/linux/generic/pending-5.15/485-mtd-spi-nor-add-xmc-xm25qh128c.patch new file mode 100644 index 0000000000..4b3f674170 --- /dev/null +++ b/target/linux/generic/pending-5.15/485-mtd-spi-nor-add-xmc-xm25qh128c.patch @@ -0,0 +1,11 @@ +--- a/drivers/mtd/spi-nor/xmc.c ++++ b/drivers/mtd/spi-nor/xmc.c +@@ -14,6 +14,8 @@ static const struct flash_info xmc_parts + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "XM25QH128C", INFO(0x204018, 0, 64 * 1024, 256, ++ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + }; + + const struct spi_nor_manufacturer spi_nor_xmc = { diff --git a/target/linux/generic/pending-5.15/490-ubi-auto-attach-mtd-device-named-ubi-or-data-on-boot.patch b/target/linux/generic/pending-5.15/490-ubi-auto-attach-mtd-device-named-ubi-or-data-on-boot.patch new file mode 100644 index 0000000000..7c9766fa7b --- /dev/null +++ 
b/target/linux/generic/pending-5.15/490-ubi-auto-attach-mtd-device-named-ubi-or-data-on-boot.patch @@ -0,0 +1,97 @@ +From: Daniel Golle <daniel@makrotopia.org> +Subject: ubi: auto-attach mtd device named "ubi" or "data" on boot + +Signed-off-by: Daniel Golle <daniel@makrotopia.org> +--- + drivers/mtd/ubi/build.c | 36 ++++++++++++++++++++++++++++++++++++ + 1 file changed, 36 insertions(+) + +--- a/drivers/mtd/ubi/build.c ++++ b/drivers/mtd/ubi/build.c +@@ -1192,6 +1192,73 @@ static struct mtd_info * __init open_mtd + return mtd; + } + ++/* ++ * This function tries attaching mtd partitions named either "ubi" or "data" ++ * during boot. ++ */ ++static void __init ubi_auto_attach(void) ++{ ++ int err; ++ struct mtd_info *mtd; ++ loff_t offset = 0; ++ size_t len; ++ char magic[4]; ++ ++ /* try attaching mtd device named "ubi" or "data" */ ++ mtd = open_mtd_device("ubi"); ++ if (IS_ERR(mtd)) ++ mtd = open_mtd_device("data"); ++ ++ if (IS_ERR(mtd)) ++ return; ++ ++ /* get the first not bad block */ ++ if (mtd_can_have_bb(mtd)) ++ while (mtd_block_isbad(mtd, offset)) { ++ offset += mtd->erasesize; ++ ++ if (offset > mtd->size) { ++ pr_err("UBI error: Failed to find a non-bad " ++ "block on mtd%d\n", mtd->index); ++ goto cleanup; ++ } ++ } ++ ++ /* check if the read from flash was successful */ ++ err = mtd_read(mtd, offset, 4, &len, (void *) magic); ++ if ((err && !mtd_is_bitflip(err)) || len != 4) { ++ pr_err("UBI error: unable to read from mtd%d\n", mtd->index); ++ goto cleanup; ++ } ++ ++ /* check for a valid ubi magic */ ++ if (strncmp(magic, "UBI#", 4)) { ++ pr_err("UBI error: no valid UBI magic found inside mtd%d\n", mtd->index); ++ goto cleanup; ++ } ++ ++ /* don't auto-add media types where UBI doesn't makes sense */ ++ if (mtd->type != MTD_NANDFLASH && ++ mtd->type != MTD_NORFLASH && ++ mtd->type != MTD_DATAFLASH && ++ mtd->type != MTD_MLCNANDFLASH) ++ goto cleanup; ++ ++ mutex_lock(&ubi_devices_mutex); ++ pr_notice("UBI: auto-attach mtd%d\n", mtd->index); ++ err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0); ++ mutex_unlock(&ubi_devices_mutex); ++ if (err < 0) { ++ pr_err("UBI error: cannot attach mtd%d\n", mtd->index); ++ goto cleanup; ++ } ++ ++ return; ++ ++cleanup: ++ put_mtd_device(mtd); ++} ++ + static int __init ubi_init(void) + { + int err, i, k; +@@ -1275,6 +1342,12 @@ static int __init ubi_init(void) + } + } + ++ /* auto-attach mtd devices only if built-in to the kernel and no ubi.mtd ++ * parameter was given */ ++ if (IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) && ++ !ubi_is_module() && !mtd_devs) ++ ubi_auto_attach(); ++ + err = ubiblock_init(); + if (err) { + pr_err("UBI error: block: cannot initialize, error %d\n", err); diff --git a/target/linux/generic/pending-5.15/491-ubi-auto-create-ubiblock-device-for-rootfs.patch b/target/linux/generic/pending-5.15/491-ubi-auto-create-ubiblock-device-for-rootfs.patch new file mode 100644 index 0000000000..a2b48fd4fc --- /dev/null +++ b/target/linux/generic/pending-5.15/491-ubi-auto-create-ubiblock-device-for-rootfs.patch @@ -0,0 +1,69 @@ +From: Daniel Golle <daniel@makrotopia.org> +Subject: ubi: auto-create ubiblock device for rootfs + +Signed-off-by: Daniel Golle <daniel@makrotopia.org> +--- + drivers/mtd/ubi/block.c | 42 ++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 42 insertions(+) + +--- a/drivers/mtd/ubi/block.c ++++ b/drivers/mtd/ubi/block.c +@@ -652,6 +652,47 @@ static void __init ubiblock_create_from_ + } + } + ++#define UBIFS_NODE_MAGIC 0x06101831 ++static inline int ubi_vol_is_ubifs(struct ubi_volume_desc 
*desc) ++{ ++ int ret; ++ uint32_t magic_of, magic; ++ ret = ubi_read(desc, 0, (char *)&magic_of, 0, 4); ++ if (ret) ++ return 0; ++ magic = le32_to_cpu(magic_of); ++ return magic == UBIFS_NODE_MAGIC; ++} ++ ++static void __init ubiblock_create_auto_rootfs(void) ++{ ++ int ubi_num, ret, is_ubifs; ++ struct ubi_volume_desc *desc; ++ struct ubi_volume_info vi; ++ ++ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) { ++ desc = ubi_open_volume_nm(ubi_num, "rootfs", UBI_READONLY); ++ if (IS_ERR(desc)) ++ desc = ubi_open_volume_nm(ubi_num, "fit", UBI_READONLY);; ++ ++ if (IS_ERR(desc)) ++ continue; ++ ++ ubi_get_volume_info(desc, &vi); ++ is_ubifs = ubi_vol_is_ubifs(desc); ++ ubi_close_volume(desc); ++ if (is_ubifs) ++ break; ++ ++ ret = ubiblock_create(&vi); ++ if (ret) ++ pr_err("UBI error: block: can't add '%s' volume, err=%d\n", ++ vi.name, ret); ++ /* always break if we get here */ ++ break; ++ } ++} ++ + static void ubiblock_remove_all(void) + { + struct ubiblock *next; +@@ -684,6 +725,10 @@ int __init ubiblock_init(void) + */ + ubiblock_create_from_param(); + ++ /* auto-attach "rootfs" volume if existing and non-ubifs */ ++ if (IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV)) ++ ubiblock_create_auto_rootfs(); ++ + /* + * Block devices are only created upon user requests, so we ignore + * existing volumes. diff --git a/target/linux/generic/pending-5.15/492-try-auto-mounting-ubi0-rootfs-in-init-do_mounts.c.patch b/target/linux/generic/pending-5.15/492-try-auto-mounting-ubi0-rootfs-in-init-do_mounts.c.patch new file mode 100644 index 0000000000..e3c4dd2ef4 --- /dev/null +++ b/target/linux/generic/pending-5.15/492-try-auto-mounting-ubi0-rootfs-in-init-do_mounts.c.patch @@ -0,0 +1,53 @@ +From: Daniel Golle <daniel@makrotopia.org> +Subject: try auto-mounting ubi0:rootfs in init/do_mounts.c + +Signed-off-by: Daniel Golle <daniel@makrotopia.org> +--- + init/do_mounts.c | 26 +++++++++++++++++++++++++- + 1 file changed, 25 insertions(+), 1 deletion(-) + +--- a/init/do_mounts.c ++++ b/init/do_mounts.c +@@ -474,7 +474,30 @@ retry: + out: + put_page(page); + } +- ++ ++#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV ++static int __init mount_ubi_rootfs(void) ++{ ++ int flags = MS_SILENT; ++ int err, tried = 0; ++ ++ while (tried < 2) { ++ err = do_mount_root("ubi0:rootfs", "ubifs", flags, \ ++ root_mount_data); ++ switch (err) { ++ case -EACCES: ++ flags |= MS_RDONLY; ++ tried++; ++ break; ++ default: ++ return err; ++ } ++ } ++ ++ return -EINVAL; ++} ++#endif ++ + #ifdef CONFIG_ROOT_NFS + + #define NFSROOT_TIMEOUT_MIN 5 +@@ -567,6 +590,10 @@ void __init mount_root(void) + return; + } + #endif ++#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV ++ if (!mount_ubi_rootfs()) ++ return; ++#endif + #ifdef CONFIG_BLOCK + { + int err = create_dev("/dev/root", ROOT_DEV); diff --git a/target/linux/generic/pending-5.15/493-ubi-set-ROOT_DEV-to-ubiblock-rootfs-if-unset.patch b/target/linux/generic/pending-5.15/493-ubi-set-ROOT_DEV-to-ubiblock-rootfs-if-unset.patch new file mode 100644 index 0000000000..2dff46807e --- /dev/null +++ b/target/linux/generic/pending-5.15/493-ubi-set-ROOT_DEV-to-ubiblock-rootfs-if-unset.patch @@ -0,0 +1,34 @@ +From: Daniel Golle <daniel@makrotopia.org> +Subject: ubi: set ROOT_DEV to ubiblock "rootfs" if unset + +Signed-off-by: Daniel Golle <daniel@makrotopia.org> +--- + drivers/mtd/ubi/block.c | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +--- a/drivers/mtd/ubi/block.c ++++ b/drivers/mtd/ubi/block.c +@@ -42,6 +42,7 @@ + #include <linux/scatterlist.h> + #include <linux/idr.h> + #include <asm/div64.h> ++#include 
<linux/root_dev.h> + + #include "ubi-media.h" + #include "ubi.h" +@@ -458,6 +459,15 @@ int ubiblock_create(struct ubi_volume_in + dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)", + dev->ubi_num, dev->vol_id, vi->name); + mutex_unlock(&devices_mutex); ++ ++ if (!strcmp(vi->name, "rootfs") && ++ IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) && ++ ROOT_DEV == 0) { ++ pr_notice("ubiblock: device ubiblock%d_%d (%s) set to be root filesystem\n", ++ dev->ubi_num, dev->vol_id, vi->name); ++ ROOT_DEV = MKDEV(gd->major, gd->first_minor); ++ } ++ + return 0; + + out_free_queue: diff --git a/target/linux/generic/pending-5.15/494-mtd-ubi-add-EOF-marker-support.patch b/target/linux/generic/pending-5.15/494-mtd-ubi-add-EOF-marker-support.patch new file mode 100644 index 0000000000..81d32ca5a5 --- /dev/null +++ b/target/linux/generic/pending-5.15/494-mtd-ubi-add-EOF-marker-support.patch @@ -0,0 +1,60 @@ +From: Gabor Juhos <juhosg@openwrt.org> +Subject: mtd: add EOF marker support to the UBI layer + +Signed-off-by: Gabor Juhos <juhosg@openwrt.org> +--- + drivers/mtd/ubi/attach.c | 25 ++++++++++++++++++++++--- + drivers/mtd/ubi/ubi.h | 1 + + 2 files changed, 23 insertions(+), 3 deletions(-) + +--- a/drivers/mtd/ubi/attach.c ++++ b/drivers/mtd/ubi/attach.c +@@ -926,6 +926,13 @@ static bool vol_ignored(int vol_id) + #endif + } + ++static bool ec_hdr_has_eof(struct ubi_ec_hdr *ech) ++{ ++ return ech->padding1[0] == 'E' && ++ ech->padding1[1] == 'O' && ++ ech->padding1[2] == 'F'; ++} ++ + /** + * scan_peb - scan and process UBI headers of a PEB. + * @ubi: UBI device description object +@@ -958,9 +965,21 @@ static int scan_peb(struct ubi_device *u + return 0; + } + +- err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); +- if (err < 0) +- return err; ++ if (!ai->eof_found) { ++ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); ++ if (err < 0) ++ return err; ++ ++ if (ec_hdr_has_eof(ech)) { ++ pr_notice("UBI: EOF marker found, PEBs from %d will be erased\n", ++ pnum); ++ ai->eof_found = true; ++ } ++ } ++ ++ if (ai->eof_found) ++ err = UBI_IO_FF_BITFLIPS; ++ + switch (err) { + case 0: + break; +--- a/drivers/mtd/ubi/ubi.h ++++ b/drivers/mtd/ubi/ubi.h +@@ -782,6 +782,7 @@ struct ubi_attach_info { + int mean_ec; + uint64_t ec_sum; + int ec_count; ++ bool eof_found; + struct kmem_cache *aeb_slab_cache; + struct ubi_ec_hdr *ech; + struct ubi_vid_io_buf *vidb; diff --git a/target/linux/generic/pending-5.15/495-mtd-core-add-get_mtd_device_by_node.patch b/target/linux/generic/pending-5.15/495-mtd-core-add-get_mtd_device_by_node.patch new file mode 100644 index 0000000000..d5db5fdd54 --- /dev/null +++ b/target/linux/generic/pending-5.15/495-mtd-core-add-get_mtd_device_by_node.patch @@ -0,0 +1,75 @@ +From 1bd1b740f208d1cf4071932cc51860d37266c402 Mon Sep 17 00:00:00 2001 +From: Bernhard Frauendienst <kernel@nospam.obeliks.de> +Date: Sat, 1 Sep 2018 00:30:11 +0200 +Subject: [PATCH 495/497] mtd: core: add get_mtd_device_by_node + +Add function to retrieve a mtd device by its OF node. Since drivers can +assign arbitrary names to mtd devices in the absence of a label +property, there is no other reliable way to retrieve a mtd device for a +given OF node. 
+ +Signed-off-by: Bernhard Frauendienst <kernel@nospam.obeliks.de> +Reviewed-by: Miquel Raynal <miquel.raynal@bootlin.com> +--- + drivers/mtd/mtdcore.c | 38 ++++++++++++++++++++++++++++++++++++++ + include/linux/mtd/mtd.h | 2 ++ + 2 files changed, 40 insertions(+) + +--- a/drivers/mtd/mtdcore.c ++++ b/drivers/mtd/mtdcore.c +@@ -1046,6 +1046,44 @@ out_unlock: + } + EXPORT_SYMBOL_GPL(get_mtd_device_nm); + ++/** ++ * get_mtd_device_by_node - obtain a validated handle for an MTD device ++ * by of_node ++ * @of_node: OF node of MTD device to open ++ * ++ * This function returns MTD device description structure in case of ++ * success and an error code in case of failure. ++ */ ++struct mtd_info *get_mtd_device_by_node(const struct device_node *of_node) ++{ ++ int err = -ENODEV; ++ struct mtd_info *mtd = NULL, *other; ++ ++ mutex_lock(&mtd_table_mutex); ++ ++ mtd_for_each_device(other) { ++ if (of_node == other->dev.of_node) { ++ mtd = other; ++ break; ++ } ++ } ++ ++ if (!mtd) ++ goto out_unlock; ++ ++ err = __get_mtd_device(mtd); ++ if (err) ++ goto out_unlock; ++ ++ mutex_unlock(&mtd_table_mutex); ++ return mtd; ++ ++out_unlock: ++ mutex_unlock(&mtd_table_mutex); ++ return ERR_PTR(err); ++} ++EXPORT_SYMBOL_GPL(get_mtd_device_by_node); ++ + void put_mtd_device(struct mtd_info *mtd) + { + mutex_lock(&mtd_table_mutex); +--- a/include/linux/mtd/mtd.h ++++ b/include/linux/mtd/mtd.h +@@ -698,6 +698,8 @@ extern struct mtd_info *get_mtd_device(s + extern int __get_mtd_device(struct mtd_info *mtd); + extern void __put_mtd_device(struct mtd_info *mtd); + extern struct mtd_info *get_mtd_device_nm(const char *name); ++extern struct mtd_info *get_mtd_device_by_node( ++ const struct device_node *of_node); + extern void put_mtd_device(struct mtd_info *mtd); + + static inline uint64_t mtdpart_get_offset(const struct mtd_info *mtd) diff --git a/target/linux/generic/pending-5.15/496-dt-bindings-add-bindings-for-mtd-concat-devices.patch b/target/linux/generic/pending-5.15/496-dt-bindings-add-bindings-for-mtd-concat-devices.patch new file mode 100644 index 0000000000..01f3b9ec2d --- /dev/null +++ b/target/linux/generic/pending-5.15/496-dt-bindings-add-bindings-for-mtd-concat-devices.patch @@ -0,0 +1,52 @@ +From 5734c6669fba7ddb5ef491ccff7159d15dba0b59 Mon Sep 17 00:00:00 2001 +From: Bernhard Frauendienst <kernel@nospam.obeliks.de> +Date: Wed, 5 Sep 2018 01:32:51 +0200 +Subject: [PATCH 496/497] dt-bindings: add bindings for mtd-concat devices + +Document virtual mtd-concat device bindings. + +Signed-off-by: Bernhard Frauendienst <kernel@nospam.obeliks.de> +--- + .../devicetree/bindings/mtd/mtd-concat.txt | 36 +++++++++++++++++++ + 1 file changed, 36 insertions(+) + create mode 100644 Documentation/devicetree/bindings/mtd/mtd-concat.txt + +--- /dev/null ++++ b/Documentation/devicetree/bindings/mtd/mtd-concat.txt +@@ -0,0 +1,36 @@ ++Virtual MTD concat device ++ ++Requires properties: ++- devices: list of phandles to mtd nodes that should be concatenated ++ ++Example: ++ ++&spi { ++ flash0: flash@0 { ++ ... ++ }; ++ flash1: flash@1 { ++ ... 
++ }; ++}; ++ ++flash { ++ compatible = "mtd-concat"; ++ ++ devices = <&flash0 &flash1>; ++ ++ partitions { ++ compatible = "fixed-partitions"; ++ ++ partition@0 { ++ label = "boot"; ++ reg = <0x0000000 0x0040000>; ++ read-only; ++ }; ++ ++ partition@40000 { ++ label = "firmware"; ++ reg = <0x0040000 0x1fc0000>; ++ }; ++ } ++} diff --git a/target/linux/generic/pending-5.15/497-mtd-mtdconcat-add-dt-driver-for-concat-devices.patch b/target/linux/generic/pending-5.15/497-mtd-mtdconcat-add-dt-driver-for-concat-devices.patch new file mode 100644 index 0000000000..058cab09e5 --- /dev/null +++ b/target/linux/generic/pending-5.15/497-mtd-mtdconcat-add-dt-driver-for-concat-devices.patch @@ -0,0 +1,216 @@ +From e53f712d8eac71f54399b61038ccf87d2cee99d7 Mon Sep 17 00:00:00 2001 +From: Bernhard Frauendienst <kernel@nospam.obeliks.de> +Date: Sat, 25 Aug 2018 12:35:22 +0200 +Subject: [PATCH 497/497] mtd: mtdconcat: add dt driver for concat devices + +Some mtd drivers like physmap variants have support for concatenating +multiple mtd devices, but there is no generic way to define such a +concat device from within the device tree. + +This is useful for some SoC boards that use multiple flash chips as +memory banks of a single mtd device, with partitions spanning chip +borders. + +This commit adds a driver for creating virtual mtd-concat devices. They +must have a compatible = "mtd-concat" line, and define a list of devices +to concat in the 'devices' property, for example: + +flash { + compatible = "mtd-concat"; + + devices = <&flash0 &flash1>; + + partitions { + ... + }; +}; + +The driver is added to the very end of the mtd Makefile to increase the +likelyhood of all child devices already being loaded at the time of +probing, preventing unnecessary deferred probes. + +Signed-off-by: Bernhard Frauendienst <kernel@nospam.obeliks.de> +--- + drivers/mtd/Kconfig | 2 + + drivers/mtd/Makefile | 3 + + drivers/mtd/composite/Kconfig | 12 +++ + drivers/mtd/composite/Makefile | 6 ++ + drivers/mtd/composite/virt_concat.c | 128 ++++++++++++++++++++++++++++ + 5 files changed, 151 insertions(+) + create mode 100644 drivers/mtd/composite/Kconfig + create mode 100644 drivers/mtd/composite/Makefile + create mode 100644 drivers/mtd/composite/virt_concat.c + +--- a/drivers/mtd/Kconfig ++++ b/drivers/mtd/Kconfig +@@ -238,4 +238,6 @@ source "drivers/mtd/ubi/Kconfig" + + source "drivers/mtd/hyperbus/Kconfig" + ++source "drivers/mtd/composite/Kconfig" ++ + endif # MTD +--- a/drivers/mtd/Makefile ++++ b/drivers/mtd/Makefile +@@ -33,3 +33,6 @@ obj-y += chips/ lpddr/ maps/ devices/ n + obj-$(CONFIG_MTD_SPI_NOR) += spi-nor/ + obj-$(CONFIG_MTD_UBI) += ubi/ + obj-$(CONFIG_MTD_HYPERBUS) += hyperbus/ ++ ++# Composite drivers must be loaded last ++obj-y += composite/ +--- /dev/null ++++ b/drivers/mtd/composite/Kconfig +@@ -0,0 +1,12 @@ ++menu "Composite MTD device drivers" ++ depends on MTD!=n ++ ++config MTD_VIRT_CONCAT ++ tristate "Virtual concat MTD device" ++ help ++ This driver allows creation of a virtual MTD concat device, which ++ concatenates multiple underlying MTD devices to a single device. ++ This is required by some SoC boards where multiple memory banks are ++ used as one device with partitions spanning across device boundaries. 
++ ++endmenu +--- /dev/null ++++ b/drivers/mtd/composite/Makefile +@@ -0,0 +1,6 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# ++# linux/drivers/mtd/composite/Makefile ++# ++ ++obj-$(CONFIG_MTD_VIRT_CONCAT) += virt_concat.o +--- /dev/null ++++ b/drivers/mtd/composite/virt_concat.c +@@ -0,0 +1,128 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Virtual concat MTD device driver ++ * ++ * Copyright (C) 2018 Bernhard Frauendienst ++ * Author: Bernhard Frauendienst, kernel@nospam.obeliks.de ++ */ ++ ++#include <linux/module.h> ++#include <linux/device.h> ++#include <linux/mtd/concat.h> ++#include <linux/mtd/mtd.h> ++#include <linux/mtd/partitions.h> ++#include <linux/of.h> ++#include <linux/of_platform.h> ++#include <linux/slab.h> ++ ++/* ++ * struct of_virt_concat - platform device driver data. ++ * @cmtd the final mtd_concat device ++ * @num_devices the number of devices in @devices ++ * @devices points to an array of devices already loaded ++ */ ++struct of_virt_concat { ++ struct mtd_info *cmtd; ++ int num_devices; ++ struct mtd_info **devices; ++}; ++ ++static int virt_concat_remove(struct platform_device *pdev) ++{ ++ struct of_virt_concat *info; ++ int i; ++ ++ info = platform_get_drvdata(pdev); ++ if (!info) ++ return 0; ++ ++ // unset data for when this is called after a probe error ++ platform_set_drvdata(pdev, NULL); ++ ++ if (info->cmtd) { ++ mtd_device_unregister(info->cmtd); ++ mtd_concat_destroy(info->cmtd); ++ } ++ ++ if (info->devices) { ++ for (i = 0; i < info->num_devices; i++) ++ put_mtd_device(info->devices[i]); ++ } ++ ++ return 0; ++} ++ ++static int virt_concat_probe(struct platform_device *pdev) ++{ ++ struct device_node *node = pdev->dev.of_node; ++ struct of_phandle_iterator it; ++ struct of_virt_concat *info; ++ struct mtd_info *mtd; ++ int err = 0, count; ++ ++ count = of_count_phandle_with_args(node, "devices", NULL); ++ if (count <= 0) ++ return -EINVAL; ++ ++ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); ++ if (!info) ++ return -ENOMEM; ++ info->devices = devm_kcalloc(&pdev->dev, count, ++ sizeof(*(info->devices)), GFP_KERNEL); ++ if (!info->devices) { ++ err = -ENOMEM; ++ goto err_remove; ++ } ++ ++ platform_set_drvdata(pdev, info); ++ ++ of_for_each_phandle(&it, err, node, "devices", NULL, 0) { ++ mtd = get_mtd_device_by_node(it.node); ++ if (IS_ERR(mtd)) { ++ of_node_put(it.node); ++ err = -EPROBE_DEFER; ++ goto err_remove; ++ } ++ ++ info->devices[info->num_devices++] = mtd; ++ } ++ ++ info->cmtd = mtd_concat_create(info->devices, info->num_devices, ++ dev_name(&pdev->dev)); ++ if (!info->cmtd) { ++ err = -ENXIO; ++ goto err_remove; ++ } ++ ++ info->cmtd->dev.parent = &pdev->dev; ++ mtd_set_of_node(info->cmtd, node); ++ mtd_device_register(info->cmtd, NULL, 0); ++ ++ return 0; ++ ++err_remove: ++ virt_concat_remove(pdev); ++ ++ return err; ++} ++ ++static const struct of_device_id virt_concat_of_match[] = { ++ { .compatible = "mtd-concat", }, ++ { /* sentinel */ } ++}; ++MODULE_DEVICE_TABLE(of, virt_concat_of_match); ++ ++static struct platform_driver virt_concat_driver = { ++ .probe = virt_concat_probe, ++ .remove = virt_concat_remove, ++ .driver = { ++ .name = "virt-mtdconcat", ++ .of_match_table = virt_concat_of_match, ++ }, ++}; ++ ++module_platform_driver(virt_concat_driver); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_AUTHOR("Bernhard Frauendienst <kernel@nospam.obeliks.de>"); ++MODULE_DESCRIPTION("Virtual concat MTD device driver"); diff --git a/target/linux/generic/pending-5.15/498-mtd-spi-nor-locking-support-for-MX25L6405D.patch 
b/target/linux/generic/pending-5.15/498-mtd-spi-nor-locking-support-for-MX25L6405D.patch new file mode 100644 index 0000000000..62e977c8d1 --- /dev/null +++ b/target/linux/generic/pending-5.15/498-mtd-spi-nor-locking-support-for-MX25L6405D.patch @@ -0,0 +1,34 @@ +From 8bf2ce6ea4ee840b70f55a27f80e1cd308051b13 Mon Sep 17 00:00:00 2001 +From: Nick Hainke <vincent@systemli.org> +Date: Mon, 27 Dec 2021 00:38:13 +0100 +Subject: [PATCH 1/2] mtd: spi-nor: locking support for MX25L6405D + +Macronix MX25L6405D supports locking with four block-protection bits. +Currently, the driver only sets three bits. If the bootloader does not +sustain the flash chip in an unlocked state, the flash might be +non-writeable. Add the corresponding flag to enable locking support with +four bits in the status register. + +Tested on Nanostation M2 XM. + +Similar to commit 7ea40b54e83b ("mtd: spi-nor: enable locking support for +MX25L12805D") + +Signed-off-by: David Bauer <mail@david-bauer.net> +Signed-off-by: Nick Hainke <vincent@systemli.org> +--- + drivers/mtd/spi-nor/macronix.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/drivers/mtd/spi-nor/macronix.c ++++ b/drivers/mtd/spi-nor/macronix.c +@@ -42,7 +42,8 @@ static const struct flash_info macronix_ + { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) }, + { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) }, + { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) }, +- { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) }, ++ { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K | ++ SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP) }, + { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) }, + { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | diff --git a/target/linux/generic/pending-5.15/499-mtd-spi-nor-disable-16-bit-sr-for-macronix.patch b/target/linux/generic/pending-5.15/499-mtd-spi-nor-disable-16-bit-sr-for-macronix.patch new file mode 100644 index 0000000000..ec14f6341c --- /dev/null +++ b/target/linux/generic/pending-5.15/499-mtd-spi-nor-disable-16-bit-sr-for-macronix.patch @@ -0,0 +1,30 @@ +From 245224608b5368c10407da07557e546743d3c489 Mon Sep 17 00:00:00 2001 +From: Nick Hainke <vincent@systemli.org> +Date: Mon, 27 Dec 2021 09:33:13 +0100 +Subject: [PATCH 2/2] mtd: spi-nor: disable 16-bit-sr for macronix + +Macronix flash chips seem to consist of only one status register. +These chips will not work with the "16-bit Write Status (01h) Command". +Disable SNOR_F_HAS_16BIT_SR for all Macronix chips. + +Tested with MX25L6405D. 
+ +Fixes: 39d1e3340c73 ("mtd: spi-nor: Fix clearing of QE bit on +lock()/unlock()") + +Signed-off-by: David Bauer <mail@david-bauer.net> +Signed-off-by: Nick Hainke <vincent@systemli.org> +--- + drivers/mtd/spi-nor/macronix.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/mtd/spi-nor/macronix.c ++++ b/drivers/mtd/spi-nor/macronix.c +@@ -94,6 +94,7 @@ static void macronix_default_init(struct + { + nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable; + nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode; ++ nor->flags &= ~SNOR_F_HAS_16BIT_SR; + nor->flags |= SNOR_F_HAS_LOCK; + } + diff --git a/target/linux/generic/pending-5.15/500-fs_cdrom_dependencies.patch b/target/linux/generic/pending-5.15/500-fs_cdrom_dependencies.patch new file mode 100644 index 0000000000..0a5a3aae5d --- /dev/null +++ b/target/linux/generic/pending-5.15/500-fs_cdrom_dependencies.patch @@ -0,0 +1,40 @@ +--- a/fs/hfs/Kconfig ++++ b/fs/hfs/Kconfig +@@ -2,6 +2,7 @@ + config HFS_FS + tristate "Apple Macintosh file system support" + depends on BLOCK ++ select CDROM + select NLS + help + If you say Y here, you will be able to mount Macintosh-formatted +--- a/fs/hfsplus/Kconfig ++++ b/fs/hfsplus/Kconfig +@@ -2,6 +2,7 @@ + config HFSPLUS_FS + tristate "Apple Extended HFS file system support" + depends on BLOCK ++ select CDROM + select NLS + select NLS_UTF8 + help +--- a/fs/isofs/Kconfig ++++ b/fs/isofs/Kconfig +@@ -1,6 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0-only + config ISO9660_FS + tristate "ISO 9660 CDROM file system support" ++ select CDROM + help + This is the standard file system used on CD-ROMs. It was previously + known as "High Sierra File System" and is called "hsfs" on other +--- a/fs/udf/Kconfig ++++ b/fs/udf/Kconfig +@@ -1,6 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0-only + config UDF_FS + tristate "UDF file system support" ++ select CDROM + select CRC_ITU_T + select NLS + help diff --git a/target/linux/generic/pending-5.15/530-jffs2_make_lzma_available.patch b/target/linux/generic/pending-5.15/530-jffs2_make_lzma_available.patch new file mode 100644 index 0000000000..20483a391d --- /dev/null +++ b/target/linux/generic/pending-5.15/530-jffs2_make_lzma_available.patch @@ -0,0 +1,5180 @@ +From: Alexandros C. Couloumbis <alex@ozo.com> +Subject: fs: add jffs2/lzma support (not activated by default yet) + +lede-commit: c2c88d315fa0e881f8b19da07b62859b915b11b2 +Signed-off-by: Alexandros C. 
Couloumbis <alex@ozo.com> +--- + fs/jffs2/Kconfig | 9 + + fs/jffs2/Makefile | 3 + + fs/jffs2/compr.c | 6 + + fs/jffs2/compr.h | 10 +- + fs/jffs2/compr_lzma.c | 128 +++ + fs/jffs2/super.c | 33 +- + include/linux/lzma.h | 62 ++ + include/linux/lzma/LzFind.h | 115 +++ + include/linux/lzma/LzHash.h | 54 + + include/linux/lzma/LzmaDec.h | 231 +++++ + include/linux/lzma/LzmaEnc.h | 80 ++ + include/linux/lzma/Types.h | 226 +++++ + include/uapi/linux/jffs2.h | 1 + + lib/Kconfig | 6 + + lib/Makefile | 12 + + lib/lzma/LzFind.c | 761 ++++++++++++++ + lib/lzma/LzmaDec.c | 999 +++++++++++++++++++ + lib/lzma/LzmaEnc.c | 2271 ++++++++++++++++++++++++++++++++++++++++++ + lib/lzma/Makefile | 7 + + 19 files changed, 5008 insertions(+), 6 deletions(-) + create mode 100644 fs/jffs2/compr_lzma.c + create mode 100644 include/linux/lzma.h + create mode 100644 include/linux/lzma/LzFind.h + create mode 100644 include/linux/lzma/LzHash.h + create mode 100644 include/linux/lzma/LzmaDec.h + create mode 100644 include/linux/lzma/LzmaEnc.h + create mode 100644 include/linux/lzma/Types.h + create mode 100644 lib/lzma/LzFind.c + create mode 100644 lib/lzma/LzmaDec.c + create mode 100644 lib/lzma/LzmaEnc.c + create mode 100644 lib/lzma/Makefile + +--- a/fs/jffs2/Kconfig ++++ b/fs/jffs2/Kconfig +@@ -136,6 +136,15 @@ config JFFS2_LZO + This feature was added in July, 2007. Say 'N' if you need + compatibility with older bootloaders or kernels. + ++config JFFS2_LZMA ++ bool "JFFS2 LZMA compression support" if JFFS2_COMPRESSION_OPTIONS ++ select LZMA_COMPRESS ++ select LZMA_DECOMPRESS ++ depends on JFFS2_FS ++ default n ++ help ++ JFFS2 wrapper to the LZMA C SDK ++ + config JFFS2_RTIME + bool "JFFS2 RTIME compression support" if JFFS2_COMPRESSION_OPTIONS + depends on JFFS2_FS +--- a/fs/jffs2/Makefile ++++ b/fs/jffs2/Makefile +@@ -19,4 +19,7 @@ jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rub + jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o + jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o + jffs2-$(CONFIG_JFFS2_LZO) += compr_lzo.o ++jffs2-$(CONFIG_JFFS2_LZMA) += compr_lzma.o + jffs2-$(CONFIG_JFFS2_SUMMARY) += summary.o ++ ++CFLAGS_compr_lzma.o += -Iinclude/linux -Ilib/lzma +--- a/fs/jffs2/compr.c ++++ b/fs/jffs2/compr.c +@@ -378,6 +378,9 @@ int __init jffs2_compressors_init(void) + #ifdef CONFIG_JFFS2_LZO + jffs2_lzo_init(); + #endif ++#ifdef CONFIG_JFFS2_LZMA ++ jffs2_lzma_init(); ++#endif + /* Setting default compression mode */ + #ifdef CONFIG_JFFS2_CMODE_NONE + jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; +@@ -401,6 +404,9 @@ int __init jffs2_compressors_init(void) + int jffs2_compressors_exit(void) + { + /* Unregistering compressors */ ++#ifdef CONFIG_JFFS2_LZMA ++ jffs2_lzma_exit(); ++#endif + #ifdef CONFIG_JFFS2_LZO + jffs2_lzo_exit(); + #endif +--- a/fs/jffs2/compr.h ++++ b/fs/jffs2/compr.h +@@ -29,9 +29,9 @@ + #define JFFS2_DYNRUBIN_PRIORITY 20 + #define JFFS2_LZARI_PRIORITY 30 + #define JFFS2_RTIME_PRIORITY 50 +-#define JFFS2_ZLIB_PRIORITY 60 +-#define JFFS2_LZO_PRIORITY 80 +- ++#define JFFS2_LZMA_PRIORITY 70 ++#define JFFS2_ZLIB_PRIORITY 80 ++#define JFFS2_LZO_PRIORITY 90 + + #define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */ + #define JFFS2_DYNRUBIN_DISABLED /* for decompression */ +@@ -101,5 +101,9 @@ void jffs2_zlib_exit(void); + int jffs2_lzo_init(void); + void jffs2_lzo_exit(void); + #endif ++#ifdef CONFIG_JFFS2_LZMA ++int jffs2_lzma_init(void); ++void jffs2_lzma_exit(void); ++#endif + + #endif /* __JFFS2_COMPR_H__ */ +--- /dev/null ++++ b/fs/jffs2/compr_lzma.c +@@ -0,0 +1,128 @@ ++/* ++ * JFFS2 -- Journalling 
Flash File System, Version 2. ++ * ++ * For licensing information, see the file 'LICENCE' in this directory. ++ * ++ * JFFS2 wrapper to the LZMA C SDK ++ * ++ */ ++ ++#include <linux/lzma.h> ++#include "compr.h" ++ ++#ifdef __KERNEL__ ++ static DEFINE_MUTEX(deflate_mutex); ++#endif ++ ++CLzmaEncHandle *p; ++Byte propsEncoded[LZMA_PROPS_SIZE]; ++SizeT propsSize = sizeof(propsEncoded); ++ ++STATIC void lzma_free_workspace(void) ++{ ++ LzmaEnc_Destroy(p, &lzma_alloc, &lzma_alloc); ++} ++ ++STATIC int INIT lzma_alloc_workspace(CLzmaEncProps *props) ++{ ++ if ((p = (CLzmaEncHandle *)LzmaEnc_Create(&lzma_alloc)) == NULL) ++ { ++ PRINT_ERROR("Failed to allocate lzma deflate workspace\n"); ++ return -ENOMEM; ++ } ++ ++ if (LzmaEnc_SetProps(p, props) != SZ_OK) ++ { ++ lzma_free_workspace(); ++ return -1; ++ } ++ ++ if (LzmaEnc_WriteProperties(p, propsEncoded, &propsSize) != SZ_OK) ++ { ++ lzma_free_workspace(); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++STATIC int jffs2_lzma_compress(unsigned char *data_in, unsigned char *cpage_out, ++ uint32_t *sourcelen, uint32_t *dstlen) ++{ ++ SizeT compress_size = (SizeT)(*dstlen); ++ int ret; ++ ++ #ifdef __KERNEL__ ++ mutex_lock(&deflate_mutex); ++ #endif ++ ++ ret = LzmaEnc_MemEncode(p, cpage_out, &compress_size, data_in, *sourcelen, ++ 0, NULL, &lzma_alloc, &lzma_alloc); ++ ++ #ifdef __KERNEL__ ++ mutex_unlock(&deflate_mutex); ++ #endif ++ ++ if (ret != SZ_OK) ++ return -1; ++ ++ *dstlen = (uint32_t)compress_size; ++ ++ return 0; ++} ++ ++STATIC int jffs2_lzma_decompress(unsigned char *data_in, unsigned char *cpage_out, ++ uint32_t srclen, uint32_t destlen) ++{ ++ int ret; ++ SizeT dl = (SizeT)destlen; ++ SizeT sl = (SizeT)srclen; ++ ELzmaStatus status; ++ ++ ret = LzmaDecode(cpage_out, &dl, data_in, &sl, propsEncoded, ++ propsSize, LZMA_FINISH_ANY, &status, &lzma_alloc); ++ ++ if (ret != SZ_OK || status == LZMA_STATUS_NOT_FINISHED || dl != (SizeT)destlen) ++ return -1; ++ ++ return 0; ++} ++ ++static struct jffs2_compressor jffs2_lzma_comp = { ++ .priority = JFFS2_LZMA_PRIORITY, ++ .name = "lzma", ++ .compr = JFFS2_COMPR_LZMA, ++ .compress = &jffs2_lzma_compress, ++ .decompress = &jffs2_lzma_decompress, ++ .disabled = 0, ++}; ++ ++int INIT jffs2_lzma_init(void) ++{ ++ int ret; ++ CLzmaEncProps props; ++ LzmaEncProps_Init(&props); ++ ++ props.dictSize = LZMA_BEST_DICT(0x2000); ++ props.level = LZMA_BEST_LEVEL; ++ props.lc = LZMA_BEST_LC; ++ props.lp = LZMA_BEST_LP; ++ props.pb = LZMA_BEST_PB; ++ props.fb = LZMA_BEST_FB; ++ ++ ret = lzma_alloc_workspace(&props); ++ if (ret < 0) ++ return ret; ++ ++ ret = jffs2_register_compressor(&jffs2_lzma_comp); ++ if (ret) ++ lzma_free_workspace(); ++ ++ return ret; ++} ++ ++void jffs2_lzma_exit(void) ++{ ++ jffs2_unregister_compressor(&jffs2_lzma_comp); ++ lzma_free_workspace(); ++} +--- a/fs/jffs2/super.c ++++ b/fs/jffs2/super.c +@@ -374,14 +374,41 @@ static int __init init_jffs2_fs(void) + BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68); + BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32); + +- pr_info("version 2.2." 
++ pr_info("version 2.2" + #ifdef CONFIG_JFFS2_FS_WRITEBUFFER + " (NAND)" + #endif + #ifdef CONFIG_JFFS2_SUMMARY +- " (SUMMARY) " ++ " (SUMMARY)" + #endif +- " © 2001-2006 Red Hat, Inc.\n"); ++#ifdef CONFIG_JFFS2_ZLIB ++ " (ZLIB)" ++#endif ++#ifdef CONFIG_JFFS2_LZO ++ " (LZO)" ++#endif ++#ifdef CONFIG_JFFS2_LZMA ++ " (LZMA)" ++#endif ++#ifdef CONFIG_JFFS2_RTIME ++ " (RTIME)" ++#endif ++#ifdef CONFIG_JFFS2_RUBIN ++ " (RUBIN)" ++#endif ++#ifdef CONFIG_JFFS2_CMODE_NONE ++ " (CMODE_NONE)" ++#endif ++#ifdef CONFIG_JFFS2_CMODE_PRIORITY ++ " (CMODE_PRIORITY)" ++#endif ++#ifdef CONFIG_JFFS2_CMODE_SIZE ++ " (CMODE_SIZE)" ++#endif ++#ifdef CONFIG_JFFS2_CMODE_FAVOURLZO ++ " (CMODE_FAVOURLZO)" ++#endif ++ " (c) 2001-2006 Red Hat, Inc.\n"); + + jffs2_inode_cachep = kmem_cache_create("jffs2_i", + sizeof(struct jffs2_inode_info), +--- /dev/null ++++ b/include/linux/lzma.h +@@ -0,0 +1,62 @@ ++#ifndef __LZMA_H__ ++#define __LZMA_H__ ++ ++#ifdef __KERNEL__ ++ #include <linux/kernel.h> ++ #include <linux/sched.h> ++ #include <linux/slab.h> ++ #include <linux/vmalloc.h> ++ #include <linux/init.h> ++ #define LZMA_MALLOC vmalloc ++ #define LZMA_FREE vfree ++ #define PRINT_ERROR(msg) printk(KERN_WARNING #msg) ++ #define INIT __init ++ #define STATIC static ++#else ++ #include <stdint.h> ++ #include <stdlib.h> ++ #include <stdio.h> ++ #include <unistd.h> ++ #include <string.h> ++ #include <asm/types.h> ++ #include <errno.h> ++ #include <linux/jffs2.h> ++ #ifndef PAGE_SIZE ++ extern int page_size; ++ #define PAGE_SIZE page_size ++ #endif ++ #define LZMA_MALLOC malloc ++ #define LZMA_FREE free ++ #define PRINT_ERROR(msg) fprintf(stderr, msg) ++ #define INIT ++ #define STATIC ++#endif ++ ++#include "lzma/LzmaDec.h" ++#include "lzma/LzmaEnc.h" ++ ++#define LZMA_BEST_LEVEL (9) ++#define LZMA_BEST_LC (0) ++#define LZMA_BEST_LP (0) ++#define LZMA_BEST_PB (0) ++#define LZMA_BEST_FB (273) ++ ++#define LZMA_BEST_DICT(n) (((int)((n) / 2)) * 2) ++ ++static void *p_lzma_malloc(void *p, size_t size) ++{ ++ if (size == 0) ++ return NULL; ++ ++ return LZMA_MALLOC(size); ++} ++ ++static void p_lzma_free(void *p, void *address) ++{ ++ if (address != NULL) ++ LZMA_FREE(address); ++} ++ ++static ISzAlloc lzma_alloc = { .Alloc = p_lzma_malloc, .Free = p_lzma_free }; ++ ++#endif +--- /dev/null ++++ b/include/linux/lzma/LzFind.h +@@ -0,0 +1,115 @@ ++/* LzFind.h -- Match finder for LZ algorithms ++2009-04-22 : Igor Pavlov : Public domain */ ++ ++#ifndef __LZ_FIND_H ++#define __LZ_FIND_H ++ ++#include "Types.h" ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++typedef UInt32 CLzRef; ++ ++typedef struct _CMatchFinder ++{ ++ Byte *buffer; ++ UInt32 pos; ++ UInt32 posLimit; ++ UInt32 streamPos; ++ UInt32 lenLimit; ++ ++ UInt32 cyclicBufferPos; ++ UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */ ++ ++ UInt32 matchMaxLen; ++ CLzRef *hash; ++ CLzRef *son; ++ UInt32 hashMask; ++ UInt32 cutValue; ++ ++ Byte *bufferBase; ++ ISeqInStream *stream; ++ int streamEndWasReached; ++ ++ UInt32 blockSize; ++ UInt32 keepSizeBefore; ++ UInt32 keepSizeAfter; ++ ++ UInt32 numHashBytes; ++ int directInput; ++ size_t directInputRem; ++ int btMode; ++ int bigHash; ++ UInt32 historySize; ++ UInt32 fixedHashSize; ++ UInt32 hashSizeSum; ++ UInt32 numSons; ++ SRes result; ++ UInt32 crc[256]; ++} CMatchFinder; ++ ++#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((p)->buffer) ++#define Inline_MatchFinder_GetIndexByte(p, index) ((p)->buffer[(Int32)(index)]) ++ ++#define Inline_MatchFinder_GetNumAvailableBytes(p) ((p)->streamPos - (p)->pos) ++ 
++int MatchFinder_NeedMove(CMatchFinder *p); ++Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p); ++void MatchFinder_MoveBlock(CMatchFinder *p); ++void MatchFinder_ReadIfRequired(CMatchFinder *p); ++ ++void MatchFinder_Construct(CMatchFinder *p); ++ ++/* Conditions: ++ historySize <= 3 GB ++ keepAddBufferBefore + matchMaxLen + keepAddBufferAfter < 511MB ++*/ ++int MatchFinder_Create(CMatchFinder *p, UInt32 historySize, ++ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter, ++ ISzAlloc *alloc); ++void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc); ++void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems); ++void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue); ++ ++UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *buffer, CLzRef *son, ++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue, ++ UInt32 *distances, UInt32 maxLen); ++ ++/* ++Conditions: ++ Mf_GetNumAvailableBytes_Func must be called before each Mf_GetMatchLen_Func. ++ Mf_GetPointerToCurrentPos_Func's result must be used only before any other function ++*/ ++ ++typedef void (*Mf_Init_Func)(void *object); ++typedef Byte (*Mf_GetIndexByte_Func)(void *object, Int32 index); ++typedef UInt32 (*Mf_GetNumAvailableBytes_Func)(void *object); ++typedef const Byte * (*Mf_GetPointerToCurrentPos_Func)(void *object); ++typedef UInt32 (*Mf_GetMatches_Func)(void *object, UInt32 *distances); ++typedef void (*Mf_Skip_Func)(void *object, UInt32); ++ ++typedef struct _IMatchFinder ++{ ++ Mf_Init_Func Init; ++ Mf_GetIndexByte_Func GetIndexByte; ++ Mf_GetNumAvailableBytes_Func GetNumAvailableBytes; ++ Mf_GetPointerToCurrentPos_Func GetPointerToCurrentPos; ++ Mf_GetMatches_Func GetMatches; ++ Mf_Skip_Func Skip; ++} IMatchFinder; ++ ++void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable); ++ ++void MatchFinder_Init(CMatchFinder *p); ++UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances); ++UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances); ++void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num); ++void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num); ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif +--- /dev/null ++++ b/include/linux/lzma/LzHash.h +@@ -0,0 +1,54 @@ ++/* LzHash.h -- HASH functions for LZ algorithms ++2009-02-07 : Igor Pavlov : Public domain */ ++ ++#ifndef __LZ_HASH_H ++#define __LZ_HASH_H ++ ++#define kHash2Size (1 << 10) ++#define kHash3Size (1 << 16) ++#define kHash4Size (1 << 20) ++ ++#define kFix3HashSize (kHash2Size) ++#define kFix4HashSize (kHash2Size + kHash3Size) ++#define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size) ++ ++#define HASH2_CALC hashValue = cur[0] | ((UInt32)cur[1] << 8); ++ ++#define HASH3_CALC { \ ++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ ++ hash2Value = temp & (kHash2Size - 1); \ ++ hashValue = (temp ^ ((UInt32)cur[2] << 8)) & p->hashMask; } ++ ++#define HASH4_CALC { \ ++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ ++ hash2Value = temp & (kHash2Size - 1); \ ++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \ ++ hashValue = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & p->hashMask; } ++ ++#define HASH5_CALC { \ ++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ ++ hash2Value = temp & (kHash2Size - 1); \ ++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \ ++ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)); \ ++ hashValue = (hash4Value ^ (p->crc[cur[4]] << 3)) & 
p->hashMask; \ ++ hash4Value &= (kHash4Size - 1); } ++ ++/* #define HASH_ZIP_CALC hashValue = ((cur[0] | ((UInt32)cur[1] << 8)) ^ p->crc[cur[2]]) & 0xFFFF; */ ++#define HASH_ZIP_CALC hashValue = ((cur[2] | ((UInt32)cur[0] << 8)) ^ p->crc[cur[1]]) & 0xFFFF; ++ ++ ++#define MT_HASH2_CALC \ ++ hash2Value = (p->crc[cur[0]] ^ cur[1]) & (kHash2Size - 1); ++ ++#define MT_HASH3_CALC { \ ++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ ++ hash2Value = temp & (kHash2Size - 1); \ ++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); } ++ ++#define MT_HASH4_CALC { \ ++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ ++ hash2Value = temp & (kHash2Size - 1); \ ++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \ ++ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & (kHash4Size - 1); } ++ ++#endif +--- /dev/null ++++ b/include/linux/lzma/LzmaDec.h +@@ -0,0 +1,231 @@ ++/* LzmaDec.h -- LZMA Decoder ++2009-02-07 : Igor Pavlov : Public domain */ ++ ++#ifndef __LZMA_DEC_H ++#define __LZMA_DEC_H ++ ++#include "Types.h" ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++/* #define _LZMA_PROB32 */ ++/* _LZMA_PROB32 can increase the speed on some CPUs, ++ but memory usage for CLzmaDec::probs will be doubled in that case */ ++ ++#ifdef _LZMA_PROB32 ++#define CLzmaProb UInt32 ++#else ++#define CLzmaProb UInt16 ++#endif ++ ++ ++/* ---------- LZMA Properties ---------- */ ++ ++#define LZMA_PROPS_SIZE 5 ++ ++typedef struct _CLzmaProps ++{ ++ unsigned lc, lp, pb; ++ UInt32 dicSize; ++} CLzmaProps; ++ ++/* LzmaProps_Decode - decodes properties ++Returns: ++ SZ_OK ++ SZ_ERROR_UNSUPPORTED - Unsupported properties ++*/ ++ ++SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size); ++ ++ ++/* ---------- LZMA Decoder state ---------- */ ++ ++/* LZMA_REQUIRED_INPUT_MAX = number of required input bytes for worst case. ++ Num bits = log2((2^11 / 31) ^ 22) + 26 < 134 + 26 = 160; */ ++ ++#define LZMA_REQUIRED_INPUT_MAX 20 ++ ++typedef struct ++{ ++ CLzmaProps prop; ++ CLzmaProb *probs; ++ Byte *dic; ++ const Byte *buf; ++ UInt32 range, code; ++ SizeT dicPos; ++ SizeT dicBufSize; ++ UInt32 processedPos; ++ UInt32 checkDicSize; ++ unsigned state; ++ UInt32 reps[4]; ++ unsigned remainLen; ++ int needFlush; ++ int needInitState; ++ UInt32 numProbs; ++ unsigned tempBufSize; ++ Byte tempBuf[LZMA_REQUIRED_INPUT_MAX]; ++} CLzmaDec; ++ ++#define LzmaDec_Construct(p) { (p)->dic = 0; (p)->probs = 0; } ++ ++void LzmaDec_Init(CLzmaDec *p); ++ ++/* There are two types of LZMA streams: ++ 0) Stream with end mark. That end mark adds about 6 bytes to compressed size. ++ 1) Stream without end mark. You must know exact uncompressed size to decompress such stream. */ ++ ++typedef enum ++{ ++ LZMA_FINISH_ANY, /* finish at any point */ ++ LZMA_FINISH_END /* block must be finished at the end */ ++} ELzmaFinishMode; ++ ++/* ELzmaFinishMode has meaning only if the decoding reaches output limit !!! ++ ++ You must use LZMA_FINISH_END, when you know that current output buffer ++ covers last bytes of block. In other cases you must use LZMA_FINISH_ANY. ++ ++ If LZMA decoder sees end marker before reaching output limit, it returns SZ_OK, ++ and output value of destLen will be less than output buffer size limit. ++ You can check status result also. ++ ++ You can use multiple checks to test data integrity after full decompression: ++ 1) Check Result and "status" variable. ++ 2) Check that output(destLen) = uncompressedSize, if you know real uncompressedSize. 
++ 3) Check that output(srcLen) = compressedSize, if you know real compressedSize. ++ You must use correct finish mode in that case. */ ++ ++typedef enum ++{ ++ LZMA_STATUS_NOT_SPECIFIED, /* use main error code instead */ ++ LZMA_STATUS_FINISHED_WITH_MARK, /* stream was finished with end mark. */ ++ LZMA_STATUS_NOT_FINISHED, /* stream was not finished */ ++ LZMA_STATUS_NEEDS_MORE_INPUT, /* you must provide more input bytes */ ++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK /* there is probability that stream was finished without end mark */ ++} ELzmaStatus; ++ ++/* ELzmaStatus is used only as output value for function call */ ++ ++ ++/* ---------- Interfaces ---------- */ ++ ++/* There are 3 levels of interfaces: ++ 1) Dictionary Interface ++ 2) Buffer Interface ++ 3) One Call Interface ++ You can select any of these interfaces, but don't mix functions from different ++ groups for same object. */ ++ ++ ++/* There are two variants to allocate state for Dictionary Interface: ++ 1) LzmaDec_Allocate / LzmaDec_Free ++ 2) LzmaDec_AllocateProbs / LzmaDec_FreeProbs ++ You can use variant 2, if you set dictionary buffer manually. ++ For Buffer Interface you must always use variant 1. ++ ++LzmaDec_Allocate* can return: ++ SZ_OK ++ SZ_ERROR_MEM - Memory allocation error ++ SZ_ERROR_UNSUPPORTED - Unsupported properties ++*/ ++ ++SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc); ++void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc); ++ ++SRes LzmaDec_Allocate(CLzmaDec *state, const Byte *prop, unsigned propsSize, ISzAlloc *alloc); ++void LzmaDec_Free(CLzmaDec *state, ISzAlloc *alloc); ++ ++/* ---------- Dictionary Interface ---------- */ ++ ++/* You can use it, if you want to eliminate the overhead for data copying from ++ dictionary to some other external buffer. ++ You must work with CLzmaDec variables directly in this interface. ++ ++ STEPS: ++ LzmaDec_Constr() ++ LzmaDec_Allocate() ++ for (each new stream) ++ { ++ LzmaDec_Init() ++ while (it needs more decompression) ++ { ++ LzmaDec_DecodeToDic() ++ use data from CLzmaDec::dic and update CLzmaDec::dicPos ++ } ++ } ++ LzmaDec_Free() ++*/ ++ ++/* LzmaDec_DecodeToDic ++ ++ The decoding to internal dictionary buffer (CLzmaDec::dic). ++ You must manually update CLzmaDec::dicPos, if it reaches CLzmaDec::dicBufSize !!! ++ ++finishMode: ++ It has meaning only if the decoding reaches output limit (dicLimit). ++ LZMA_FINISH_ANY - Decode just dicLimit bytes. ++ LZMA_FINISH_END - Stream must be finished after dicLimit. ++ ++Returns: ++ SZ_OK ++ status: ++ LZMA_STATUS_FINISHED_WITH_MARK ++ LZMA_STATUS_NOT_FINISHED ++ LZMA_STATUS_NEEDS_MORE_INPUT ++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK ++ SZ_ERROR_DATA - Data error ++*/ ++ ++SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, ++ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status); ++ ++ ++/* ---------- Buffer Interface ---------- */ ++ ++/* It's zlib-like interface. ++ See LzmaDec_DecodeToDic description for information about STEPS and return results, ++ but you must use LzmaDec_DecodeToBuf instead of LzmaDec_DecodeToDic and you don't need ++ to work with CLzmaDec variables manually. ++ ++finishMode: ++ It has meaning only if the decoding reaches output limit (*destLen). ++ LZMA_FINISH_ANY - Decode just destLen bytes. ++ LZMA_FINISH_END - Stream must be finished after (*destLen). 
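[Editor's note — not part of the patch: the zlib-like buffer interface described above is easiest to see in a minimal usage sketch. The code below uses only functions and types declared in this header; the function name decode_buffer_sketch, the caller-supplied buffers and the ISzAlloc instance passed in as "alloc" are illustrative assumptions, and a kernel caller would supply a kmalloc/vmalloc-backed allocator rather than a user-space one.]

static SRes decode_buffer_sketch(Byte *dest, SizeT destCap, SizeT *destLen,
                                 const Byte *src, SizeT srcLen,
                                 const Byte *propData, /* 5-byte LZMA properties header */
                                 ISzAlloc *alloc)
{
  CLzmaDec dec;
  ELzmaStatus status;
  SizeT dstLen = destCap, srcRead = srcLen;
  SRes res;

  LzmaDec_Construct(&dec);                                   /* zero dic/probs pointers */
  res = LzmaDec_Allocate(&dec, propData, LZMA_PROPS_SIZE, alloc);
  if (res != SZ_OK)
    return res;
  LzmaDec_Init(&dec);
  res = LzmaDec_DecodeToBuf(&dec, dest, &dstLen, src, &srcRead,
                            LZMA_FINISH_END, &status);       /* output size known, so FINISH_END */
  *destLen = dstLen;
  if (res == SZ_OK &&
      status != LZMA_STATUS_FINISHED_WITH_MARK &&
      status != LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK)
    res = SZ_ERROR_DATA;                                     /* output limit hit, stream not finished */
  LzmaDec_Free(&dec, alloc);
  return res;
}

[A streaming caller would instead repeat the LzmaDec_DecodeToBuf call with LZMA_FINISH_ANY until the source is exhausted, exactly as the comment above describes.]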
++*/ ++ ++SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, ++ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status); ++ ++ ++/* ---------- One Call Interface ---------- */ ++ ++/* LzmaDecode ++ ++finishMode: ++ It has meaning only if the decoding reaches output limit (*destLen). ++ LZMA_FINISH_ANY - Decode just destLen bytes. ++ LZMA_FINISH_END - Stream must be finished after (*destLen). ++ ++Returns: ++ SZ_OK ++ status: ++ LZMA_STATUS_FINISHED_WITH_MARK ++ LZMA_STATUS_NOT_FINISHED ++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK ++ SZ_ERROR_DATA - Data error ++ SZ_ERROR_MEM - Memory allocation error ++ SZ_ERROR_UNSUPPORTED - Unsupported properties ++ SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src). ++*/ ++ ++SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ++ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode, ++ ELzmaStatus *status, ISzAlloc *alloc); ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif +--- /dev/null ++++ b/include/linux/lzma/LzmaEnc.h +@@ -0,0 +1,80 @@ ++/* LzmaEnc.h -- LZMA Encoder ++2009-02-07 : Igor Pavlov : Public domain */ ++ ++#ifndef __LZMA_ENC_H ++#define __LZMA_ENC_H ++ ++#include "Types.h" ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++#define LZMA_PROPS_SIZE 5 ++ ++typedef struct _CLzmaEncProps ++{ ++ int level; /* 0 <= level <= 9 */ ++ UInt32 dictSize; /* (1 << 12) <= dictSize <= (1 << 27) for 32-bit version ++ (1 << 12) <= dictSize <= (1 << 30) for 64-bit version ++ default = (1 << 24) */ ++ int lc; /* 0 <= lc <= 8, default = 3 */ ++ int lp; /* 0 <= lp <= 4, default = 0 */ ++ int pb; /* 0 <= pb <= 4, default = 2 */ ++ int algo; /* 0 - fast, 1 - normal, default = 1 */ ++ int fb; /* 5 <= fb <= 273, default = 32 */ ++ int btMode; /* 0 - hashChain Mode, 1 - binTree mode - normal, default = 1 */ ++ int numHashBytes; /* 2, 3 or 4, default = 4 */ ++ UInt32 mc; /* 1 <= mc <= (1 << 30), default = 32 */ ++ unsigned writeEndMark; /* 0 - do not write EOPM, 1 - write EOPM, default = 0 */ ++ int numThreads; /* 1 or 2, default = 2 */ ++} CLzmaEncProps; ++ ++void LzmaEncProps_Init(CLzmaEncProps *p); ++void LzmaEncProps_Normalize(CLzmaEncProps *p); ++UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2); ++ ++ ++/* ---------- CLzmaEncHandle Interface ---------- */ ++ ++/* LzmaEnc_* functions can return the following exit codes: ++Returns: ++ SZ_OK - OK ++ SZ_ERROR_MEM - Memory allocation error ++ SZ_ERROR_PARAM - Incorrect paramater in props ++ SZ_ERROR_WRITE - Write callback error. 
++ SZ_ERROR_PROGRESS - some break from progress callback ++ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version) ++*/ ++ ++typedef void * CLzmaEncHandle; ++ ++CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc); ++void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig); ++SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props); ++SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size); ++SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStream *outStream, ISeqInStream *inStream, ++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig); ++SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, ++ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig); ++ ++/* ---------- One Call Interface ---------- */ ++ ++/* LzmaEncode ++Return code: ++ SZ_OK - OK ++ SZ_ERROR_MEM - Memory allocation error ++ SZ_ERROR_PARAM - Incorrect paramater ++ SZ_ERROR_OUTPUT_EOF - output buffer overflow ++ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version) ++*/ ++ ++SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, ++ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark, ++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig); ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif +--- /dev/null ++++ b/include/linux/lzma/Types.h +@@ -0,0 +1,226 @@ ++/* Types.h -- Basic types ++2009-11-23 : Igor Pavlov : Public domain */ ++ ++#ifndef __7Z_TYPES_H ++#define __7Z_TYPES_H ++ ++#include <stddef.h> ++ ++#ifdef _WIN32 ++#include <windows.h> ++#endif ++ ++#ifndef EXTERN_C_BEGIN ++#ifdef __cplusplus ++#define EXTERN_C_BEGIN extern "C" { ++#define EXTERN_C_END } ++#else ++#define EXTERN_C_BEGIN ++#define EXTERN_C_END ++#endif ++#endif ++ ++EXTERN_C_BEGIN ++ ++#define SZ_OK 0 ++ ++#define SZ_ERROR_DATA 1 ++#define SZ_ERROR_MEM 2 ++#define SZ_ERROR_CRC 3 ++#define SZ_ERROR_UNSUPPORTED 4 ++#define SZ_ERROR_PARAM 5 ++#define SZ_ERROR_INPUT_EOF 6 ++#define SZ_ERROR_OUTPUT_EOF 7 ++#define SZ_ERROR_READ 8 ++#define SZ_ERROR_WRITE 9 ++#define SZ_ERROR_PROGRESS 10 ++#define SZ_ERROR_FAIL 11 ++#define SZ_ERROR_THREAD 12 ++ ++#define SZ_ERROR_ARCHIVE 16 ++#define SZ_ERROR_NO_ARCHIVE 17 ++ ++typedef int SRes; ++ ++#ifdef _WIN32 ++typedef DWORD WRes; ++#else ++typedef int WRes; ++#endif ++ ++#ifndef RINOK ++#define RINOK(x) { int __result__ = (x); if (__result__ != 0) return __result__; } ++#endif ++ ++typedef unsigned char Byte; ++typedef short Int16; ++typedef unsigned short UInt16; ++ ++#ifdef _LZMA_UINT32_IS_ULONG ++typedef long Int32; ++typedef unsigned long UInt32; ++#else ++typedef int Int32; ++typedef unsigned int UInt32; ++#endif ++ ++#ifdef _SZ_NO_INT_64 ++ ++/* define _SZ_NO_INT_64, if your compiler doesn't support 64-bit integers. ++ NOTES: Some code will work incorrectly in that case! 
*/ ++ ++typedef long Int64; ++typedef unsigned long UInt64; ++ ++#else ++ ++#if defined(_MSC_VER) || defined(__BORLANDC__) ++typedef __int64 Int64; ++typedef unsigned __int64 UInt64; ++#else ++typedef long long int Int64; ++typedef unsigned long long int UInt64; ++#endif ++ ++#endif ++ ++#ifdef _LZMA_NO_SYSTEM_SIZE_T ++typedef UInt32 SizeT; ++#else ++typedef size_t SizeT; ++#endif ++ ++typedef int Bool; ++#define True 1 ++#define False 0 ++ ++ ++#ifdef _WIN32 ++#define MY_STD_CALL __stdcall ++#else ++#define MY_STD_CALL ++#endif ++ ++#ifdef _MSC_VER ++ ++#if _MSC_VER >= 1300 ++#define MY_NO_INLINE __declspec(noinline) ++#else ++#define MY_NO_INLINE ++#endif ++ ++#define MY_CDECL __cdecl ++#define MY_FAST_CALL __fastcall ++ ++#else ++ ++#define MY_CDECL ++#define MY_FAST_CALL ++ ++#endif ++ ++ ++/* The following interfaces use first parameter as pointer to structure */ ++ ++typedef struct ++{ ++ SRes (*Read)(void *p, void *buf, size_t *size); ++ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream. ++ (output(*size) < input(*size)) is allowed */ ++} ISeqInStream; ++ ++/* it can return SZ_ERROR_INPUT_EOF */ ++SRes SeqInStream_Read(ISeqInStream *stream, void *buf, size_t size); ++SRes SeqInStream_Read2(ISeqInStream *stream, void *buf, size_t size, SRes errorType); ++SRes SeqInStream_ReadByte(ISeqInStream *stream, Byte *buf); ++ ++typedef struct ++{ ++ size_t (*Write)(void *p, const void *buf, size_t size); ++ /* Returns: result - the number of actually written bytes. ++ (result < size) means error */ ++} ISeqOutStream; ++ ++typedef enum ++{ ++ SZ_SEEK_SET = 0, ++ SZ_SEEK_CUR = 1, ++ SZ_SEEK_END = 2 ++} ESzSeek; ++ ++typedef struct ++{ ++ SRes (*Read)(void *p, void *buf, size_t *size); /* same as ISeqInStream::Read */ ++ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin); ++} ISeekInStream; ++ ++typedef struct ++{ ++ SRes (*Look)(void *p, void **buf, size_t *size); ++ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream. ++ (output(*size) > input(*size)) is not allowed ++ (output(*size) < input(*size)) is allowed */ ++ SRes (*Skip)(void *p, size_t offset); ++ /* offset must be <= output(*size) of Look */ ++ ++ SRes (*Read)(void *p, void *buf, size_t *size); ++ /* reads directly (without buffer). It's same as ISeqInStream::Read */ ++ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin); ++} ILookInStream; ++ ++SRes LookInStream_LookRead(ILookInStream *stream, void *buf, size_t *size); ++SRes LookInStream_SeekTo(ILookInStream *stream, UInt64 offset); ++ ++/* reads via ILookInStream::Read */ ++SRes LookInStream_Read2(ILookInStream *stream, void *buf, size_t size, SRes errorType); ++SRes LookInStream_Read(ILookInStream *stream, void *buf, size_t size); ++ ++#define LookToRead_BUF_SIZE (1 << 14) ++ ++typedef struct ++{ ++ ILookInStream s; ++ ISeekInStream *realStream; ++ size_t pos; ++ size_t size; ++ Byte buf[LookToRead_BUF_SIZE]; ++} CLookToRead; ++ ++void LookToRead_CreateVTable(CLookToRead *p, int lookahead); ++void LookToRead_Init(CLookToRead *p); ++ ++typedef struct ++{ ++ ISeqInStream s; ++ ILookInStream *realStream; ++} CSecToLook; ++ ++void SecToLook_CreateVTable(CSecToLook *p); ++ ++typedef struct ++{ ++ ISeqInStream s; ++ ILookInStream *realStream; ++} CSecToRead; ++ ++void SecToRead_CreateVTable(CSecToRead *p); ++ ++typedef struct ++{ ++ SRes (*Progress)(void *p, UInt64 inSize, UInt64 outSize); ++ /* Returns: result. (result != SZ_OK) means break. ++ Value (UInt64)(Int64)-1 for size means unknown value. 
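[Editor's note — not part of the patch: every LzmaDec_Allocate/LzmaEnc_Create call in these headers takes caller-provided callback structs of the kinds defined here in Types.h. A minimal user-space sketch of a no-op ICompressProgress and a malloc-backed ISzAlloc follows; the names Progress_NoOp, SzAlloc, SzFree, g_Progress and g_Alloc are illustrative, and the in-kernel glue would wrap kmalloc/kfree (or vmalloc/vfree for large dictionaries) instead of stdlib malloc.]

#include <stdlib.h>

static SRes Progress_NoOp(void *p, UInt64 inSize, UInt64 outSize)
{
  (void)p; (void)inSize; (void)outSize;
  return SZ_OK;                        /* returning anything else asks the coder to abort */
}
static ICompressProgress g_Progress = { Progress_NoOp };

static void *SzAlloc(void *p, size_t size) { (void)p; return malloc(size); }
static void SzFree(void *p, void *address) { (void)p; free(address); }  /* address can be 0 */
static ISzAlloc g_Alloc = { SzAlloc, SzFree };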
*/ ++} ICompressProgress; ++ ++typedef struct ++{ ++ void *(*Alloc)(void *p, size_t size); ++ void (*Free)(void *p, void *address); /* address can be 0 */ ++} ISzAlloc; ++ ++#define IAlloc_Alloc(p, size) (p)->Alloc((p), size) ++#define IAlloc_Free(p, a) (p)->Free((p), a) ++ ++EXTERN_C_END ++ ++#endif +--- a/include/uapi/linux/jffs2.h ++++ b/include/uapi/linux/jffs2.h +@@ -46,6 +46,7 @@ + #define JFFS2_COMPR_DYNRUBIN 0x05 + #define JFFS2_COMPR_ZLIB 0x06 + #define JFFS2_COMPR_LZO 0x07 ++#define JFFS2_COMPR_LZMA 0x08 + /* Compatibility flags. */ + #define JFFS2_COMPAT_MASK 0xc000 /* What do to if an unknown nodetype is found */ + #define JFFS2_NODE_ACCURATE 0x2000 +--- a/lib/Kconfig ++++ b/lib/Kconfig +@@ -315,6 +315,12 @@ config ZSTD_DECOMPRESS + + source "lib/xz/Kconfig" + ++config LZMA_COMPRESS ++ tristate ++ ++config LZMA_DECOMPRESS ++ tristate ++ + # + # These all provide a common interface (hence the apparent duplication with + # ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.) +--- a/lib/Makefile ++++ b/lib/Makefile +@@ -136,6 +136,16 @@ CFLAGS_kobject.o += -DDEBUG + CFLAGS_kobject_uevent.o += -DDEBUG + endif + ++ifdef CONFIG_JFFS2_ZLIB ++ CONFIG_ZLIB_INFLATE:=y ++ CONFIG_ZLIB_DEFLATE:=y ++endif ++ ++ifdef CONFIG_JFFS2_LZMA ++ CONFIG_LZMA_DECOMPRESS:=y ++ CONFIG_LZMA_COMPRESS:=y ++endif ++ + obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o + CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any) + +@@ -191,6 +201,8 @@ obj-$(CONFIG_ZSTD_COMPRESS) += zstd/ + obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd/ + obj-$(CONFIG_XZ_DEC) += xz/ + obj-$(CONFIG_RAID6_PQ) += raid6/ ++obj-$(CONFIG_LZMA_COMPRESS) += lzma/ ++obj-$(CONFIG_LZMA_DECOMPRESS) += lzma/ + + lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o + lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o +--- /dev/null ++++ b/lib/lzma/LzFind.c +@@ -0,0 +1,761 @@ ++/* LzFind.c -- Match finder for LZ algorithms ++2009-04-22 : Igor Pavlov : Public domain */ ++ ++#include <string.h> ++ ++#include "LzFind.h" ++#include "LzHash.h" ++ ++#define kEmptyHashValue 0 ++#define kMaxValForNormalize ((UInt32)0xFFFFFFFF) ++#define kNormalizeStepMin (1 << 10) /* it must be power of 2 */ ++#define kNormalizeMask (~(kNormalizeStepMin - 1)) ++#define kMaxHistorySize ((UInt32)3 << 30) ++ ++#define kStartMaxLen 3 ++ ++static void LzInWindow_Free(CMatchFinder *p, ISzAlloc *alloc) ++{ ++ if (!p->directInput) ++ { ++ alloc->Free(alloc, p->bufferBase); ++ p->bufferBase = 0; ++ } ++} ++ ++/* keepSizeBefore + keepSizeAfter + keepSizeReserv must be < 4G) */ ++ ++static int LzInWindow_Create(CMatchFinder *p, UInt32 keepSizeReserv, ISzAlloc *alloc) ++{ ++ UInt32 blockSize = p->keepSizeBefore + p->keepSizeAfter + keepSizeReserv; ++ if (p->directInput) ++ { ++ p->blockSize = blockSize; ++ return 1; ++ } ++ if (p->bufferBase == 0 || p->blockSize != blockSize) ++ { ++ LzInWindow_Free(p, alloc); ++ p->blockSize = blockSize; ++ p->bufferBase = (Byte *)alloc->Alloc(alloc, (size_t)blockSize); ++ } ++ return (p->bufferBase != 0); ++} ++ ++Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; } ++Byte MatchFinder_GetIndexByte(CMatchFinder *p, Int32 index) { return p->buffer[index]; } ++ ++UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; } ++ ++void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue) ++{ ++ p->posLimit -= subValue; ++ p->pos -= subValue; ++ p->streamPos -= subValue; ++} ++ ++static void MatchFinder_ReadBlock(CMatchFinder *p) ++{ ++ if (p->streamEndWasReached || p->result != 
SZ_OK) ++ return; ++ if (p->directInput) ++ { ++ UInt32 curSize = 0xFFFFFFFF - p->streamPos; ++ if (curSize > p->directInputRem) ++ curSize = (UInt32)p->directInputRem; ++ p->directInputRem -= curSize; ++ p->streamPos += curSize; ++ if (p->directInputRem == 0) ++ p->streamEndWasReached = 1; ++ return; ++ } ++ for (;;) ++ { ++ Byte *dest = p->buffer + (p->streamPos - p->pos); ++ size_t size = (p->bufferBase + p->blockSize - dest); ++ if (size == 0) ++ return; ++ p->result = p->stream->Read(p->stream, dest, &size); ++ if (p->result != SZ_OK) ++ return; ++ if (size == 0) ++ { ++ p->streamEndWasReached = 1; ++ return; ++ } ++ p->streamPos += (UInt32)size; ++ if (p->streamPos - p->pos > p->keepSizeAfter) ++ return; ++ } ++} ++ ++void MatchFinder_MoveBlock(CMatchFinder *p) ++{ ++ memmove(p->bufferBase, ++ p->buffer - p->keepSizeBefore, ++ (size_t)(p->streamPos - p->pos + p->keepSizeBefore)); ++ p->buffer = p->bufferBase + p->keepSizeBefore; ++} ++ ++int MatchFinder_NeedMove(CMatchFinder *p) ++{ ++ if (p->directInput) ++ return 0; ++ /* if (p->streamEndWasReached) return 0; */ ++ return ((size_t)(p->bufferBase + p->blockSize - p->buffer) <= p->keepSizeAfter); ++} ++ ++void MatchFinder_ReadIfRequired(CMatchFinder *p) ++{ ++ if (p->streamEndWasReached) ++ return; ++ if (p->keepSizeAfter >= p->streamPos - p->pos) ++ MatchFinder_ReadBlock(p); ++} ++ ++static void MatchFinder_CheckAndMoveAndRead(CMatchFinder *p) ++{ ++ if (MatchFinder_NeedMove(p)) ++ MatchFinder_MoveBlock(p); ++ MatchFinder_ReadBlock(p); ++} ++ ++static void MatchFinder_SetDefaultSettings(CMatchFinder *p) ++{ ++ p->cutValue = 32; ++ p->btMode = 1; ++ p->numHashBytes = 4; ++ p->bigHash = 0; ++} ++ ++#define kCrcPoly 0xEDB88320 ++ ++void MatchFinder_Construct(CMatchFinder *p) ++{ ++ UInt32 i; ++ p->bufferBase = 0; ++ p->directInput = 0; ++ p->hash = 0; ++ MatchFinder_SetDefaultSettings(p); ++ ++ for (i = 0; i < 256; i++) ++ { ++ UInt32 r = i; ++ int j; ++ for (j = 0; j < 8; j++) ++ r = (r >> 1) ^ (kCrcPoly & ~((r & 1) - 1)); ++ p->crc[i] = r; ++ } ++} ++ ++static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAlloc *alloc) ++{ ++ alloc->Free(alloc, p->hash); ++ p->hash = 0; ++} ++ ++void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc) ++{ ++ MatchFinder_FreeThisClassMemory(p, alloc); ++ LzInWindow_Free(p, alloc); ++} ++ ++static CLzRef* AllocRefs(UInt32 num, ISzAlloc *alloc) ++{ ++ size_t sizeInBytes = (size_t)num * sizeof(CLzRef); ++ if (sizeInBytes / sizeof(CLzRef) != num) ++ return 0; ++ return (CLzRef *)alloc->Alloc(alloc, sizeInBytes); ++} ++ ++int MatchFinder_Create(CMatchFinder *p, UInt32 historySize, ++ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter, ++ ISzAlloc *alloc) ++{ ++ UInt32 sizeReserv; ++ if (historySize > kMaxHistorySize) ++ { ++ MatchFinder_Free(p, alloc); ++ return 0; ++ } ++ sizeReserv = historySize >> 1; ++ if (historySize > ((UInt32)2 << 30)) ++ sizeReserv = historySize >> 2; ++ sizeReserv += (keepAddBufferBefore + matchMaxLen + keepAddBufferAfter) / 2 + (1 << 19); ++ ++ p->keepSizeBefore = historySize + keepAddBufferBefore + 1; ++ p->keepSizeAfter = matchMaxLen + keepAddBufferAfter; ++ /* we need one additional byte, since we use MoveBlock after pos++ and before dictionary using */ ++ if (LzInWindow_Create(p, sizeReserv, alloc)) ++ { ++ UInt32 newCyclicBufferSize = historySize + 1; ++ UInt32 hs; ++ p->matchMaxLen = matchMaxLen; ++ { ++ p->fixedHashSize = 0; ++ if (p->numHashBytes == 2) ++ hs = (1 << 16) - 1; ++ else ++ { ++ hs = historySize - 1; ++ hs |= (hs >> 1); ++ hs 
|= (hs >> 2); ++ hs |= (hs >> 4); ++ hs |= (hs >> 8); ++ hs >>= 1; ++ hs |= 0xFFFF; /* don't change it! It's required for Deflate */ ++ if (hs > (1 << 24)) ++ { ++ if (p->numHashBytes == 3) ++ hs = (1 << 24) - 1; ++ else ++ hs >>= 1; ++ } ++ } ++ p->hashMask = hs; ++ hs++; ++ if (p->numHashBytes > 2) p->fixedHashSize += kHash2Size; ++ if (p->numHashBytes > 3) p->fixedHashSize += kHash3Size; ++ if (p->numHashBytes > 4) p->fixedHashSize += kHash4Size; ++ hs += p->fixedHashSize; ++ } ++ ++ { ++ UInt32 prevSize = p->hashSizeSum + p->numSons; ++ UInt32 newSize; ++ p->historySize = historySize; ++ p->hashSizeSum = hs; ++ p->cyclicBufferSize = newCyclicBufferSize; ++ p->numSons = (p->btMode ? newCyclicBufferSize * 2 : newCyclicBufferSize); ++ newSize = p->hashSizeSum + p->numSons; ++ if (p->hash != 0 && prevSize == newSize) ++ return 1; ++ MatchFinder_FreeThisClassMemory(p, alloc); ++ p->hash = AllocRefs(newSize, alloc); ++ if (p->hash != 0) ++ { ++ p->son = p->hash + p->hashSizeSum; ++ return 1; ++ } ++ } ++ } ++ MatchFinder_Free(p, alloc); ++ return 0; ++} ++ ++static void MatchFinder_SetLimits(CMatchFinder *p) ++{ ++ UInt32 limit = kMaxValForNormalize - p->pos; ++ UInt32 limit2 = p->cyclicBufferSize - p->cyclicBufferPos; ++ if (limit2 < limit) ++ limit = limit2; ++ limit2 = p->streamPos - p->pos; ++ if (limit2 <= p->keepSizeAfter) ++ { ++ if (limit2 > 0) ++ limit2 = 1; ++ } ++ else ++ limit2 -= p->keepSizeAfter; ++ if (limit2 < limit) ++ limit = limit2; ++ { ++ UInt32 lenLimit = p->streamPos - p->pos; ++ if (lenLimit > p->matchMaxLen) ++ lenLimit = p->matchMaxLen; ++ p->lenLimit = lenLimit; ++ } ++ p->posLimit = p->pos + limit; ++} ++ ++void MatchFinder_Init(CMatchFinder *p) ++{ ++ UInt32 i; ++ for (i = 0; i < p->hashSizeSum; i++) ++ p->hash[i] = kEmptyHashValue; ++ p->cyclicBufferPos = 0; ++ p->buffer = p->bufferBase; ++ p->pos = p->streamPos = p->cyclicBufferSize; ++ p->result = SZ_OK; ++ p->streamEndWasReached = 0; ++ MatchFinder_ReadBlock(p); ++ MatchFinder_SetLimits(p); ++} ++ ++static UInt32 MatchFinder_GetSubValue(CMatchFinder *p) ++{ ++ return (p->pos - p->historySize - 1) & kNormalizeMask; ++} ++ ++void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems) ++{ ++ UInt32 i; ++ for (i = 0; i < numItems; i++) ++ { ++ UInt32 value = items[i]; ++ if (value <= subValue) ++ value = kEmptyHashValue; ++ else ++ value -= subValue; ++ items[i] = value; ++ } ++} ++ ++static void MatchFinder_Normalize(CMatchFinder *p) ++{ ++ UInt32 subValue = MatchFinder_GetSubValue(p); ++ MatchFinder_Normalize3(subValue, p->hash, p->hashSizeSum + p->numSons); ++ MatchFinder_ReduceOffsets(p, subValue); ++} ++ ++static void MatchFinder_CheckLimits(CMatchFinder *p) ++{ ++ if (p->pos == kMaxValForNormalize) ++ MatchFinder_Normalize(p); ++ if (!p->streamEndWasReached && p->keepSizeAfter == p->streamPos - p->pos) ++ MatchFinder_CheckAndMoveAndRead(p); ++ if (p->cyclicBufferPos == p->cyclicBufferSize) ++ p->cyclicBufferPos = 0; ++ MatchFinder_SetLimits(p); ++} ++ ++static UInt32 * Hc_GetMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son, ++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue, ++ UInt32 *distances, UInt32 maxLen) ++{ ++ son[_cyclicBufferPos] = curMatch; ++ for (;;) ++ { ++ UInt32 delta = pos - curMatch; ++ if (cutValue-- == 0 || delta >= _cyclicBufferSize) ++ return distances; ++ { ++ const Byte *pb = cur - delta; ++ curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? 
_cyclicBufferSize : 0)]; ++ if (pb[maxLen] == cur[maxLen] && *pb == *cur) ++ { ++ UInt32 len = 0; ++ while (++len != lenLimit) ++ if (pb[len] != cur[len]) ++ break; ++ if (maxLen < len) ++ { ++ *distances++ = maxLen = len; ++ *distances++ = delta - 1; ++ if (len == lenLimit) ++ return distances; ++ } ++ } ++ } ++ } ++} ++ ++UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son, ++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue, ++ UInt32 *distances, UInt32 maxLen) ++{ ++ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1; ++ CLzRef *ptr1 = son + (_cyclicBufferPos << 1); ++ UInt32 len0 = 0, len1 = 0; ++ for (;;) ++ { ++ UInt32 delta = pos - curMatch; ++ if (cutValue-- == 0 || delta >= _cyclicBufferSize) ++ { ++ *ptr0 = *ptr1 = kEmptyHashValue; ++ return distances; ++ } ++ { ++ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1); ++ const Byte *pb = cur - delta; ++ UInt32 len = (len0 < len1 ? len0 : len1); ++ if (pb[len] == cur[len]) ++ { ++ if (++len != lenLimit && pb[len] == cur[len]) ++ while (++len != lenLimit) ++ if (pb[len] != cur[len]) ++ break; ++ if (maxLen < len) ++ { ++ *distances++ = maxLen = len; ++ *distances++ = delta - 1; ++ if (len == lenLimit) ++ { ++ *ptr1 = pair[0]; ++ *ptr0 = pair[1]; ++ return distances; ++ } ++ } ++ } ++ if (pb[len] < cur[len]) ++ { ++ *ptr1 = curMatch; ++ ptr1 = pair + 1; ++ curMatch = *ptr1; ++ len1 = len; ++ } ++ else ++ { ++ *ptr0 = curMatch; ++ ptr0 = pair; ++ curMatch = *ptr0; ++ len0 = len; ++ } ++ } ++ } ++} ++ ++static void SkipMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son, ++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue) ++{ ++ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1; ++ CLzRef *ptr1 = son + (_cyclicBufferPos << 1); ++ UInt32 len0 = 0, len1 = 0; ++ for (;;) ++ { ++ UInt32 delta = pos - curMatch; ++ if (cutValue-- == 0 || delta >= _cyclicBufferSize) ++ { ++ *ptr0 = *ptr1 = kEmptyHashValue; ++ return; ++ } ++ { ++ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1); ++ const Byte *pb = cur - delta; ++ UInt32 len = (len0 < len1 ? 
len0 : len1); ++ if (pb[len] == cur[len]) ++ { ++ while (++len != lenLimit) ++ if (pb[len] != cur[len]) ++ break; ++ { ++ if (len == lenLimit) ++ { ++ *ptr1 = pair[0]; ++ *ptr0 = pair[1]; ++ return; ++ } ++ } ++ } ++ if (pb[len] < cur[len]) ++ { ++ *ptr1 = curMatch; ++ ptr1 = pair + 1; ++ curMatch = *ptr1; ++ len1 = len; ++ } ++ else ++ { ++ *ptr0 = curMatch; ++ ptr0 = pair; ++ curMatch = *ptr0; ++ len0 = len; ++ } ++ } ++ } ++} ++ ++#define MOVE_POS \ ++ ++p->cyclicBufferPos; \ ++ p->buffer++; \ ++ if (++p->pos == p->posLimit) MatchFinder_CheckLimits(p); ++ ++#define MOVE_POS_RET MOVE_POS return offset; ++ ++static void MatchFinder_MovePos(CMatchFinder *p) { MOVE_POS; } ++ ++#define GET_MATCHES_HEADER2(minLen, ret_op) \ ++ UInt32 lenLimit; UInt32 hashValue; const Byte *cur; UInt32 curMatch; \ ++ lenLimit = p->lenLimit; { if (lenLimit < minLen) { MatchFinder_MovePos(p); ret_op; }} \ ++ cur = p->buffer; ++ ++#define GET_MATCHES_HEADER(minLen) GET_MATCHES_HEADER2(minLen, return 0) ++#define SKIP_HEADER(minLen) GET_MATCHES_HEADER2(minLen, continue) ++ ++#define MF_PARAMS(p) p->pos, p->buffer, p->son, p->cyclicBufferPos, p->cyclicBufferSize, p->cutValue ++ ++#define GET_MATCHES_FOOTER(offset, maxLen) \ ++ offset = (UInt32)(GetMatchesSpec1(lenLimit, curMatch, MF_PARAMS(p), \ ++ distances + offset, maxLen) - distances); MOVE_POS_RET; ++ ++#define SKIP_FOOTER \ ++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); MOVE_POS; ++ ++static UInt32 Bt2_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) ++{ ++ UInt32 offset; ++ GET_MATCHES_HEADER(2) ++ HASH2_CALC; ++ curMatch = p->hash[hashValue]; ++ p->hash[hashValue] = p->pos; ++ offset = 0; ++ GET_MATCHES_FOOTER(offset, 1) ++} ++ ++UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) ++{ ++ UInt32 offset; ++ GET_MATCHES_HEADER(3) ++ HASH_ZIP_CALC; ++ curMatch = p->hash[hashValue]; ++ p->hash[hashValue] = p->pos; ++ offset = 0; ++ GET_MATCHES_FOOTER(offset, 2) ++} ++ ++static UInt32 Bt3_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) ++{ ++ UInt32 hash2Value, delta2, maxLen, offset; ++ GET_MATCHES_HEADER(3) ++ ++ HASH3_CALC; ++ ++ delta2 = p->pos - p->hash[hash2Value]; ++ curMatch = p->hash[kFix3HashSize + hashValue]; ++ ++ p->hash[hash2Value] = ++ p->hash[kFix3HashSize + hashValue] = p->pos; ++ ++ ++ maxLen = 2; ++ offset = 0; ++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur) ++ { ++ for (; maxLen != lenLimit; maxLen++) ++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen]) ++ break; ++ distances[0] = maxLen; ++ distances[1] = delta2 - 1; ++ offset = 2; ++ if (maxLen == lenLimit) ++ { ++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); ++ MOVE_POS_RET; ++ } ++ } ++ GET_MATCHES_FOOTER(offset, maxLen) ++} ++ ++static UInt32 Bt4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) ++{ ++ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset; ++ GET_MATCHES_HEADER(4) ++ ++ HASH4_CALC; ++ ++ delta2 = p->pos - p->hash[ hash2Value]; ++ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value]; ++ curMatch = p->hash[kFix4HashSize + hashValue]; ++ ++ p->hash[ hash2Value] = ++ p->hash[kFix3HashSize + hash3Value] = ++ p->hash[kFix4HashSize + hashValue] = p->pos; ++ ++ maxLen = 1; ++ offset = 0; ++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur) ++ { ++ distances[0] = maxLen = 2; ++ distances[1] = delta2 - 1; ++ offset = 2; ++ } ++ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur) ++ { ++ maxLen = 3; ++ distances[offset + 1] = delta3 - 1; ++ offset += 2; 
++ delta2 = delta3; ++ } ++ if (offset != 0) ++ { ++ for (; maxLen != lenLimit; maxLen++) ++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen]) ++ break; ++ distances[offset - 2] = maxLen; ++ if (maxLen == lenLimit) ++ { ++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); ++ MOVE_POS_RET; ++ } ++ } ++ if (maxLen < 3) ++ maxLen = 3; ++ GET_MATCHES_FOOTER(offset, maxLen) ++} ++ ++static UInt32 Hc4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) ++{ ++ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset; ++ GET_MATCHES_HEADER(4) ++ ++ HASH4_CALC; ++ ++ delta2 = p->pos - p->hash[ hash2Value]; ++ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value]; ++ curMatch = p->hash[kFix4HashSize + hashValue]; ++ ++ p->hash[ hash2Value] = ++ p->hash[kFix3HashSize + hash3Value] = ++ p->hash[kFix4HashSize + hashValue] = p->pos; ++ ++ maxLen = 1; ++ offset = 0; ++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur) ++ { ++ distances[0] = maxLen = 2; ++ distances[1] = delta2 - 1; ++ offset = 2; ++ } ++ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur) ++ { ++ maxLen = 3; ++ distances[offset + 1] = delta3 - 1; ++ offset += 2; ++ delta2 = delta3; ++ } ++ if (offset != 0) ++ { ++ for (; maxLen != lenLimit; maxLen++) ++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen]) ++ break; ++ distances[offset - 2] = maxLen; ++ if (maxLen == lenLimit) ++ { ++ p->son[p->cyclicBufferPos] = curMatch; ++ MOVE_POS_RET; ++ } ++ } ++ if (maxLen < 3) ++ maxLen = 3; ++ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p), ++ distances + offset, maxLen) - (distances)); ++ MOVE_POS_RET ++} ++ ++UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) ++{ ++ UInt32 offset; ++ GET_MATCHES_HEADER(3) ++ HASH_ZIP_CALC; ++ curMatch = p->hash[hashValue]; ++ p->hash[hashValue] = p->pos; ++ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p), ++ distances, 2) - (distances)); ++ MOVE_POS_RET ++} ++ ++static void Bt2_MatchFinder_Skip(CMatchFinder *p, UInt32 num) ++{ ++ do ++ { ++ SKIP_HEADER(2) ++ HASH2_CALC; ++ curMatch = p->hash[hashValue]; ++ p->hash[hashValue] = p->pos; ++ SKIP_FOOTER ++ } ++ while (--num != 0); ++} ++ ++void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num) ++{ ++ do ++ { ++ SKIP_HEADER(3) ++ HASH_ZIP_CALC; ++ curMatch = p->hash[hashValue]; ++ p->hash[hashValue] = p->pos; ++ SKIP_FOOTER ++ } ++ while (--num != 0); ++} ++ ++static void Bt3_MatchFinder_Skip(CMatchFinder *p, UInt32 num) ++{ ++ do ++ { ++ UInt32 hash2Value; ++ SKIP_HEADER(3) ++ HASH3_CALC; ++ curMatch = p->hash[kFix3HashSize + hashValue]; ++ p->hash[hash2Value] = ++ p->hash[kFix3HashSize + hashValue] = p->pos; ++ SKIP_FOOTER ++ } ++ while (--num != 0); ++} ++ ++static void Bt4_MatchFinder_Skip(CMatchFinder *p, UInt32 num) ++{ ++ do ++ { ++ UInt32 hash2Value, hash3Value; ++ SKIP_HEADER(4) ++ HASH4_CALC; ++ curMatch = p->hash[kFix4HashSize + hashValue]; ++ p->hash[ hash2Value] = ++ p->hash[kFix3HashSize + hash3Value] = p->pos; ++ p->hash[kFix4HashSize + hashValue] = p->pos; ++ SKIP_FOOTER ++ } ++ while (--num != 0); ++} ++ ++static void Hc4_MatchFinder_Skip(CMatchFinder *p, UInt32 num) ++{ ++ do ++ { ++ UInt32 hash2Value, hash3Value; ++ SKIP_HEADER(4) ++ HASH4_CALC; ++ curMatch = p->hash[kFix4HashSize + hashValue]; ++ p->hash[ hash2Value] = ++ p->hash[kFix3HashSize + hash3Value] = ++ p->hash[kFix4HashSize + hashValue] = p->pos; ++ p->son[p->cyclicBufferPos] = curMatch; ++ MOVE_POS ++ } ++ while (--num != 0); ++} ++ ++void 
Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num) ++{ ++ do ++ { ++ SKIP_HEADER(3) ++ HASH_ZIP_CALC; ++ curMatch = p->hash[hashValue]; ++ p->hash[hashValue] = p->pos; ++ p->son[p->cyclicBufferPos] = curMatch; ++ MOVE_POS ++ } ++ while (--num != 0); ++} ++ ++void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable) ++{ ++ vTable->Init = (Mf_Init_Func)MatchFinder_Init; ++ vTable->GetIndexByte = (Mf_GetIndexByte_Func)MatchFinder_GetIndexByte; ++ vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinder_GetNumAvailableBytes; ++ vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinder_GetPointerToCurrentPos; ++ if (!p->btMode) ++ { ++ vTable->GetMatches = (Mf_GetMatches_Func)Hc4_MatchFinder_GetMatches; ++ vTable->Skip = (Mf_Skip_Func)Hc4_MatchFinder_Skip; ++ } ++ else if (p->numHashBytes == 2) ++ { ++ vTable->GetMatches = (Mf_GetMatches_Func)Bt2_MatchFinder_GetMatches; ++ vTable->Skip = (Mf_Skip_Func)Bt2_MatchFinder_Skip; ++ } ++ else if (p->numHashBytes == 3) ++ { ++ vTable->GetMatches = (Mf_GetMatches_Func)Bt3_MatchFinder_GetMatches; ++ vTable->Skip = (Mf_Skip_Func)Bt3_MatchFinder_Skip; ++ } ++ else ++ { ++ vTable->GetMatches = (Mf_GetMatches_Func)Bt4_MatchFinder_GetMatches; ++ vTable->Skip = (Mf_Skip_Func)Bt4_MatchFinder_Skip; ++ } ++} +--- /dev/null ++++ b/lib/lzma/LzmaDec.c +@@ -0,0 +1,999 @@ ++/* LzmaDec.c -- LZMA Decoder ++2009-09-20 : Igor Pavlov : Public domain */ ++ ++#include "LzmaDec.h" ++ ++#include <string.h> ++ ++#define kNumTopBits 24 ++#define kTopValue ((UInt32)1 << kNumTopBits) ++ ++#define kNumBitModelTotalBits 11 ++#define kBitModelTotal (1 << kNumBitModelTotalBits) ++#define kNumMoveBits 5 ++ ++#define RC_INIT_SIZE 5 ++ ++#define NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | (*buf++); } ++ ++#define IF_BIT_0(p) ttt = *(p); NORMALIZE; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound) ++#define UPDATE_0(p) range = bound; *(p) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits)); ++#define UPDATE_1(p) range -= bound; code -= bound; *(p) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits)); ++#define GET_BIT2(p, i, A0, A1) IF_BIT_0(p) \ ++ { UPDATE_0(p); i = (i + i); A0; } else \ ++ { UPDATE_1(p); i = (i + i) + 1; A1; } ++#define GET_BIT(p, i) GET_BIT2(p, i, ; , ;) ++ ++#define TREE_GET_BIT(probs, i) { GET_BIT((probs + i), i); } ++#define TREE_DECODE(probs, limit, i) \ ++ { i = 1; do { TREE_GET_BIT(probs, i); } while (i < limit); i -= limit; } ++ ++/* #define _LZMA_SIZE_OPT */ ++ ++#ifdef _LZMA_SIZE_OPT ++#define TREE_6_DECODE(probs, i) TREE_DECODE(probs, (1 << 6), i) ++#else ++#define TREE_6_DECODE(probs, i) \ ++ { i = 1; \ ++ TREE_GET_BIT(probs, i); \ ++ TREE_GET_BIT(probs, i); \ ++ TREE_GET_BIT(probs, i); \ ++ TREE_GET_BIT(probs, i); \ ++ TREE_GET_BIT(probs, i); \ ++ TREE_GET_BIT(probs, i); \ ++ i -= 0x40; } ++#endif ++ ++#define NORMALIZE_CHECK if (range < kTopValue) { if (buf >= bufLimit) return DUMMY_ERROR; range <<= 8; code = (code << 8) | (*buf++); } ++ ++#define IF_BIT_0_CHECK(p) ttt = *(p); NORMALIZE_CHECK; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound) ++#define UPDATE_0_CHECK range = bound; ++#define UPDATE_1_CHECK range -= bound; code -= bound; ++#define GET_BIT2_CHECK(p, i, A0, A1) IF_BIT_0_CHECK(p) \ ++ { UPDATE_0_CHECK; i = (i + i); A0; } else \ ++ { UPDATE_1_CHECK; i = (i + i) + 1; A1; } ++#define GET_BIT_CHECK(p, i) GET_BIT2_CHECK(p, i, ; , ;) ++#define TREE_DECODE_CHECK(probs, limit, i) \ ++ { i = 1; do { GET_BIT_CHECK(probs + i, i) } while (i < limit); 
i -= limit; } ++ ++ ++#define kNumPosBitsMax 4 ++#define kNumPosStatesMax (1 << kNumPosBitsMax) ++ ++#define kLenNumLowBits 3 ++#define kLenNumLowSymbols (1 << kLenNumLowBits) ++#define kLenNumMidBits 3 ++#define kLenNumMidSymbols (1 << kLenNumMidBits) ++#define kLenNumHighBits 8 ++#define kLenNumHighSymbols (1 << kLenNumHighBits) ++ ++#define LenChoice 0 ++#define LenChoice2 (LenChoice + 1) ++#define LenLow (LenChoice2 + 1) ++#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits)) ++#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits)) ++#define kNumLenProbs (LenHigh + kLenNumHighSymbols) ++ ++ ++#define kNumStates 12 ++#define kNumLitStates 7 ++ ++#define kStartPosModelIndex 4 ++#define kEndPosModelIndex 14 ++#define kNumFullDistances (1 << (kEndPosModelIndex >> 1)) ++ ++#define kNumPosSlotBits 6 ++#define kNumLenToPosStates 4 ++ ++#define kNumAlignBits 4 ++#define kAlignTableSize (1 << kNumAlignBits) ++ ++#define kMatchMinLen 2 ++#define kMatchSpecLenStart (kMatchMinLen + kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols) ++ ++#define IsMatch 0 ++#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax)) ++#define IsRepG0 (IsRep + kNumStates) ++#define IsRepG1 (IsRepG0 + kNumStates) ++#define IsRepG2 (IsRepG1 + kNumStates) ++#define IsRep0Long (IsRepG2 + kNumStates) ++#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax)) ++#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits)) ++#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex) ++#define LenCoder (Align + kAlignTableSize) ++#define RepLenCoder (LenCoder + kNumLenProbs) ++#define Literal (RepLenCoder + kNumLenProbs) ++ ++#define LZMA_BASE_SIZE 1846 ++#define LZMA_LIT_SIZE 768 ++ ++#define LzmaProps_GetNumProbs(p) ((UInt32)LZMA_BASE_SIZE + (LZMA_LIT_SIZE << ((p)->lc + (p)->lp))) ++ ++#if Literal != LZMA_BASE_SIZE ++StopCompilingDueBUG ++#endif ++ ++#define LZMA_DIC_MIN (1 << 12) ++ ++/* First LZMA-symbol is always decoded. ++And it decodes new LZMA-symbols while (buf < bufLimit), but "buf" is without last normalization ++Out: ++ Result: ++ SZ_OK - OK ++ SZ_ERROR_DATA - Error ++ p->remainLen: ++ < kMatchSpecLenStart : normal remain ++ = kMatchSpecLenStart : finished ++ = kMatchSpecLenStart + 1 : Flush marker ++ = kMatchSpecLenStart + 2 : State Init Marker ++*/ ++ ++static int MY_FAST_CALL LzmaDec_DecodeReal(CLzmaDec *p, SizeT limit, const Byte *bufLimit) ++{ ++ CLzmaProb *probs = p->probs; ++ ++ unsigned state = p->state; ++ UInt32 rep0 = p->reps[0], rep1 = p->reps[1], rep2 = p->reps[2], rep3 = p->reps[3]; ++ unsigned pbMask = ((unsigned)1 << (p->prop.pb)) - 1; ++ unsigned lpMask = ((unsigned)1 << (p->prop.lp)) - 1; ++ unsigned lc = p->prop.lc; ++ ++ Byte *dic = p->dic; ++ SizeT dicBufSize = p->dicBufSize; ++ SizeT dicPos = p->dicPos; ++ ++ UInt32 processedPos = p->processedPos; ++ UInt32 checkDicSize = p->checkDicSize; ++ unsigned len = 0; ++ ++ const Byte *buf = p->buf; ++ UInt32 range = p->range; ++ UInt32 code = p->code; ++ ++ do ++ { ++ CLzmaProb *prob; ++ UInt32 bound; ++ unsigned ttt; ++ unsigned posState = processedPos & pbMask; ++ ++ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState; ++ IF_BIT_0(prob) ++ { ++ unsigned symbol; ++ UPDATE_0(prob); ++ prob = probs + Literal; ++ if (checkDicSize != 0 || processedPos != 0) ++ prob += (LZMA_LIT_SIZE * (((processedPos & lpMask) << lc) + ++ (dic[(dicPos == 0 ? dicBufSize : dicPos) - 1] >> (8 - lc)))); ++ ++ if (state < kNumLitStates) ++ { ++ state -= (state < 4) ? 
state : 3; ++ symbol = 1; ++ do { GET_BIT(prob + symbol, symbol) } while (symbol < 0x100); ++ } ++ else ++ { ++ unsigned matchByte = p->dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)]; ++ unsigned offs = 0x100; ++ state -= (state < 10) ? 3 : 6; ++ symbol = 1; ++ do ++ { ++ unsigned bit; ++ CLzmaProb *probLit; ++ matchByte <<= 1; ++ bit = (matchByte & offs); ++ probLit = prob + offs + bit + symbol; ++ GET_BIT2(probLit, symbol, offs &= ~bit, offs &= bit) ++ } ++ while (symbol < 0x100); ++ } ++ dic[dicPos++] = (Byte)symbol; ++ processedPos++; ++ continue; ++ } ++ else ++ { ++ UPDATE_1(prob); ++ prob = probs + IsRep + state; ++ IF_BIT_0(prob) ++ { ++ UPDATE_0(prob); ++ state += kNumStates; ++ prob = probs + LenCoder; ++ } ++ else ++ { ++ UPDATE_1(prob); ++ if (checkDicSize == 0 && processedPos == 0) ++ return SZ_ERROR_DATA; ++ prob = probs + IsRepG0 + state; ++ IF_BIT_0(prob) ++ { ++ UPDATE_0(prob); ++ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState; ++ IF_BIT_0(prob) ++ { ++ UPDATE_0(prob); ++ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)]; ++ dicPos++; ++ processedPos++; ++ state = state < kNumLitStates ? 9 : 11; ++ continue; ++ } ++ UPDATE_1(prob); ++ } ++ else ++ { ++ UInt32 distance; ++ UPDATE_1(prob); ++ prob = probs + IsRepG1 + state; ++ IF_BIT_0(prob) ++ { ++ UPDATE_0(prob); ++ distance = rep1; ++ } ++ else ++ { ++ UPDATE_1(prob); ++ prob = probs + IsRepG2 + state; ++ IF_BIT_0(prob) ++ { ++ UPDATE_0(prob); ++ distance = rep2; ++ } ++ else ++ { ++ UPDATE_1(prob); ++ distance = rep3; ++ rep3 = rep2; ++ } ++ rep2 = rep1; ++ } ++ rep1 = rep0; ++ rep0 = distance; ++ } ++ state = state < kNumLitStates ? 8 : 11; ++ prob = probs + RepLenCoder; ++ } ++ { ++ unsigned limit, offset; ++ CLzmaProb *probLen = prob + LenChoice; ++ IF_BIT_0(probLen) ++ { ++ UPDATE_0(probLen); ++ probLen = prob + LenLow + (posState << kLenNumLowBits); ++ offset = 0; ++ limit = (1 << kLenNumLowBits); ++ } ++ else ++ { ++ UPDATE_1(probLen); ++ probLen = prob + LenChoice2; ++ IF_BIT_0(probLen) ++ { ++ UPDATE_0(probLen); ++ probLen = prob + LenMid + (posState << kLenNumMidBits); ++ offset = kLenNumLowSymbols; ++ limit = (1 << kLenNumMidBits); ++ } ++ else ++ { ++ UPDATE_1(probLen); ++ probLen = prob + LenHigh; ++ offset = kLenNumLowSymbols + kLenNumMidSymbols; ++ limit = (1 << kLenNumHighBits); ++ } ++ } ++ TREE_DECODE(probLen, limit, len); ++ len += offset; ++ } ++ ++ if (state >= kNumStates) ++ { ++ UInt32 distance; ++ prob = probs + PosSlot + ++ ((len < kNumLenToPosStates ? 
len : kNumLenToPosStates - 1) << kNumPosSlotBits); ++ TREE_6_DECODE(prob, distance); ++ if (distance >= kStartPosModelIndex) ++ { ++ unsigned posSlot = (unsigned)distance; ++ int numDirectBits = (int)(((distance >> 1) - 1)); ++ distance = (2 | (distance & 1)); ++ if (posSlot < kEndPosModelIndex) ++ { ++ distance <<= numDirectBits; ++ prob = probs + SpecPos + distance - posSlot - 1; ++ { ++ UInt32 mask = 1; ++ unsigned i = 1; ++ do ++ { ++ GET_BIT2(prob + i, i, ; , distance |= mask); ++ mask <<= 1; ++ } ++ while (--numDirectBits != 0); ++ } ++ } ++ else ++ { ++ numDirectBits -= kNumAlignBits; ++ do ++ { ++ NORMALIZE ++ range >>= 1; ++ ++ { ++ UInt32 t; ++ code -= range; ++ t = (0 - ((UInt32)code >> 31)); /* (UInt32)((Int32)code >> 31) */ ++ distance = (distance << 1) + (t + 1); ++ code += range & t; ++ } ++ /* ++ distance <<= 1; ++ if (code >= range) ++ { ++ code -= range; ++ distance |= 1; ++ } ++ */ ++ } ++ while (--numDirectBits != 0); ++ prob = probs + Align; ++ distance <<= kNumAlignBits; ++ { ++ unsigned i = 1; ++ GET_BIT2(prob + i, i, ; , distance |= 1); ++ GET_BIT2(prob + i, i, ; , distance |= 2); ++ GET_BIT2(prob + i, i, ; , distance |= 4); ++ GET_BIT2(prob + i, i, ; , distance |= 8); ++ } ++ if (distance == (UInt32)0xFFFFFFFF) ++ { ++ len += kMatchSpecLenStart; ++ state -= kNumStates; ++ break; ++ } ++ } ++ } ++ rep3 = rep2; ++ rep2 = rep1; ++ rep1 = rep0; ++ rep0 = distance + 1; ++ if (checkDicSize == 0) ++ { ++ if (distance >= processedPos) ++ return SZ_ERROR_DATA; ++ } ++ else if (distance >= checkDicSize) ++ return SZ_ERROR_DATA; ++ state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3; ++ } ++ ++ len += kMatchMinLen; ++ ++ if (limit == dicPos) ++ return SZ_ERROR_DATA; ++ { ++ SizeT rem = limit - dicPos; ++ unsigned curLen = ((rem < len) ? (unsigned)rem : len); ++ SizeT pos = (dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0); ++ ++ processedPos += curLen; ++ ++ len -= curLen; ++ if (pos + curLen <= dicBufSize) ++ { ++ Byte *dest = dic + dicPos; ++ ptrdiff_t src = (ptrdiff_t)pos - (ptrdiff_t)dicPos; ++ const Byte *lim = dest + curLen; ++ dicPos += curLen; ++ do ++ *(dest) = (Byte)*(dest + src); ++ while (++dest != lim); ++ } ++ else ++ { ++ do ++ { ++ dic[dicPos++] = dic[pos]; ++ if (++pos == dicBufSize) ++ pos = 0; ++ } ++ while (--curLen != 0); ++ } ++ } ++ } ++ } ++ while (dicPos < limit && buf < bufLimit); ++ NORMALIZE; ++ p->buf = buf; ++ p->range = range; ++ p->code = code; ++ p->remainLen = len; ++ p->dicPos = dicPos; ++ p->processedPos = processedPos; ++ p->reps[0] = rep0; ++ p->reps[1] = rep1; ++ p->reps[2] = rep2; ++ p->reps[3] = rep3; ++ p->state = state; ++ ++ return SZ_OK; ++} ++ ++static void MY_FAST_CALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit) ++{ ++ if (p->remainLen != 0 && p->remainLen < kMatchSpecLenStart) ++ { ++ Byte *dic = p->dic; ++ SizeT dicPos = p->dicPos; ++ SizeT dicBufSize = p->dicBufSize; ++ unsigned len = p->remainLen; ++ UInt32 rep0 = p->reps[0]; ++ if (limit - dicPos < len) ++ len = (unsigned)(limit - dicPos); ++ ++ if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= len) ++ p->checkDicSize = p->prop.dicSize; ++ ++ p->processedPos += len; ++ p->remainLen -= len; ++ while (len-- != 0) ++ { ++ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? 
dicBufSize : 0)]; ++ dicPos++; ++ } ++ p->dicPos = dicPos; ++ } ++} ++ ++static int MY_FAST_CALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit) ++{ ++ do ++ { ++ SizeT limit2 = limit; ++ if (p->checkDicSize == 0) ++ { ++ UInt32 rem = p->prop.dicSize - p->processedPos; ++ if (limit - p->dicPos > rem) ++ limit2 = p->dicPos + rem; ++ } ++ RINOK(LzmaDec_DecodeReal(p, limit2, bufLimit)); ++ if (p->processedPos >= p->prop.dicSize) ++ p->checkDicSize = p->prop.dicSize; ++ LzmaDec_WriteRem(p, limit); ++ } ++ while (p->dicPos < limit && p->buf < bufLimit && p->remainLen < kMatchSpecLenStart); ++ ++ if (p->remainLen > kMatchSpecLenStart) ++ { ++ p->remainLen = kMatchSpecLenStart; ++ } ++ return 0; ++} ++ ++typedef enum ++{ ++ DUMMY_ERROR, /* unexpected end of input stream */ ++ DUMMY_LIT, ++ DUMMY_MATCH, ++ DUMMY_REP ++} ELzmaDummy; ++ ++static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, SizeT inSize) ++{ ++ UInt32 range = p->range; ++ UInt32 code = p->code; ++ const Byte *bufLimit = buf + inSize; ++ CLzmaProb *probs = p->probs; ++ unsigned state = p->state; ++ ELzmaDummy res; ++ ++ { ++ CLzmaProb *prob; ++ UInt32 bound; ++ unsigned ttt; ++ unsigned posState = (p->processedPos) & ((1 << p->prop.pb) - 1); ++ ++ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState; ++ IF_BIT_0_CHECK(prob) ++ { ++ UPDATE_0_CHECK ++ ++ /* if (bufLimit - buf >= 7) return DUMMY_LIT; */ ++ ++ prob = probs + Literal; ++ if (p->checkDicSize != 0 || p->processedPos != 0) ++ prob += (LZMA_LIT_SIZE * ++ ((((p->processedPos) & ((1 << (p->prop.lp)) - 1)) << p->prop.lc) + ++ (p->dic[(p->dicPos == 0 ? p->dicBufSize : p->dicPos) - 1] >> (8 - p->prop.lc)))); ++ ++ if (state < kNumLitStates) ++ { ++ unsigned symbol = 1; ++ do { GET_BIT_CHECK(prob + symbol, symbol) } while (symbol < 0x100); ++ } ++ else ++ { ++ unsigned matchByte = p->dic[p->dicPos - p->reps[0] + ++ ((p->dicPos < p->reps[0]) ? 
p->dicBufSize : 0)]; ++ unsigned offs = 0x100; ++ unsigned symbol = 1; ++ do ++ { ++ unsigned bit; ++ CLzmaProb *probLit; ++ matchByte <<= 1; ++ bit = (matchByte & offs); ++ probLit = prob + offs + bit + symbol; ++ GET_BIT2_CHECK(probLit, symbol, offs &= ~bit, offs &= bit) ++ } ++ while (symbol < 0x100); ++ } ++ res = DUMMY_LIT; ++ } ++ else ++ { ++ unsigned len; ++ UPDATE_1_CHECK; ++ ++ prob = probs + IsRep + state; ++ IF_BIT_0_CHECK(prob) ++ { ++ UPDATE_0_CHECK; ++ state = 0; ++ prob = probs + LenCoder; ++ res = DUMMY_MATCH; ++ } ++ else ++ { ++ UPDATE_1_CHECK; ++ res = DUMMY_REP; ++ prob = probs + IsRepG0 + state; ++ IF_BIT_0_CHECK(prob) ++ { ++ UPDATE_0_CHECK; ++ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState; ++ IF_BIT_0_CHECK(prob) ++ { ++ UPDATE_0_CHECK; ++ NORMALIZE_CHECK; ++ return DUMMY_REP; ++ } ++ else ++ { ++ UPDATE_1_CHECK; ++ } ++ } ++ else ++ { ++ UPDATE_1_CHECK; ++ prob = probs + IsRepG1 + state; ++ IF_BIT_0_CHECK(prob) ++ { ++ UPDATE_0_CHECK; ++ } ++ else ++ { ++ UPDATE_1_CHECK; ++ prob = probs + IsRepG2 + state; ++ IF_BIT_0_CHECK(prob) ++ { ++ UPDATE_0_CHECK; ++ } ++ else ++ { ++ UPDATE_1_CHECK; ++ } ++ } ++ } ++ state = kNumStates; ++ prob = probs + RepLenCoder; ++ } ++ { ++ unsigned limit, offset; ++ CLzmaProb *probLen = prob + LenChoice; ++ IF_BIT_0_CHECK(probLen) ++ { ++ UPDATE_0_CHECK; ++ probLen = prob + LenLow + (posState << kLenNumLowBits); ++ offset = 0; ++ limit = 1 << kLenNumLowBits; ++ } ++ else ++ { ++ UPDATE_1_CHECK; ++ probLen = prob + LenChoice2; ++ IF_BIT_0_CHECK(probLen) ++ { ++ UPDATE_0_CHECK; ++ probLen = prob + LenMid + (posState << kLenNumMidBits); ++ offset = kLenNumLowSymbols; ++ limit = 1 << kLenNumMidBits; ++ } ++ else ++ { ++ UPDATE_1_CHECK; ++ probLen = prob + LenHigh; ++ offset = kLenNumLowSymbols + kLenNumMidSymbols; ++ limit = 1 << kLenNumHighBits; ++ } ++ } ++ TREE_DECODE_CHECK(probLen, limit, len); ++ len += offset; ++ } ++ ++ if (state < 4) ++ { ++ unsigned posSlot; ++ prob = probs + PosSlot + ++ ((len < kNumLenToPosStates ? 
len : kNumLenToPosStates - 1) << ++ kNumPosSlotBits); ++ TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot); ++ if (posSlot >= kStartPosModelIndex) ++ { ++ int numDirectBits = ((posSlot >> 1) - 1); ++ ++ /* if (bufLimit - buf >= 8) return DUMMY_MATCH; */ ++ ++ if (posSlot < kEndPosModelIndex) ++ { ++ prob = probs + SpecPos + ((2 | (posSlot & 1)) << numDirectBits) - posSlot - 1; ++ } ++ else ++ { ++ numDirectBits -= kNumAlignBits; ++ do ++ { ++ NORMALIZE_CHECK ++ range >>= 1; ++ code -= range & (((code - range) >> 31) - 1); ++ /* if (code >= range) code -= range; */ ++ } ++ while (--numDirectBits != 0); ++ prob = probs + Align; ++ numDirectBits = kNumAlignBits; ++ } ++ { ++ unsigned i = 1; ++ do ++ { ++ GET_BIT_CHECK(prob + i, i); ++ } ++ while (--numDirectBits != 0); ++ } ++ } ++ } ++ } ++ } ++ NORMALIZE_CHECK; ++ return res; ++} ++ ++ ++static void LzmaDec_InitRc(CLzmaDec *p, const Byte *data) ++{ ++ p->code = ((UInt32)data[1] << 24) | ((UInt32)data[2] << 16) | ((UInt32)data[3] << 8) | ((UInt32)data[4]); ++ p->range = 0xFFFFFFFF; ++ p->needFlush = 0; ++} ++ ++void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState) ++{ ++ p->needFlush = 1; ++ p->remainLen = 0; ++ p->tempBufSize = 0; ++ ++ if (initDic) ++ { ++ p->processedPos = 0; ++ p->checkDicSize = 0; ++ p->needInitState = 1; ++ } ++ if (initState) ++ p->needInitState = 1; ++} ++ ++void LzmaDec_Init(CLzmaDec *p) ++{ ++ p->dicPos = 0; ++ LzmaDec_InitDicAndState(p, True, True); ++} ++ ++static void LzmaDec_InitStateReal(CLzmaDec *p) ++{ ++ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (p->prop.lc + p->prop.lp)); ++ UInt32 i; ++ CLzmaProb *probs = p->probs; ++ for (i = 0; i < numProbs; i++) ++ probs[i] = kBitModelTotal >> 1; ++ p->reps[0] = p->reps[1] = p->reps[2] = p->reps[3] = 1; ++ p->state = 0; ++ p->needInitState = 0; ++} ++ ++SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen, ++ ELzmaFinishMode finishMode, ELzmaStatus *status) ++{ ++ SizeT inSize = *srcLen; ++ (*srcLen) = 0; ++ LzmaDec_WriteRem(p, dicLimit); ++ ++ *status = LZMA_STATUS_NOT_SPECIFIED; ++ ++ while (p->remainLen != kMatchSpecLenStart) ++ { ++ int checkEndMarkNow; ++ ++ if (p->needFlush != 0) ++ { ++ for (; inSize > 0 && p->tempBufSize < RC_INIT_SIZE; (*srcLen)++, inSize--) ++ p->tempBuf[p->tempBufSize++] = *src++; ++ if (p->tempBufSize < RC_INIT_SIZE) ++ { ++ *status = LZMA_STATUS_NEEDS_MORE_INPUT; ++ return SZ_OK; ++ } ++ if (p->tempBuf[0] != 0) ++ return SZ_ERROR_DATA; ++ ++ LzmaDec_InitRc(p, p->tempBuf); ++ p->tempBufSize = 0; ++ } ++ ++ checkEndMarkNow = 0; ++ if (p->dicPos >= dicLimit) ++ { ++ if (p->remainLen == 0 && p->code == 0) ++ { ++ *status = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK; ++ return SZ_OK; ++ } ++ if (finishMode == LZMA_FINISH_ANY) ++ { ++ *status = LZMA_STATUS_NOT_FINISHED; ++ return SZ_OK; ++ } ++ if (p->remainLen != 0) ++ { ++ *status = LZMA_STATUS_NOT_FINISHED; ++ return SZ_ERROR_DATA; ++ } ++ checkEndMarkNow = 1; ++ } ++ ++ if (p->needInitState) ++ LzmaDec_InitStateReal(p); ++ ++ if (p->tempBufSize == 0) ++ { ++ SizeT processed; ++ const Byte *bufLimit; ++ if (inSize < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow) ++ { ++ int dummyRes = LzmaDec_TryDummy(p, src, inSize); ++ if (dummyRes == DUMMY_ERROR) ++ { ++ memcpy(p->tempBuf, src, inSize); ++ p->tempBufSize = (unsigned)inSize; ++ (*srcLen) += inSize; ++ *status = LZMA_STATUS_NEEDS_MORE_INPUT; ++ return SZ_OK; ++ } ++ if (checkEndMarkNow && dummyRes != DUMMY_MATCH) ++ { ++ *status = LZMA_STATUS_NOT_FINISHED; ++ return SZ_ERROR_DATA; 
++ } ++ bufLimit = src; ++ } ++ else ++ bufLimit = src + inSize - LZMA_REQUIRED_INPUT_MAX; ++ p->buf = src; ++ if (LzmaDec_DecodeReal2(p, dicLimit, bufLimit) != 0) ++ return SZ_ERROR_DATA; ++ processed = (SizeT)(p->buf - src); ++ (*srcLen) += processed; ++ src += processed; ++ inSize -= processed; ++ } ++ else ++ { ++ unsigned rem = p->tempBufSize, lookAhead = 0; ++ while (rem < LZMA_REQUIRED_INPUT_MAX && lookAhead < inSize) ++ p->tempBuf[rem++] = src[lookAhead++]; ++ p->tempBufSize = rem; ++ if (rem < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow) ++ { ++ int dummyRes = LzmaDec_TryDummy(p, p->tempBuf, rem); ++ if (dummyRes == DUMMY_ERROR) ++ { ++ (*srcLen) += lookAhead; ++ *status = LZMA_STATUS_NEEDS_MORE_INPUT; ++ return SZ_OK; ++ } ++ if (checkEndMarkNow && dummyRes != DUMMY_MATCH) ++ { ++ *status = LZMA_STATUS_NOT_FINISHED; ++ return SZ_ERROR_DATA; ++ } ++ } ++ p->buf = p->tempBuf; ++ if (LzmaDec_DecodeReal2(p, dicLimit, p->buf) != 0) ++ return SZ_ERROR_DATA; ++ lookAhead -= (rem - (unsigned)(p->buf - p->tempBuf)); ++ (*srcLen) += lookAhead; ++ src += lookAhead; ++ inSize -= lookAhead; ++ p->tempBufSize = 0; ++ } ++ } ++ if (p->code == 0) ++ *status = LZMA_STATUS_FINISHED_WITH_MARK; ++ return (p->code == 0) ? SZ_OK : SZ_ERROR_DATA; ++} ++ ++SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status) ++{ ++ SizeT outSize = *destLen; ++ SizeT inSize = *srcLen; ++ *srcLen = *destLen = 0; ++ for (;;) ++ { ++ SizeT inSizeCur = inSize, outSizeCur, dicPos; ++ ELzmaFinishMode curFinishMode; ++ SRes res; ++ if (p->dicPos == p->dicBufSize) ++ p->dicPos = 0; ++ dicPos = p->dicPos; ++ if (outSize > p->dicBufSize - dicPos) ++ { ++ outSizeCur = p->dicBufSize; ++ curFinishMode = LZMA_FINISH_ANY; ++ } ++ else ++ { ++ outSizeCur = dicPos + outSize; ++ curFinishMode = finishMode; ++ } ++ ++ res = LzmaDec_DecodeToDic(p, outSizeCur, src, &inSizeCur, curFinishMode, status); ++ src += inSizeCur; ++ inSize -= inSizeCur; ++ *srcLen += inSizeCur; ++ outSizeCur = p->dicPos - dicPos; ++ memcpy(dest, p->dic + dicPos, outSizeCur); ++ dest += outSizeCur; ++ outSize -= outSizeCur; ++ *destLen += outSizeCur; ++ if (res != 0) ++ return res; ++ if (outSizeCur == 0 || outSize == 0) ++ return SZ_OK; ++ } ++} ++ ++void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc) ++{ ++ alloc->Free(alloc, p->probs); ++ p->probs = 0; ++} ++ ++static void LzmaDec_FreeDict(CLzmaDec *p, ISzAlloc *alloc) ++{ ++ alloc->Free(alloc, p->dic); ++ p->dic = 0; ++} ++ ++void LzmaDec_Free(CLzmaDec *p, ISzAlloc *alloc) ++{ ++ LzmaDec_FreeProbs(p, alloc); ++ LzmaDec_FreeDict(p, alloc); ++} ++ ++SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size) ++{ ++ UInt32 dicSize; ++ Byte d; ++ ++ if (size < LZMA_PROPS_SIZE) ++ return SZ_ERROR_UNSUPPORTED; ++ else ++ dicSize = data[1] | ((UInt32)data[2] << 8) | ((UInt32)data[3] << 16) | ((UInt32)data[4] << 24); ++ ++ if (dicSize < LZMA_DIC_MIN) ++ dicSize = LZMA_DIC_MIN; ++ p->dicSize = dicSize; ++ ++ d = data[0]; ++ if (d >= (9 * 5 * 5)) ++ return SZ_ERROR_UNSUPPORTED; ++ ++ p->lc = d % 9; ++ d /= 9; ++ p->pb = d / 5; ++ p->lp = d % 5; ++ ++ return SZ_OK; ++} ++ ++static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAlloc *alloc) ++{ ++ UInt32 numProbs = LzmaProps_GetNumProbs(propNew); ++ if (p->probs == 0 || numProbs != p->numProbs) ++ { ++ LzmaDec_FreeProbs(p, alloc); ++ p->probs = (CLzmaProb *)alloc->Alloc(alloc, numProbs * sizeof(CLzmaProb)); ++ p->numProbs = numProbs; ++ if (p->probs 
== 0) ++ return SZ_ERROR_MEM; ++ } ++ return SZ_OK; ++} ++ ++SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc) ++{ ++ CLzmaProps propNew; ++ RINOK(LzmaProps_Decode(&propNew, props, propsSize)); ++ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc)); ++ p->prop = propNew; ++ return SZ_OK; ++} ++ ++SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc) ++{ ++ CLzmaProps propNew; ++ SizeT dicBufSize; ++ RINOK(LzmaProps_Decode(&propNew, props, propsSize)); ++ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc)); ++ dicBufSize = propNew.dicSize; ++ if (p->dic == 0 || dicBufSize != p->dicBufSize) ++ { ++ LzmaDec_FreeDict(p, alloc); ++ p->dic = (Byte *)alloc->Alloc(alloc, dicBufSize); ++ if (p->dic == 0) ++ { ++ LzmaDec_FreeProbs(p, alloc); ++ return SZ_ERROR_MEM; ++ } ++ } ++ p->dicBufSize = dicBufSize; ++ p->prop = propNew; ++ return SZ_OK; ++} ++ ++SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ++ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode, ++ ELzmaStatus *status, ISzAlloc *alloc) ++{ ++ CLzmaDec p; ++ SRes res; ++ SizeT inSize = *srcLen; ++ SizeT outSize = *destLen; ++ *srcLen = *destLen = 0; ++ if (inSize < RC_INIT_SIZE) ++ return SZ_ERROR_INPUT_EOF; ++ ++ LzmaDec_Construct(&p); ++ res = LzmaDec_AllocateProbs(&p, propData, propSize, alloc); ++ if (res != 0) ++ return res; ++ p.dic = dest; ++ p.dicBufSize = outSize; ++ ++ LzmaDec_Init(&p); ++ ++ *srcLen = inSize; ++ res = LzmaDec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status); ++ ++ if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT) ++ res = SZ_ERROR_INPUT_EOF; ++ ++ (*destLen) = p.dicPos; ++ LzmaDec_FreeProbs(&p, alloc); ++ return res; ++} +--- /dev/null ++++ b/lib/lzma/LzmaEnc.c +@@ -0,0 +1,2271 @@ ++/* LzmaEnc.c -- LZMA Encoder ++2009-11-24 : Igor Pavlov : Public domain */ ++ ++#include <string.h> ++ ++/* #define SHOW_STAT */ ++/* #define SHOW_STAT2 */ ++ ++#if defined(SHOW_STAT) || defined(SHOW_STAT2) ++#include <stdio.h> ++#endif ++ ++#include "LzmaEnc.h" ++ ++/* disable MT */ ++#define _7ZIP_ST ++ ++#include "LzFind.h" ++#ifndef _7ZIP_ST ++#include "LzFindMt.h" ++#endif ++ ++#ifdef SHOW_STAT ++static int ttt = 0; ++#endif ++ ++#define kBlockSizeMax ((1 << LZMA_NUM_BLOCK_SIZE_BITS) - 1) ++ ++#define kBlockSize (9 << 10) ++#define kUnpackBlockSize (1 << 18) ++#define kMatchArraySize (1 << 21) ++#define kMatchRecordMaxSize ((LZMA_MATCH_LEN_MAX * 2 + 3) * LZMA_MATCH_LEN_MAX) ++ ++#define kNumMaxDirectBits (31) ++ ++#define kNumTopBits 24 ++#define kTopValue ((UInt32)1 << kNumTopBits) ++ ++#define kNumBitModelTotalBits 11 ++#define kBitModelTotal (1 << kNumBitModelTotalBits) ++#define kNumMoveBits 5 ++#define kProbInitValue (kBitModelTotal >> 1) ++ ++#define kNumMoveReducingBits 4 ++#define kNumBitPriceShiftBits 4 ++#define kBitPrice (1 << kNumBitPriceShiftBits) ++ ++void LzmaEncProps_Init(CLzmaEncProps *p) ++{ ++ p->level = 5; ++ p->dictSize = p->mc = 0; ++ p->lc = p->lp = p->pb = p->algo = p->fb = p->btMode = p->numHashBytes = p->numThreads = -1; ++ p->writeEndMark = 0; ++} ++ ++void LzmaEncProps_Normalize(CLzmaEncProps *p) ++{ ++ int level = p->level; ++ if (level < 0) level = 5; ++ p->level = level; ++ if (p->dictSize == 0) p->dictSize = (level <= 5 ? (1 << (level * 2 + 14)) : (level == 6 ? (1 << 25) : (1 << 26))); ++ if (p->lc < 0) p->lc = 3; ++ if (p->lp < 0) p->lp = 0; ++ if (p->pb < 0) p->pb = 2; ++ if (p->algo < 0) p->algo = (level < 5 ? 0 : 1); ++ if (p->fb < 0) p->fb = (level < 7 ? 
32 : 64); ++ if (p->btMode < 0) p->btMode = (p->algo == 0 ? 0 : 1); ++ if (p->numHashBytes < 0) p->numHashBytes = 4; ++ if (p->mc == 0) p->mc = (16 + (p->fb >> 1)) >> (p->btMode ? 0 : 1); ++ if (p->numThreads < 0) ++ p->numThreads = ++ #ifndef _7ZIP_ST ++ ((p->btMode && p->algo) ? 2 : 1); ++ #else ++ 1; ++ #endif ++} ++ ++UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2) ++{ ++ CLzmaEncProps props = *props2; ++ LzmaEncProps_Normalize(&props); ++ return props.dictSize; ++} ++ ++/* #define LZMA_LOG_BSR */ ++/* Define it for Intel's CPU */ ++ ++ ++#ifdef LZMA_LOG_BSR ++ ++#define kDicLogSizeMaxCompress 30 ++ ++#define BSR2_RET(pos, res) { unsigned long i; _BitScanReverse(&i, (pos)); res = (i + i) + ((pos >> (i - 1)) & 1); } ++ ++UInt32 GetPosSlot1(UInt32 pos) ++{ ++ UInt32 res; ++ BSR2_RET(pos, res); ++ return res; ++} ++#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); } ++#define GetPosSlot(pos, res) { if (pos < 2) res = pos; else BSR2_RET(pos, res); } ++ ++#else ++ ++#define kNumLogBits (9 + (int)sizeof(size_t) / 2) ++#define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7) ++ ++void LzmaEnc_FastPosInit(Byte *g_FastPos) ++{ ++ int c = 2, slotFast; ++ g_FastPos[0] = 0; ++ g_FastPos[1] = 1; ++ ++ for (slotFast = 2; slotFast < kNumLogBits * 2; slotFast++) ++ { ++ UInt32 k = (1 << ((slotFast >> 1) - 1)); ++ UInt32 j; ++ for (j = 0; j < k; j++, c++) ++ g_FastPos[c] = (Byte)slotFast; ++ } ++} ++ ++#define BSR2_RET(pos, res) { UInt32 i = 6 + ((kNumLogBits - 1) & \ ++ (0 - (((((UInt32)1 << (kNumLogBits + 6)) - 1) - pos) >> 31))); \ ++ res = p->g_FastPos[pos >> i] + (i * 2); } ++/* ++#define BSR2_RET(pos, res) { res = (pos < (1 << (kNumLogBits + 6))) ? \ ++ p->g_FastPos[pos >> 6] + 12 : \ ++ p->g_FastPos[pos >> (6 + kNumLogBits - 1)] + (6 + (kNumLogBits - 1)) * 2; } ++*/ ++ ++#define GetPosSlot1(pos) p->g_FastPos[pos] ++#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); } ++#define GetPosSlot(pos, res) { if (pos < kNumFullDistances) res = p->g_FastPos[pos]; else BSR2_RET(pos, res); } ++ ++#endif ++ ++ ++#define LZMA_NUM_REPS 4 ++ ++typedef unsigned CState; ++ ++typedef struct ++{ ++ UInt32 price; ++ ++ CState state; ++ int prev1IsChar; ++ int prev2; ++ ++ UInt32 posPrev2; ++ UInt32 backPrev2; ++ ++ UInt32 posPrev; ++ UInt32 backPrev; ++ UInt32 backs[LZMA_NUM_REPS]; ++} COptimal; ++ ++#define kNumOpts (1 << 12) ++ ++#define kNumLenToPosStates 4 ++#define kNumPosSlotBits 6 ++#define kDicLogSizeMin 0 ++#define kDicLogSizeMax 32 ++#define kDistTableSizeMax (kDicLogSizeMax * 2) ++ ++ ++#define kNumAlignBits 4 ++#define kAlignTableSize (1 << kNumAlignBits) ++#define kAlignMask (kAlignTableSize - 1) ++ ++#define kStartPosModelIndex 4 ++#define kEndPosModelIndex 14 ++#define kNumPosModels (kEndPosModelIndex - kStartPosModelIndex) ++ ++#define kNumFullDistances (1 << (kEndPosModelIndex >> 1)) ++ ++#ifdef _LZMA_PROB32 ++#define CLzmaProb UInt32 ++#else ++#define CLzmaProb UInt16 ++#endif ++ ++#define LZMA_PB_MAX 4 ++#define LZMA_LC_MAX 8 ++#define LZMA_LP_MAX 4 ++ ++#define LZMA_NUM_PB_STATES_MAX (1 << LZMA_PB_MAX) ++ ++ ++#define kLenNumLowBits 3 ++#define kLenNumLowSymbols (1 << kLenNumLowBits) ++#define kLenNumMidBits 3 ++#define kLenNumMidSymbols (1 << kLenNumMidBits) ++#define kLenNumHighBits 8 ++#define kLenNumHighSymbols (1 << kLenNumHighBits) ++ ++#define kLenNumSymbolsTotal (kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols) ++ ++#define LZMA_MATCH_LEN_MIN 2 ++#define LZMA_MATCH_LEN_MAX (LZMA_MATCH_LEN_MIN + kLenNumSymbolsTotal - 1) ++ ++#define kNumStates 12 ++ ++typedef 
struct ++{ ++ CLzmaProb choice; ++ CLzmaProb choice2; ++ CLzmaProb low[LZMA_NUM_PB_STATES_MAX << kLenNumLowBits]; ++ CLzmaProb mid[LZMA_NUM_PB_STATES_MAX << kLenNumMidBits]; ++ CLzmaProb high[kLenNumHighSymbols]; ++} CLenEnc; ++ ++typedef struct ++{ ++ CLenEnc p; ++ UInt32 prices[LZMA_NUM_PB_STATES_MAX][kLenNumSymbolsTotal]; ++ UInt32 tableSize; ++ UInt32 counters[LZMA_NUM_PB_STATES_MAX]; ++} CLenPriceEnc; ++ ++typedef struct ++{ ++ UInt32 range; ++ Byte cache; ++ UInt64 low; ++ UInt64 cacheSize; ++ Byte *buf; ++ Byte *bufLim; ++ Byte *bufBase; ++ ISeqOutStream *outStream; ++ UInt64 processed; ++ SRes res; ++} CRangeEnc; ++ ++typedef struct ++{ ++ CLzmaProb *litProbs; ++ ++ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX]; ++ CLzmaProb isRep[kNumStates]; ++ CLzmaProb isRepG0[kNumStates]; ++ CLzmaProb isRepG1[kNumStates]; ++ CLzmaProb isRepG2[kNumStates]; ++ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX]; ++ ++ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits]; ++ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex]; ++ CLzmaProb posAlignEncoder[1 << kNumAlignBits]; ++ ++ CLenPriceEnc lenEnc; ++ CLenPriceEnc repLenEnc; ++ ++ UInt32 reps[LZMA_NUM_REPS]; ++ UInt32 state; ++} CSaveState; ++ ++typedef struct ++{ ++ IMatchFinder matchFinder; ++ void *matchFinderObj; ++ ++ #ifndef _7ZIP_ST ++ Bool mtMode; ++ CMatchFinderMt matchFinderMt; ++ #endif ++ ++ CMatchFinder matchFinderBase; ++ ++ #ifndef _7ZIP_ST ++ Byte pad[128]; ++ #endif ++ ++ UInt32 optimumEndIndex; ++ UInt32 optimumCurrentIndex; ++ ++ UInt32 longestMatchLength; ++ UInt32 numPairs; ++ UInt32 numAvail; ++ COptimal opt[kNumOpts]; ++ ++ #ifndef LZMA_LOG_BSR ++ Byte g_FastPos[1 << kNumLogBits]; ++ #endif ++ ++ UInt32 ProbPrices[kBitModelTotal >> kNumMoveReducingBits]; ++ UInt32 matches[LZMA_MATCH_LEN_MAX * 2 + 2 + 1]; ++ UInt32 numFastBytes; ++ UInt32 additionalOffset; ++ UInt32 reps[LZMA_NUM_REPS]; ++ UInt32 state; ++ ++ UInt32 posSlotPrices[kNumLenToPosStates][kDistTableSizeMax]; ++ UInt32 distancesPrices[kNumLenToPosStates][kNumFullDistances]; ++ UInt32 alignPrices[kAlignTableSize]; ++ UInt32 alignPriceCount; ++ ++ UInt32 distTableSize; ++ ++ unsigned lc, lp, pb; ++ unsigned lpMask, pbMask; ++ ++ CLzmaProb *litProbs; ++ ++ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX]; ++ CLzmaProb isRep[kNumStates]; ++ CLzmaProb isRepG0[kNumStates]; ++ CLzmaProb isRepG1[kNumStates]; ++ CLzmaProb isRepG2[kNumStates]; ++ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX]; ++ ++ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits]; ++ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex]; ++ CLzmaProb posAlignEncoder[1 << kNumAlignBits]; ++ ++ CLenPriceEnc lenEnc; ++ CLenPriceEnc repLenEnc; ++ ++ unsigned lclp; ++ ++ Bool fastMode; ++ ++ CRangeEnc rc; ++ ++ Bool writeEndMark; ++ UInt64 nowPos64; ++ UInt32 matchPriceCount; ++ Bool finished; ++ Bool multiThread; ++ ++ SRes result; ++ UInt32 dictSize; ++ UInt32 matchFinderCycles; ++ ++ int needInit; ++ ++ CSaveState saveState; ++} CLzmaEnc; ++ ++void LzmaEnc_SaveState(CLzmaEncHandle pp) ++{ ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ CSaveState *dest = &p->saveState; ++ int i; ++ dest->lenEnc = p->lenEnc; ++ dest->repLenEnc = p->repLenEnc; ++ dest->state = p->state; ++ ++ for (i = 0; i < kNumStates; i++) ++ { ++ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i])); ++ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i])); ++ } ++ for (i = 0; i < kNumLenToPosStates; i++) ++ memcpy(dest->posSlotEncoder[i], 
p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i])); ++ memcpy(dest->isRep, p->isRep, sizeof(p->isRep)); ++ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0)); ++ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1)); ++ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2)); ++ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders)); ++ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder)); ++ memcpy(dest->reps, p->reps, sizeof(p->reps)); ++ memcpy(dest->litProbs, p->litProbs, (0x300 << p->lclp) * sizeof(CLzmaProb)); ++} ++ ++void LzmaEnc_RestoreState(CLzmaEncHandle pp) ++{ ++ CLzmaEnc *dest = (CLzmaEnc *)pp; ++ const CSaveState *p = &dest->saveState; ++ int i; ++ dest->lenEnc = p->lenEnc; ++ dest->repLenEnc = p->repLenEnc; ++ dest->state = p->state; ++ ++ for (i = 0; i < kNumStates; i++) ++ { ++ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i])); ++ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i])); ++ } ++ for (i = 0; i < kNumLenToPosStates; i++) ++ memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i])); ++ memcpy(dest->isRep, p->isRep, sizeof(p->isRep)); ++ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0)); ++ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1)); ++ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2)); ++ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders)); ++ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder)); ++ memcpy(dest->reps, p->reps, sizeof(p->reps)); ++ memcpy(dest->litProbs, p->litProbs, (0x300 << dest->lclp) * sizeof(CLzmaProb)); ++} ++ ++SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2) ++{ ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ CLzmaEncProps props = *props2; ++ LzmaEncProps_Normalize(&props); ++ ++ if (props.lc > LZMA_LC_MAX || props.lp > LZMA_LP_MAX || props.pb > LZMA_PB_MAX || ++ props.dictSize > (1 << kDicLogSizeMaxCompress) || props.dictSize > (1 << 30)) ++ return SZ_ERROR_PARAM; ++ p->dictSize = props.dictSize; ++ p->matchFinderCycles = props.mc; ++ { ++ unsigned fb = props.fb; ++ if (fb < 5) ++ fb = 5; ++ if (fb > LZMA_MATCH_LEN_MAX) ++ fb = LZMA_MATCH_LEN_MAX; ++ p->numFastBytes = fb; ++ } ++ p->lc = props.lc; ++ p->lp = props.lp; ++ p->pb = props.pb; ++ p->fastMode = (props.algo == 0); ++ p->matchFinderBase.btMode = props.btMode; ++ { ++ UInt32 numHashBytes = 4; ++ if (props.btMode) ++ { ++ if (props.numHashBytes < 2) ++ numHashBytes = 2; ++ else if (props.numHashBytes < 4) ++ numHashBytes = props.numHashBytes; ++ } ++ p->matchFinderBase.numHashBytes = numHashBytes; ++ } ++ ++ p->matchFinderBase.cutValue = props.mc; ++ ++ p->writeEndMark = props.writeEndMark; ++ ++ #ifndef _7ZIP_ST ++ /* ++ if (newMultiThread != _multiThread) ++ { ++ ReleaseMatchFinder(); ++ _multiThread = newMultiThread; ++ } ++ */ ++ p->multiThread = (props.numThreads > 1); ++ #endif ++ ++ return SZ_OK; ++} ++ ++static const int kLiteralNextStates[kNumStates] = {0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5}; ++static const int kMatchNextStates[kNumStates] = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10}; ++static const int kRepNextStates[kNumStates] = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11}; ++static const int kShortRepNextStates[kNumStates]= {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11}; ++ ++#define IsCharState(s) ((s) < 7) ++ ++#define GetLenToPosState(len) (((len) < kNumLenToPosStates + 1) ? 
(len) - 2 : kNumLenToPosStates - 1) ++ ++#define kInfinityPrice (1 << 30) ++ ++static void RangeEnc_Construct(CRangeEnc *p) ++{ ++ p->outStream = 0; ++ p->bufBase = 0; ++} ++ ++#define RangeEnc_GetProcessed(p) ((p)->processed + ((p)->buf - (p)->bufBase) + (p)->cacheSize) ++ ++#define RC_BUF_SIZE (1 << 16) ++static int RangeEnc_Alloc(CRangeEnc *p, ISzAlloc *alloc) ++{ ++ if (p->bufBase == 0) ++ { ++ p->bufBase = (Byte *)alloc->Alloc(alloc, RC_BUF_SIZE); ++ if (p->bufBase == 0) ++ return 0; ++ p->bufLim = p->bufBase + RC_BUF_SIZE; ++ } ++ return 1; ++} ++ ++static void RangeEnc_Free(CRangeEnc *p, ISzAlloc *alloc) ++{ ++ alloc->Free(alloc, p->bufBase); ++ p->bufBase = 0; ++} ++ ++static void RangeEnc_Init(CRangeEnc *p) ++{ ++ /* Stream.Init(); */ ++ p->low = 0; ++ p->range = 0xFFFFFFFF; ++ p->cacheSize = 1; ++ p->cache = 0; ++ ++ p->buf = p->bufBase; ++ ++ p->processed = 0; ++ p->res = SZ_OK; ++} ++ ++static void RangeEnc_FlushStream(CRangeEnc *p) ++{ ++ size_t num; ++ if (p->res != SZ_OK) ++ return; ++ num = p->buf - p->bufBase; ++ if (num != p->outStream->Write(p->outStream, p->bufBase, num)) ++ p->res = SZ_ERROR_WRITE; ++ p->processed += num; ++ p->buf = p->bufBase; ++} ++ ++static void MY_FAST_CALL RangeEnc_ShiftLow(CRangeEnc *p) ++{ ++ if ((UInt32)p->low < (UInt32)0xFF000000 || (int)(p->low >> 32) != 0) ++ { ++ Byte temp = p->cache; ++ do ++ { ++ Byte *buf = p->buf; ++ *buf++ = (Byte)(temp + (Byte)(p->low >> 32)); ++ p->buf = buf; ++ if (buf == p->bufLim) ++ RangeEnc_FlushStream(p); ++ temp = 0xFF; ++ } ++ while (--p->cacheSize != 0); ++ p->cache = (Byte)((UInt32)p->low >> 24); ++ } ++ p->cacheSize++; ++ p->low = (UInt32)p->low << 8; ++} ++ ++static void RangeEnc_FlushData(CRangeEnc *p) ++{ ++ int i; ++ for (i = 0; i < 5; i++) ++ RangeEnc_ShiftLow(p); ++} ++ ++static void RangeEnc_EncodeDirectBits(CRangeEnc *p, UInt32 value, int numBits) ++{ ++ do ++ { ++ p->range >>= 1; ++ p->low += p->range & (0 - ((value >> --numBits) & 1)); ++ if (p->range < kTopValue) ++ { ++ p->range <<= 8; ++ RangeEnc_ShiftLow(p); ++ } ++ } ++ while (numBits != 0); ++} ++ ++static void RangeEnc_EncodeBit(CRangeEnc *p, CLzmaProb *prob, UInt32 symbol) ++{ ++ UInt32 ttt = *prob; ++ UInt32 newBound = (p->range >> kNumBitModelTotalBits) * ttt; ++ if (symbol == 0) ++ { ++ p->range = newBound; ++ ttt += (kBitModelTotal - ttt) >> kNumMoveBits; ++ } ++ else ++ { ++ p->low += newBound; ++ p->range -= newBound; ++ ttt -= ttt >> kNumMoveBits; ++ } ++ *prob = (CLzmaProb)ttt; ++ if (p->range < kTopValue) ++ { ++ p->range <<= 8; ++ RangeEnc_ShiftLow(p); ++ } ++} ++ ++static void LitEnc_Encode(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol) ++{ ++ symbol |= 0x100; ++ do ++ { ++ RangeEnc_EncodeBit(p, probs + (symbol >> 8), (symbol >> 7) & 1); ++ symbol <<= 1; ++ } ++ while (symbol < 0x10000); ++} ++ ++static void LitEnc_EncodeMatched(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol, UInt32 matchByte) ++{ ++ UInt32 offs = 0x100; ++ symbol |= 0x100; ++ do ++ { ++ matchByte <<= 1; ++ RangeEnc_EncodeBit(p, probs + (offs + (matchByte & offs) + (symbol >> 8)), (symbol >> 7) & 1); ++ symbol <<= 1; ++ offs &= ~(matchByte ^ symbol); ++ } ++ while (symbol < 0x10000); ++} ++ ++void LzmaEnc_InitPriceTables(UInt32 *ProbPrices) ++{ ++ UInt32 i; ++ for (i = (1 << kNumMoveReducingBits) / 2; i < kBitModelTotal; i += (1 << kNumMoveReducingBits)) ++ { ++ const int kCyclesBits = kNumBitPriceShiftBits; ++ UInt32 w = i; ++ UInt32 bitCount = 0; ++ int j; ++ for (j = 0; j < kCyclesBits; j++) ++ { ++ w = w * w; ++ bitCount <<= 1; ++ while (w >= ((UInt32)1 << 
16)) ++ { ++ w >>= 1; ++ bitCount++; ++ } ++ } ++ ProbPrices[i >> kNumMoveReducingBits] = ((kNumBitModelTotalBits << kCyclesBits) - 15 - bitCount); ++ } ++} ++ ++ ++#define GET_PRICE(prob, symbol) \ ++ p->ProbPrices[((prob) ^ (((-(int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits]; ++ ++#define GET_PRICEa(prob, symbol) \ ++ ProbPrices[((prob) ^ ((-((int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits]; ++ ++#define GET_PRICE_0(prob) p->ProbPrices[(prob) >> kNumMoveReducingBits] ++#define GET_PRICE_1(prob) p->ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits] ++ ++#define GET_PRICE_0a(prob) ProbPrices[(prob) >> kNumMoveReducingBits] ++#define GET_PRICE_1a(prob) ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits] ++ ++static UInt32 LitEnc_GetPrice(const CLzmaProb *probs, UInt32 symbol, UInt32 *ProbPrices) ++{ ++ UInt32 price = 0; ++ symbol |= 0x100; ++ do ++ { ++ price += GET_PRICEa(probs[symbol >> 8], (symbol >> 7) & 1); ++ symbol <<= 1; ++ } ++ while (symbol < 0x10000); ++ return price; ++} ++ ++static UInt32 LitEnc_GetPriceMatched(const CLzmaProb *probs, UInt32 symbol, UInt32 matchByte, UInt32 *ProbPrices) ++{ ++ UInt32 price = 0; ++ UInt32 offs = 0x100; ++ symbol |= 0x100; ++ do ++ { ++ matchByte <<= 1; ++ price += GET_PRICEa(probs[offs + (matchByte & offs) + (symbol >> 8)], (symbol >> 7) & 1); ++ symbol <<= 1; ++ offs &= ~(matchByte ^ symbol); ++ } ++ while (symbol < 0x10000); ++ return price; ++} ++ ++ ++static void RcTree_Encode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol) ++{ ++ UInt32 m = 1; ++ int i; ++ for (i = numBitLevels; i != 0;) ++ { ++ UInt32 bit; ++ i--; ++ bit = (symbol >> i) & 1; ++ RangeEnc_EncodeBit(rc, probs + m, bit); ++ m = (m << 1) | bit; ++ } ++} ++ ++static void RcTree_ReverseEncode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol) ++{ ++ UInt32 m = 1; ++ int i; ++ for (i = 0; i < numBitLevels; i++) ++ { ++ UInt32 bit = symbol & 1; ++ RangeEnc_EncodeBit(rc, probs + m, bit); ++ m = (m << 1) | bit; ++ symbol >>= 1; ++ } ++} ++ ++static UInt32 RcTree_GetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices) ++{ ++ UInt32 price = 0; ++ symbol |= (1 << numBitLevels); ++ while (symbol != 1) ++ { ++ price += GET_PRICEa(probs[symbol >> 1], symbol & 1); ++ symbol >>= 1; ++ } ++ return price; ++} ++ ++static UInt32 RcTree_ReverseGetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices) ++{ ++ UInt32 price = 0; ++ UInt32 m = 1; ++ int i; ++ for (i = numBitLevels; i != 0; i--) ++ { ++ UInt32 bit = symbol & 1; ++ symbol >>= 1; ++ price += GET_PRICEa(probs[m], bit); ++ m = (m << 1) | bit; ++ } ++ return price; ++} ++ ++ ++static void LenEnc_Init(CLenEnc *p) ++{ ++ unsigned i; ++ p->choice = p->choice2 = kProbInitValue; ++ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumLowBits); i++) ++ p->low[i] = kProbInitValue; ++ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumMidBits); i++) ++ p->mid[i] = kProbInitValue; ++ for (i = 0; i < kLenNumHighSymbols; i++) ++ p->high[i] = kProbInitValue; ++} ++ ++static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState) ++{ ++ if (symbol < kLenNumLowSymbols) ++ { ++ RangeEnc_EncodeBit(rc, &p->choice, 0); ++ RcTree_Encode(rc, p->low + (posState << kLenNumLowBits), kLenNumLowBits, symbol); ++ } ++ else ++ { ++ RangeEnc_EncodeBit(rc, &p->choice, 1); ++ if (symbol < kLenNumLowSymbols + kLenNumMidSymbols) ++ { ++ RangeEnc_EncodeBit(rc, &p->choice2, 0); ++ RcTree_Encode(rc, 
p->mid + (posState << kLenNumMidBits), kLenNumMidBits, symbol - kLenNumLowSymbols); ++ } ++ else ++ { ++ RangeEnc_EncodeBit(rc, &p->choice2, 1); ++ RcTree_Encode(rc, p->high, kLenNumHighBits, symbol - kLenNumLowSymbols - kLenNumMidSymbols); ++ } ++ } ++} ++ ++static void LenEnc_SetPrices(CLenEnc *p, UInt32 posState, UInt32 numSymbols, UInt32 *prices, UInt32 *ProbPrices) ++{ ++ UInt32 a0 = GET_PRICE_0a(p->choice); ++ UInt32 a1 = GET_PRICE_1a(p->choice); ++ UInt32 b0 = a1 + GET_PRICE_0a(p->choice2); ++ UInt32 b1 = a1 + GET_PRICE_1a(p->choice2); ++ UInt32 i = 0; ++ for (i = 0; i < kLenNumLowSymbols; i++) ++ { ++ if (i >= numSymbols) ++ return; ++ prices[i] = a0 + RcTree_GetPrice(p->low + (posState << kLenNumLowBits), kLenNumLowBits, i, ProbPrices); ++ } ++ for (; i < kLenNumLowSymbols + kLenNumMidSymbols; i++) ++ { ++ if (i >= numSymbols) ++ return; ++ prices[i] = b0 + RcTree_GetPrice(p->mid + (posState << kLenNumMidBits), kLenNumMidBits, i - kLenNumLowSymbols, ProbPrices); ++ } ++ for (; i < numSymbols; i++) ++ prices[i] = b1 + RcTree_GetPrice(p->high, kLenNumHighBits, i - kLenNumLowSymbols - kLenNumMidSymbols, ProbPrices); ++} ++ ++static void MY_FAST_CALL LenPriceEnc_UpdateTable(CLenPriceEnc *p, UInt32 posState, UInt32 *ProbPrices) ++{ ++ LenEnc_SetPrices(&p->p, posState, p->tableSize, p->prices[posState], ProbPrices); ++ p->counters[posState] = p->tableSize; ++} ++ ++static void LenPriceEnc_UpdateTables(CLenPriceEnc *p, UInt32 numPosStates, UInt32 *ProbPrices) ++{ ++ UInt32 posState; ++ for (posState = 0; posState < numPosStates; posState++) ++ LenPriceEnc_UpdateTable(p, posState, ProbPrices); ++} ++ ++static void LenEnc_Encode2(CLenPriceEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState, Bool updatePrice, UInt32 *ProbPrices) ++{ ++ LenEnc_Encode(&p->p, rc, symbol, posState); ++ if (updatePrice) ++ if (--p->counters[posState] == 0) ++ LenPriceEnc_UpdateTable(p, posState, ProbPrices); ++} ++ ++ ++ ++ ++static void MovePos(CLzmaEnc *p, UInt32 num) ++{ ++ #ifdef SHOW_STAT ++ ttt += num; ++ printf("\n MovePos %d", num); ++ #endif ++ if (num != 0) ++ { ++ p->additionalOffset += num; ++ p->matchFinder.Skip(p->matchFinderObj, num); ++ } ++} ++ ++static UInt32 ReadMatchDistances(CLzmaEnc *p, UInt32 *numDistancePairsRes) ++{ ++ UInt32 lenRes = 0, numPairs; ++ p->numAvail = p->matchFinder.GetNumAvailableBytes(p->matchFinderObj); ++ numPairs = p->matchFinder.GetMatches(p->matchFinderObj, p->matches); ++ #ifdef SHOW_STAT ++ printf("\n i = %d numPairs = %d ", ttt, numPairs / 2); ++ ttt++; ++ { ++ UInt32 i; ++ for (i = 0; i < numPairs; i += 2) ++ printf("%2d %6d | ", p->matches[i], p->matches[i + 1]); ++ } ++ #endif ++ if (numPairs > 0) ++ { ++ lenRes = p->matches[numPairs - 2]; ++ if (lenRes == p->numFastBytes) ++ { ++ const Byte *pby = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; ++ UInt32 distance = p->matches[numPairs - 1] + 1; ++ UInt32 numAvail = p->numAvail; ++ if (numAvail > LZMA_MATCH_LEN_MAX) ++ numAvail = LZMA_MATCH_LEN_MAX; ++ { ++ const Byte *pby2 = pby - distance; ++ for (; lenRes < numAvail && pby[lenRes] == pby2[lenRes]; lenRes++); ++ } ++ } ++ } ++ p->additionalOffset++; ++ *numDistancePairsRes = numPairs; ++ return lenRes; ++} ++ ++ ++#define MakeAsChar(p) (p)->backPrev = (UInt32)(-1); (p)->prev1IsChar = False; ++#define MakeAsShortRep(p) (p)->backPrev = 0; (p)->prev1IsChar = False; ++#define IsShortRep(p) ((p)->backPrev == 0) ++ ++static UInt32 GetRepLen1Price(CLzmaEnc *p, UInt32 state, UInt32 posState) ++{ ++ return ++ GET_PRICE_0(p->isRepG0[state]) + ++ 
GET_PRICE_0(p->isRep0Long[state][posState]); ++} ++ ++static UInt32 GetPureRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 state, UInt32 posState) ++{ ++ UInt32 price; ++ if (repIndex == 0) ++ { ++ price = GET_PRICE_0(p->isRepG0[state]); ++ price += GET_PRICE_1(p->isRep0Long[state][posState]); ++ } ++ else ++ { ++ price = GET_PRICE_1(p->isRepG0[state]); ++ if (repIndex == 1) ++ price += GET_PRICE_0(p->isRepG1[state]); ++ else ++ { ++ price += GET_PRICE_1(p->isRepG1[state]); ++ price += GET_PRICE(p->isRepG2[state], repIndex - 2); ++ } ++ } ++ return price; ++} ++ ++static UInt32 GetRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 len, UInt32 state, UInt32 posState) ++{ ++ return p->repLenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN] + ++ GetPureRepPrice(p, repIndex, state, posState); ++} ++ ++static UInt32 Backward(CLzmaEnc *p, UInt32 *backRes, UInt32 cur) ++{ ++ UInt32 posMem = p->opt[cur].posPrev; ++ UInt32 backMem = p->opt[cur].backPrev; ++ p->optimumEndIndex = cur; ++ do ++ { ++ if (p->opt[cur].prev1IsChar) ++ { ++ MakeAsChar(&p->opt[posMem]) ++ p->opt[posMem].posPrev = posMem - 1; ++ if (p->opt[cur].prev2) ++ { ++ p->opt[posMem - 1].prev1IsChar = False; ++ p->opt[posMem - 1].posPrev = p->opt[cur].posPrev2; ++ p->opt[posMem - 1].backPrev = p->opt[cur].backPrev2; ++ } ++ } ++ { ++ UInt32 posPrev = posMem; ++ UInt32 backCur = backMem; ++ ++ backMem = p->opt[posPrev].backPrev; ++ posMem = p->opt[posPrev].posPrev; ++ ++ p->opt[posPrev].backPrev = backCur; ++ p->opt[posPrev].posPrev = cur; ++ cur = posPrev; ++ } ++ } ++ while (cur != 0); ++ *backRes = p->opt[0].backPrev; ++ p->optimumCurrentIndex = p->opt[0].posPrev; ++ return p->optimumCurrentIndex; ++} ++ ++#define LIT_PROBS(pos, prevByte) (p->litProbs + ((((pos) & p->lpMask) << p->lc) + ((prevByte) >> (8 - p->lc))) * 0x300) ++ ++static UInt32 GetOptimum(CLzmaEnc *p, UInt32 position, UInt32 *backRes) ++{ ++ UInt32 numAvail, mainLen, numPairs, repMaxIndex, i, posState, lenEnd, len, cur; ++ UInt32 matchPrice, repMatchPrice, normalMatchPrice; ++ UInt32 reps[LZMA_NUM_REPS], repLens[LZMA_NUM_REPS]; ++ UInt32 *matches; ++ const Byte *data; ++ Byte curByte, matchByte; ++ if (p->optimumEndIndex != p->optimumCurrentIndex) ++ { ++ const COptimal *opt = &p->opt[p->optimumCurrentIndex]; ++ UInt32 lenRes = opt->posPrev - p->optimumCurrentIndex; ++ *backRes = opt->backPrev; ++ p->optimumCurrentIndex = opt->posPrev; ++ return lenRes; ++ } ++ p->optimumCurrentIndex = p->optimumEndIndex = 0; ++ ++ if (p->additionalOffset == 0) ++ mainLen = ReadMatchDistances(p, &numPairs); ++ else ++ { ++ mainLen = p->longestMatchLength; ++ numPairs = p->numPairs; ++ } ++ ++ numAvail = p->numAvail; ++ if (numAvail < 2) ++ { ++ *backRes = (UInt32)(-1); ++ return 1; ++ } ++ if (numAvail > LZMA_MATCH_LEN_MAX) ++ numAvail = LZMA_MATCH_LEN_MAX; ++ ++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; ++ repMaxIndex = 0; ++ for (i = 0; i < LZMA_NUM_REPS; i++) ++ { ++ UInt32 lenTest; ++ const Byte *data2; ++ reps[i] = p->reps[i]; ++ data2 = data - (reps[i] + 1); ++ if (data[0] != data2[0] || data[1] != data2[1]) ++ { ++ repLens[i] = 0; ++ continue; ++ } ++ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++); ++ repLens[i] = lenTest; ++ if (lenTest > repLens[repMaxIndex]) ++ repMaxIndex = i; ++ } ++ if (repLens[repMaxIndex] >= p->numFastBytes) ++ { ++ UInt32 lenRes; ++ *backRes = repMaxIndex; ++ lenRes = repLens[repMaxIndex]; ++ MovePos(p, lenRes - 1); ++ return lenRes; ++ } ++ ++ matches = p->matches; ++ if (mainLen >= 
p->numFastBytes) ++ { ++ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS; ++ MovePos(p, mainLen - 1); ++ return mainLen; ++ } ++ curByte = *data; ++ matchByte = *(data - (reps[0] + 1)); ++ ++ if (mainLen < 2 && curByte != matchByte && repLens[repMaxIndex] < 2) ++ { ++ *backRes = (UInt32)-1; ++ return 1; ++ } ++ ++ p->opt[0].state = (CState)p->state; ++ ++ posState = (position & p->pbMask); ++ ++ { ++ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1)); ++ p->opt[1].price = GET_PRICE_0(p->isMatch[p->state][posState]) + ++ (!IsCharState(p->state) ? ++ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) : ++ LitEnc_GetPrice(probs, curByte, p->ProbPrices)); ++ } ++ ++ MakeAsChar(&p->opt[1]); ++ ++ matchPrice = GET_PRICE_1(p->isMatch[p->state][posState]); ++ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[p->state]); ++ ++ if (matchByte == curByte) ++ { ++ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, p->state, posState); ++ if (shortRepPrice < p->opt[1].price) ++ { ++ p->opt[1].price = shortRepPrice; ++ MakeAsShortRep(&p->opt[1]); ++ } ++ } ++ lenEnd = ((mainLen >= repLens[repMaxIndex]) ? mainLen : repLens[repMaxIndex]); ++ ++ if (lenEnd < 2) ++ { ++ *backRes = p->opt[1].backPrev; ++ return 1; ++ } ++ ++ p->opt[1].posPrev = 0; ++ for (i = 0; i < LZMA_NUM_REPS; i++) ++ p->opt[0].backs[i] = reps[i]; ++ ++ len = lenEnd; ++ do ++ p->opt[len--].price = kInfinityPrice; ++ while (len >= 2); ++ ++ for (i = 0; i < LZMA_NUM_REPS; i++) ++ { ++ UInt32 repLen = repLens[i]; ++ UInt32 price; ++ if (repLen < 2) ++ continue; ++ price = repMatchPrice + GetPureRepPrice(p, i, p->state, posState); ++ do ++ { ++ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][repLen - 2]; ++ COptimal *opt = &p->opt[repLen]; ++ if (curAndLenPrice < opt->price) ++ { ++ opt->price = curAndLenPrice; ++ opt->posPrev = 0; ++ opt->backPrev = i; ++ opt->prev1IsChar = False; ++ } ++ } ++ while (--repLen >= 2); ++ } ++ ++ normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[p->state]); ++ ++ len = ((repLens[0] >= 2) ? 
repLens[0] + 1 : 2); ++ if (len <= mainLen) ++ { ++ UInt32 offs = 0; ++ while (len > matches[offs]) ++ offs += 2; ++ for (; ; len++) ++ { ++ COptimal *opt; ++ UInt32 distance = matches[offs + 1]; ++ ++ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN]; ++ UInt32 lenToPosState = GetLenToPosState(len); ++ if (distance < kNumFullDistances) ++ curAndLenPrice += p->distancesPrices[lenToPosState][distance]; ++ else ++ { ++ UInt32 slot; ++ GetPosSlot2(distance, slot); ++ curAndLenPrice += p->alignPrices[distance & kAlignMask] + p->posSlotPrices[lenToPosState][slot]; ++ } ++ opt = &p->opt[len]; ++ if (curAndLenPrice < opt->price) ++ { ++ opt->price = curAndLenPrice; ++ opt->posPrev = 0; ++ opt->backPrev = distance + LZMA_NUM_REPS; ++ opt->prev1IsChar = False; ++ } ++ if (len == matches[offs]) ++ { ++ offs += 2; ++ if (offs == numPairs) ++ break; ++ } ++ } ++ } ++ ++ cur = 0; ++ ++ #ifdef SHOW_STAT2 ++ if (position >= 0) ++ { ++ unsigned i; ++ printf("\n pos = %4X", position); ++ for (i = cur; i <= lenEnd; i++) ++ printf("\nprice[%4X] = %d", position - cur + i, p->opt[i].price); ++ } ++ #endif ++ ++ for (;;) ++ { ++ UInt32 numAvailFull, newLen, numPairs, posPrev, state, posState, startLen; ++ UInt32 curPrice, curAnd1Price, matchPrice, repMatchPrice; ++ Bool nextIsChar; ++ Byte curByte, matchByte; ++ const Byte *data; ++ COptimal *curOpt; ++ COptimal *nextOpt; ++ ++ cur++; ++ if (cur == lenEnd) ++ return Backward(p, backRes, cur); ++ ++ newLen = ReadMatchDistances(p, &numPairs); ++ if (newLen >= p->numFastBytes) ++ { ++ p->numPairs = numPairs; ++ p->longestMatchLength = newLen; ++ return Backward(p, backRes, cur); ++ } ++ position++; ++ curOpt = &p->opt[cur]; ++ posPrev = curOpt->posPrev; ++ if (curOpt->prev1IsChar) ++ { ++ posPrev--; ++ if (curOpt->prev2) ++ { ++ state = p->opt[curOpt->posPrev2].state; ++ if (curOpt->backPrev2 < LZMA_NUM_REPS) ++ state = kRepNextStates[state]; ++ else ++ state = kMatchNextStates[state]; ++ } ++ else ++ state = p->opt[posPrev].state; ++ state = kLiteralNextStates[state]; ++ } ++ else ++ state = p->opt[posPrev].state; ++ if (posPrev == cur - 1) ++ { ++ if (IsShortRep(curOpt)) ++ state = kShortRepNextStates[state]; ++ else ++ state = kLiteralNextStates[state]; ++ } ++ else ++ { ++ UInt32 pos; ++ const COptimal *prevOpt; ++ if (curOpt->prev1IsChar && curOpt->prev2) ++ { ++ posPrev = curOpt->posPrev2; ++ pos = curOpt->backPrev2; ++ state = kRepNextStates[state]; ++ } ++ else ++ { ++ pos = curOpt->backPrev; ++ if (pos < LZMA_NUM_REPS) ++ state = kRepNextStates[state]; ++ else ++ state = kMatchNextStates[state]; ++ } ++ prevOpt = &p->opt[posPrev]; ++ if (pos < LZMA_NUM_REPS) ++ { ++ UInt32 i; ++ reps[0] = prevOpt->backs[pos]; ++ for (i = 1; i <= pos; i++) ++ reps[i] = prevOpt->backs[i - 1]; ++ for (; i < LZMA_NUM_REPS; i++) ++ reps[i] = prevOpt->backs[i]; ++ } ++ else ++ { ++ UInt32 i; ++ reps[0] = (pos - LZMA_NUM_REPS); ++ for (i = 1; i < LZMA_NUM_REPS; i++) ++ reps[i] = prevOpt->backs[i - 1]; ++ } ++ } ++ curOpt->state = (CState)state; ++ ++ curOpt->backs[0] = reps[0]; ++ curOpt->backs[1] = reps[1]; ++ curOpt->backs[2] = reps[2]; ++ curOpt->backs[3] = reps[3]; ++ ++ curPrice = curOpt->price; ++ nextIsChar = False; ++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; ++ curByte = *data; ++ matchByte = *(data - (reps[0] + 1)); ++ ++ posState = (position & p->pbMask); ++ ++ curAnd1Price = curPrice + GET_PRICE_0(p->isMatch[state][posState]); ++ { ++ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1)); ++ 
curAnd1Price += ++ (!IsCharState(state) ? ++ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) : ++ LitEnc_GetPrice(probs, curByte, p->ProbPrices)); ++ } ++ ++ nextOpt = &p->opt[cur + 1]; ++ ++ if (curAnd1Price < nextOpt->price) ++ { ++ nextOpt->price = curAnd1Price; ++ nextOpt->posPrev = cur; ++ MakeAsChar(nextOpt); ++ nextIsChar = True; ++ } ++ ++ matchPrice = curPrice + GET_PRICE_1(p->isMatch[state][posState]); ++ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[state]); ++ ++ if (matchByte == curByte && !(nextOpt->posPrev < cur && nextOpt->backPrev == 0)) ++ { ++ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, state, posState); ++ if (shortRepPrice <= nextOpt->price) ++ { ++ nextOpt->price = shortRepPrice; ++ nextOpt->posPrev = cur; ++ MakeAsShortRep(nextOpt); ++ nextIsChar = True; ++ } ++ } ++ numAvailFull = p->numAvail; ++ { ++ UInt32 temp = kNumOpts - 1 - cur; ++ if (temp < numAvailFull) ++ numAvailFull = temp; ++ } ++ ++ if (numAvailFull < 2) ++ continue; ++ numAvail = (numAvailFull <= p->numFastBytes ? numAvailFull : p->numFastBytes); ++ ++ if (!nextIsChar && matchByte != curByte) /* speed optimization */ ++ { ++ /* try Literal + rep0 */ ++ UInt32 temp; ++ UInt32 lenTest2; ++ const Byte *data2 = data - (reps[0] + 1); ++ UInt32 limit = p->numFastBytes + 1; ++ if (limit > numAvailFull) ++ limit = numAvailFull; ++ ++ for (temp = 1; temp < limit && data[temp] == data2[temp]; temp++); ++ lenTest2 = temp - 1; ++ if (lenTest2 >= 2) ++ { ++ UInt32 state2 = kLiteralNextStates[state]; ++ UInt32 posStateNext = (position + 1) & p->pbMask; ++ UInt32 nextRepMatchPrice = curAnd1Price + ++ GET_PRICE_1(p->isMatch[state2][posStateNext]) + ++ GET_PRICE_1(p->isRep[state2]); ++ /* for (; lenTest2 >= 2; lenTest2--) */ ++ { ++ UInt32 curAndLenPrice; ++ COptimal *opt; ++ UInt32 offset = cur + 1 + lenTest2; ++ while (lenEnd < offset) ++ p->opt[++lenEnd].price = kInfinityPrice; ++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext); ++ opt = &p->opt[offset]; ++ if (curAndLenPrice < opt->price) ++ { ++ opt->price = curAndLenPrice; ++ opt->posPrev = cur + 1; ++ opt->backPrev = 0; ++ opt->prev1IsChar = True; ++ opt->prev2 = False; ++ } ++ } ++ } ++ } ++ ++ startLen = 2; /* speed optimization */ ++ { ++ UInt32 repIndex; ++ for (repIndex = 0; repIndex < LZMA_NUM_REPS; repIndex++) ++ { ++ UInt32 lenTest; ++ UInt32 lenTestTemp; ++ UInt32 price; ++ const Byte *data2 = data - (reps[repIndex] + 1); ++ if (data[0] != data2[0] || data[1] != data2[1]) ++ continue; ++ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++); ++ while (lenEnd < cur + lenTest) ++ p->opt[++lenEnd].price = kInfinityPrice; ++ lenTestTemp = lenTest; ++ price = repMatchPrice + GetPureRepPrice(p, repIndex, state, posState); ++ do ++ { ++ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][lenTest - 2]; ++ COptimal *opt = &p->opt[cur + lenTest]; ++ if (curAndLenPrice < opt->price) ++ { ++ opt->price = curAndLenPrice; ++ opt->posPrev = cur; ++ opt->backPrev = repIndex; ++ opt->prev1IsChar = False; ++ } ++ } ++ while (--lenTest >= 2); ++ lenTest = lenTestTemp; ++ ++ if (repIndex == 0) ++ startLen = lenTest + 1; ++ ++ /* if (_maxMode) */ ++ { ++ UInt32 lenTest2 = lenTest + 1; ++ UInt32 limit = lenTest2 + p->numFastBytes; ++ UInt32 nextRepMatchPrice; ++ if (limit > numAvailFull) ++ limit = numAvailFull; ++ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++); ++ lenTest2 -= lenTest + 1; ++ if (lenTest2 >= 2) ++ { ++ UInt32 state2 = 
kRepNextStates[state]; ++ UInt32 posStateNext = (position + lenTest) & p->pbMask; ++ UInt32 curAndLenCharPrice = ++ price + p->repLenEnc.prices[posState][lenTest - 2] + ++ GET_PRICE_0(p->isMatch[state2][posStateNext]) + ++ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]), ++ data[lenTest], data2[lenTest], p->ProbPrices); ++ state2 = kLiteralNextStates[state2]; ++ posStateNext = (position + lenTest + 1) & p->pbMask; ++ nextRepMatchPrice = curAndLenCharPrice + ++ GET_PRICE_1(p->isMatch[state2][posStateNext]) + ++ GET_PRICE_1(p->isRep[state2]); ++ ++ /* for (; lenTest2 >= 2; lenTest2--) */ ++ { ++ UInt32 curAndLenPrice; ++ COptimal *opt; ++ UInt32 offset = cur + lenTest + 1 + lenTest2; ++ while (lenEnd < offset) ++ p->opt[++lenEnd].price = kInfinityPrice; ++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext); ++ opt = &p->opt[offset]; ++ if (curAndLenPrice < opt->price) ++ { ++ opt->price = curAndLenPrice; ++ opt->posPrev = cur + lenTest + 1; ++ opt->backPrev = 0; ++ opt->prev1IsChar = True; ++ opt->prev2 = True; ++ opt->posPrev2 = cur; ++ opt->backPrev2 = repIndex; ++ } ++ } ++ } ++ } ++ } ++ } ++ /* for (UInt32 lenTest = 2; lenTest <= newLen; lenTest++) */ ++ if (newLen > numAvail) ++ { ++ newLen = numAvail; ++ for (numPairs = 0; newLen > matches[numPairs]; numPairs += 2); ++ matches[numPairs] = newLen; ++ numPairs += 2; ++ } ++ if (newLen >= startLen) ++ { ++ UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[state]); ++ UInt32 offs, curBack, posSlot; ++ UInt32 lenTest; ++ while (lenEnd < cur + newLen) ++ p->opt[++lenEnd].price = kInfinityPrice; ++ ++ offs = 0; ++ while (startLen > matches[offs]) ++ offs += 2; ++ curBack = matches[offs + 1]; ++ GetPosSlot2(curBack, posSlot); ++ for (lenTest = /*2*/ startLen; ; lenTest++) ++ { ++ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][lenTest - LZMA_MATCH_LEN_MIN]; ++ UInt32 lenToPosState = GetLenToPosState(lenTest); ++ COptimal *opt; ++ if (curBack < kNumFullDistances) ++ curAndLenPrice += p->distancesPrices[lenToPosState][curBack]; ++ else ++ curAndLenPrice += p->posSlotPrices[lenToPosState][posSlot] + p->alignPrices[curBack & kAlignMask]; ++ ++ opt = &p->opt[cur + lenTest]; ++ if (curAndLenPrice < opt->price) ++ { ++ opt->price = curAndLenPrice; ++ opt->posPrev = cur; ++ opt->backPrev = curBack + LZMA_NUM_REPS; ++ opt->prev1IsChar = False; ++ } ++ ++ if (/*_maxMode && */lenTest == matches[offs]) ++ { ++ /* Try Match + Literal + Rep0 */ ++ const Byte *data2 = data - (curBack + 1); ++ UInt32 lenTest2 = lenTest + 1; ++ UInt32 limit = lenTest2 + p->numFastBytes; ++ UInt32 nextRepMatchPrice; ++ if (limit > numAvailFull) ++ limit = numAvailFull; ++ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++); ++ lenTest2 -= lenTest + 1; ++ if (lenTest2 >= 2) ++ { ++ UInt32 state2 = kMatchNextStates[state]; ++ UInt32 posStateNext = (position + lenTest) & p->pbMask; ++ UInt32 curAndLenCharPrice = curAndLenPrice + ++ GET_PRICE_0(p->isMatch[state2][posStateNext]) + ++ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]), ++ data[lenTest], data2[lenTest], p->ProbPrices); ++ state2 = kLiteralNextStates[state2]; ++ posStateNext = (posStateNext + 1) & p->pbMask; ++ nextRepMatchPrice = curAndLenCharPrice + ++ GET_PRICE_1(p->isMatch[state2][posStateNext]) + ++ GET_PRICE_1(p->isRep[state2]); ++ ++ /* for (; lenTest2 >= 2; lenTest2--) */ ++ { ++ UInt32 offset = cur + lenTest + 1 + lenTest2; ++ UInt32 curAndLenPrice; ++ COptimal *opt; ++ while 
(lenEnd < offset) ++ p->opt[++lenEnd].price = kInfinityPrice; ++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext); ++ opt = &p->opt[offset]; ++ if (curAndLenPrice < opt->price) ++ { ++ opt->price = curAndLenPrice; ++ opt->posPrev = cur + lenTest + 1; ++ opt->backPrev = 0; ++ opt->prev1IsChar = True; ++ opt->prev2 = True; ++ opt->posPrev2 = cur; ++ opt->backPrev2 = curBack + LZMA_NUM_REPS; ++ } ++ } ++ } ++ offs += 2; ++ if (offs == numPairs) ++ break; ++ curBack = matches[offs + 1]; ++ if (curBack >= kNumFullDistances) ++ GetPosSlot2(curBack, posSlot); ++ } ++ } ++ } ++ } ++} ++ ++#define ChangePair(smallDist, bigDist) (((bigDist) >> 7) > (smallDist)) ++ ++static UInt32 GetOptimumFast(CLzmaEnc *p, UInt32 *backRes) ++{ ++ UInt32 numAvail, mainLen, mainDist, numPairs, repIndex, repLen, i; ++ const Byte *data; ++ const UInt32 *matches; ++ ++ if (p->additionalOffset == 0) ++ mainLen = ReadMatchDistances(p, &numPairs); ++ else ++ { ++ mainLen = p->longestMatchLength; ++ numPairs = p->numPairs; ++ } ++ ++ numAvail = p->numAvail; ++ *backRes = (UInt32)-1; ++ if (numAvail < 2) ++ return 1; ++ if (numAvail > LZMA_MATCH_LEN_MAX) ++ numAvail = LZMA_MATCH_LEN_MAX; ++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; ++ ++ repLen = repIndex = 0; ++ for (i = 0; i < LZMA_NUM_REPS; i++) ++ { ++ UInt32 len; ++ const Byte *data2 = data - (p->reps[i] + 1); ++ if (data[0] != data2[0] || data[1] != data2[1]) ++ continue; ++ for (len = 2; len < numAvail && data[len] == data2[len]; len++); ++ if (len >= p->numFastBytes) ++ { ++ *backRes = i; ++ MovePos(p, len - 1); ++ return len; ++ } ++ if (len > repLen) ++ { ++ repIndex = i; ++ repLen = len; ++ } ++ } ++ ++ matches = p->matches; ++ if (mainLen >= p->numFastBytes) ++ { ++ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS; ++ MovePos(p, mainLen - 1); ++ return mainLen; ++ } ++ ++ mainDist = 0; /* for GCC */ ++ if (mainLen >= 2) ++ { ++ mainDist = matches[numPairs - 1]; ++ while (numPairs > 2 && mainLen == matches[numPairs - 4] + 1) ++ { ++ if (!ChangePair(matches[numPairs - 3], mainDist)) ++ break; ++ numPairs -= 2; ++ mainLen = matches[numPairs - 2]; ++ mainDist = matches[numPairs - 1]; ++ } ++ if (mainLen == 2 && mainDist >= 0x80) ++ mainLen = 1; ++ } ++ ++ if (repLen >= 2 && ( ++ (repLen + 1 >= mainLen) || ++ (repLen + 2 >= mainLen && mainDist >= (1 << 9)) || ++ (repLen + 3 >= mainLen && mainDist >= (1 << 15)))) ++ { ++ *backRes = repIndex; ++ MovePos(p, repLen - 1); ++ return repLen; ++ } ++ ++ if (mainLen < 2 || numAvail <= 2) ++ return 1; ++ ++ p->longestMatchLength = ReadMatchDistances(p, &p->numPairs); ++ if (p->longestMatchLength >= 2) ++ { ++ UInt32 newDistance = matches[p->numPairs - 1]; ++ if ((p->longestMatchLength >= mainLen && newDistance < mainDist) || ++ (p->longestMatchLength == mainLen + 1 && !ChangePair(mainDist, newDistance)) || ++ (p->longestMatchLength > mainLen + 1) || ++ (p->longestMatchLength + 1 >= mainLen && mainLen >= 3 && ChangePair(newDistance, mainDist))) ++ return 1; ++ } ++ ++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; ++ for (i = 0; i < LZMA_NUM_REPS; i++) ++ { ++ UInt32 len, limit; ++ const Byte *data2 = data - (p->reps[i] + 1); ++ if (data[0] != data2[0] || data[1] != data2[1]) ++ continue; ++ limit = mainLen - 1; ++ for (len = 2; len < limit && data[len] == data2[len]; len++); ++ if (len >= limit) ++ return 1; ++ } ++ *backRes = mainDist + LZMA_NUM_REPS; ++ MovePos(p, mainLen - 2); ++ return mainLen; ++} ++ ++static void WriteEndMarker(CLzmaEnc 
*p, UInt32 posState) ++{ ++ UInt32 len; ++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1); ++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0); ++ p->state = kMatchNextStates[p->state]; ++ len = LZMA_MATCH_LEN_MIN; ++ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices); ++ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, (1 << kNumPosSlotBits) - 1); ++ RangeEnc_EncodeDirectBits(&p->rc, (((UInt32)1 << 30) - 1) >> kNumAlignBits, 30 - kNumAlignBits); ++ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, kAlignMask); ++} ++ ++static SRes CheckErrors(CLzmaEnc *p) ++{ ++ if (p->result != SZ_OK) ++ return p->result; ++ if (p->rc.res != SZ_OK) ++ p->result = SZ_ERROR_WRITE; ++ if (p->matchFinderBase.result != SZ_OK) ++ p->result = SZ_ERROR_READ; ++ if (p->result != SZ_OK) ++ p->finished = True; ++ return p->result; ++} ++ ++static SRes Flush(CLzmaEnc *p, UInt32 nowPos) ++{ ++ /* ReleaseMFStream(); */ ++ p->finished = True; ++ if (p->writeEndMark) ++ WriteEndMarker(p, nowPos & p->pbMask); ++ RangeEnc_FlushData(&p->rc); ++ RangeEnc_FlushStream(&p->rc); ++ return CheckErrors(p); ++} ++ ++static void FillAlignPrices(CLzmaEnc *p) ++{ ++ UInt32 i; ++ for (i = 0; i < kAlignTableSize; i++) ++ p->alignPrices[i] = RcTree_ReverseGetPrice(p->posAlignEncoder, kNumAlignBits, i, p->ProbPrices); ++ p->alignPriceCount = 0; ++} ++ ++static void FillDistancesPrices(CLzmaEnc *p) ++{ ++ UInt32 tempPrices[kNumFullDistances]; ++ UInt32 i, lenToPosState; ++ for (i = kStartPosModelIndex; i < kNumFullDistances; i++) ++ { ++ UInt32 posSlot = GetPosSlot1(i); ++ UInt32 footerBits = ((posSlot >> 1) - 1); ++ UInt32 base = ((2 | (posSlot & 1)) << footerBits); ++ tempPrices[i] = RcTree_ReverseGetPrice(p->posEncoders + base - posSlot - 1, footerBits, i - base, p->ProbPrices); ++ } ++ ++ for (lenToPosState = 0; lenToPosState < kNumLenToPosStates; lenToPosState++) ++ { ++ UInt32 posSlot; ++ const CLzmaProb *encoder = p->posSlotEncoder[lenToPosState]; ++ UInt32 *posSlotPrices = p->posSlotPrices[lenToPosState]; ++ for (posSlot = 0; posSlot < p->distTableSize; posSlot++) ++ posSlotPrices[posSlot] = RcTree_GetPrice(encoder, kNumPosSlotBits, posSlot, p->ProbPrices); ++ for (posSlot = kEndPosModelIndex; posSlot < p->distTableSize; posSlot++) ++ posSlotPrices[posSlot] += ((((posSlot >> 1) - 1) - kNumAlignBits) << kNumBitPriceShiftBits); ++ ++ { ++ UInt32 *distancesPrices = p->distancesPrices[lenToPosState]; ++ UInt32 i; ++ for (i = 0; i < kStartPosModelIndex; i++) ++ distancesPrices[i] = posSlotPrices[i]; ++ for (; i < kNumFullDistances; i++) ++ distancesPrices[i] = posSlotPrices[GetPosSlot1(i)] + tempPrices[i]; ++ } ++ } ++ p->matchPriceCount = 0; ++} ++ ++void LzmaEnc_Construct(CLzmaEnc *p) ++{ ++ RangeEnc_Construct(&p->rc); ++ MatchFinder_Construct(&p->matchFinderBase); ++ #ifndef _7ZIP_ST ++ MatchFinderMt_Construct(&p->matchFinderMt); ++ p->matchFinderMt.MatchFinder = &p->matchFinderBase; ++ #endif ++ ++ { ++ CLzmaEncProps props; ++ LzmaEncProps_Init(&props); ++ LzmaEnc_SetProps(p, &props); ++ } ++ ++ #ifndef LZMA_LOG_BSR ++ LzmaEnc_FastPosInit(p->g_FastPos); ++ #endif ++ ++ LzmaEnc_InitPriceTables(p->ProbPrices); ++ p->litProbs = 0; ++ p->saveState.litProbs = 0; ++} ++ ++CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc) ++{ ++ void *p; ++ p = alloc->Alloc(alloc, sizeof(CLzmaEnc)); ++ if (p != 0) ++ LzmaEnc_Construct((CLzmaEnc *)p); ++ return p; ++} ++ ++void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAlloc *alloc) ++{ ++ 
alloc->Free(alloc, p->litProbs); ++ alloc->Free(alloc, p->saveState.litProbs); ++ p->litProbs = 0; ++ p->saveState.litProbs = 0; ++} ++ ++void LzmaEnc_Destruct(CLzmaEnc *p, ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ #ifndef _7ZIP_ST ++ MatchFinderMt_Destruct(&p->matchFinderMt, allocBig); ++ #endif ++ MatchFinder_Free(&p->matchFinderBase, allocBig); ++ LzmaEnc_FreeLits(p, alloc); ++ RangeEnc_Free(&p->rc, alloc); ++} ++ ++void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ LzmaEnc_Destruct((CLzmaEnc *)p, alloc, allocBig); ++ alloc->Free(alloc, p); ++} ++ ++static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, Bool useLimits, UInt32 maxPackSize, UInt32 maxUnpackSize) ++{ ++ UInt32 nowPos32, startPos32; ++ if (p->needInit) ++ { ++ p->matchFinder.Init(p->matchFinderObj); ++ p->needInit = 0; ++ } ++ ++ if (p->finished) ++ return p->result; ++ RINOK(CheckErrors(p)); ++ ++ nowPos32 = (UInt32)p->nowPos64; ++ startPos32 = nowPos32; ++ ++ if (p->nowPos64 == 0) ++ { ++ UInt32 numPairs; ++ Byte curByte; ++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0) ++ return Flush(p, nowPos32); ++ ReadMatchDistances(p, &numPairs); ++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][0], 0); ++ p->state = kLiteralNextStates[p->state]; ++ curByte = p->matchFinder.GetIndexByte(p->matchFinderObj, 0 - p->additionalOffset); ++ LitEnc_Encode(&p->rc, p->litProbs, curByte); ++ p->additionalOffset--; ++ nowPos32++; ++ } ++ ++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) != 0) ++ for (;;) ++ { ++ UInt32 pos, len, posState; ++ ++ if (p->fastMode) ++ len = GetOptimumFast(p, &pos); ++ else ++ len = GetOptimum(p, nowPos32, &pos); ++ ++ #ifdef SHOW_STAT2 ++ printf("\n pos = %4X, len = %d pos = %d", nowPos32, len, pos); ++ #endif ++ ++ posState = nowPos32 & p->pbMask; ++ if (len == 1 && pos == (UInt32)-1) ++ { ++ Byte curByte; ++ CLzmaProb *probs; ++ const Byte *data; ++ ++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 0); ++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset; ++ curByte = *data; ++ probs = LIT_PROBS(nowPos32, *(data - 1)); ++ if (IsCharState(p->state)) ++ LitEnc_Encode(&p->rc, probs, curByte); ++ else ++ LitEnc_EncodeMatched(&p->rc, probs, curByte, *(data - p->reps[0] - 1)); ++ p->state = kLiteralNextStates[p->state]; ++ } ++ else ++ { ++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1); ++ if (pos < LZMA_NUM_REPS) ++ { ++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 1); ++ if (pos == 0) ++ { ++ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 0); ++ RangeEnc_EncodeBit(&p->rc, &p->isRep0Long[p->state][posState], ((len == 1) ? 
0 : 1)); ++ } ++ else ++ { ++ UInt32 distance = p->reps[pos]; ++ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 1); ++ if (pos == 1) ++ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 0); ++ else ++ { ++ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 1); ++ RangeEnc_EncodeBit(&p->rc, &p->isRepG2[p->state], pos - 2); ++ if (pos == 3) ++ p->reps[3] = p->reps[2]; ++ p->reps[2] = p->reps[1]; ++ } ++ p->reps[1] = p->reps[0]; ++ p->reps[0] = distance; ++ } ++ if (len == 1) ++ p->state = kShortRepNextStates[p->state]; ++ else ++ { ++ LenEnc_Encode2(&p->repLenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices); ++ p->state = kRepNextStates[p->state]; ++ } ++ } ++ else ++ { ++ UInt32 posSlot; ++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0); ++ p->state = kMatchNextStates[p->state]; ++ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices); ++ pos -= LZMA_NUM_REPS; ++ GetPosSlot(pos, posSlot); ++ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, posSlot); ++ ++ if (posSlot >= kStartPosModelIndex) ++ { ++ UInt32 footerBits = ((posSlot >> 1) - 1); ++ UInt32 base = ((2 | (posSlot & 1)) << footerBits); ++ UInt32 posReduced = pos - base; ++ ++ if (posSlot < kEndPosModelIndex) ++ RcTree_ReverseEncode(&p->rc, p->posEncoders + base - posSlot - 1, footerBits, posReduced); ++ else ++ { ++ RangeEnc_EncodeDirectBits(&p->rc, posReduced >> kNumAlignBits, footerBits - kNumAlignBits); ++ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, posReduced & kAlignMask); ++ p->alignPriceCount++; ++ } ++ } ++ p->reps[3] = p->reps[2]; ++ p->reps[2] = p->reps[1]; ++ p->reps[1] = p->reps[0]; ++ p->reps[0] = pos; ++ p->matchPriceCount++; ++ } ++ } ++ p->additionalOffset -= len; ++ nowPos32 += len; ++ if (p->additionalOffset == 0) ++ { ++ UInt32 processed; ++ if (!p->fastMode) ++ { ++ if (p->matchPriceCount >= (1 << 7)) ++ FillDistancesPrices(p); ++ if (p->alignPriceCount >= kAlignTableSize) ++ FillAlignPrices(p); ++ } ++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0) ++ break; ++ processed = nowPos32 - startPos32; ++ if (useLimits) ++ { ++ if (processed + kNumOpts + 300 >= maxUnpackSize || ++ RangeEnc_GetProcessed(&p->rc) + kNumOpts * 2 >= maxPackSize) ++ break; ++ } ++ else if (processed >= (1 << 15)) ++ { ++ p->nowPos64 += nowPos32 - startPos32; ++ return CheckErrors(p); ++ } ++ } ++ } ++ p->nowPos64 += nowPos32 - startPos32; ++ return Flush(p, nowPos32); ++} ++ ++#define kBigHashDicLimit ((UInt32)1 << 24) ++ ++static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ UInt32 beforeSize = kNumOpts; ++ Bool btMode; ++ if (!RangeEnc_Alloc(&p->rc, alloc)) ++ return SZ_ERROR_MEM; ++ btMode = (p->matchFinderBase.btMode != 0); ++ #ifndef _7ZIP_ST ++ p->mtMode = (p->multiThread && !p->fastMode && btMode); ++ #endif ++ ++ { ++ unsigned lclp = p->lc + p->lp; ++ if (p->litProbs == 0 || p->saveState.litProbs == 0 || p->lclp != lclp) ++ { ++ LzmaEnc_FreeLits(p, alloc); ++ p->litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb)); ++ p->saveState.litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb)); ++ if (p->litProbs == 0 || p->saveState.litProbs == 0) ++ { ++ LzmaEnc_FreeLits(p, alloc); ++ return SZ_ERROR_MEM; ++ } ++ p->lclp = lclp; ++ } ++ } ++ ++ p->matchFinderBase.bigHash = (p->dictSize > kBigHashDicLimit); ++ ++ if (beforeSize + p->dictSize < keepWindowSize) ++ beforeSize = keepWindowSize 
- p->dictSize; ++ ++ #ifndef _7ZIP_ST ++ if (p->mtMode) ++ { ++ RINOK(MatchFinderMt_Create(&p->matchFinderMt, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig)); ++ p->matchFinderObj = &p->matchFinderMt; ++ MatchFinderMt_CreateVTable(&p->matchFinderMt, &p->matchFinder); ++ } ++ else ++ #endif ++ { ++ if (!MatchFinder_Create(&p->matchFinderBase, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig)) ++ return SZ_ERROR_MEM; ++ p->matchFinderObj = &p->matchFinderBase; ++ MatchFinder_CreateVTable(&p->matchFinderBase, &p->matchFinder); ++ } ++ return SZ_OK; ++} ++ ++void LzmaEnc_Init(CLzmaEnc *p) ++{ ++ UInt32 i; ++ p->state = 0; ++ for (i = 0 ; i < LZMA_NUM_REPS; i++) ++ p->reps[i] = 0; ++ ++ RangeEnc_Init(&p->rc); ++ ++ ++ for (i = 0; i < kNumStates; i++) ++ { ++ UInt32 j; ++ for (j = 0; j < LZMA_NUM_PB_STATES_MAX; j++) ++ { ++ p->isMatch[i][j] = kProbInitValue; ++ p->isRep0Long[i][j] = kProbInitValue; ++ } ++ p->isRep[i] = kProbInitValue; ++ p->isRepG0[i] = kProbInitValue; ++ p->isRepG1[i] = kProbInitValue; ++ p->isRepG2[i] = kProbInitValue; ++ } ++ ++ { ++ UInt32 num = 0x300 << (p->lp + p->lc); ++ for (i = 0; i < num; i++) ++ p->litProbs[i] = kProbInitValue; ++ } ++ ++ { ++ for (i = 0; i < kNumLenToPosStates; i++) ++ { ++ CLzmaProb *probs = p->posSlotEncoder[i]; ++ UInt32 j; ++ for (j = 0; j < (1 << kNumPosSlotBits); j++) ++ probs[j] = kProbInitValue; ++ } ++ } ++ { ++ for (i = 0; i < kNumFullDistances - kEndPosModelIndex; i++) ++ p->posEncoders[i] = kProbInitValue; ++ } ++ ++ LenEnc_Init(&p->lenEnc.p); ++ LenEnc_Init(&p->repLenEnc.p); ++ ++ for (i = 0; i < (1 << kNumAlignBits); i++) ++ p->posAlignEncoder[i] = kProbInitValue; ++ ++ p->optimumEndIndex = 0; ++ p->optimumCurrentIndex = 0; ++ p->additionalOffset = 0; ++ ++ p->pbMask = (1 << p->pb) - 1; ++ p->lpMask = (1 << p->lp) - 1; ++} ++ ++void LzmaEnc_InitPrices(CLzmaEnc *p) ++{ ++ if (!p->fastMode) ++ { ++ FillDistancesPrices(p); ++ FillAlignPrices(p); ++ } ++ ++ p->lenEnc.tableSize = ++ p->repLenEnc.tableSize = ++ p->numFastBytes + 1 - LZMA_MATCH_LEN_MIN; ++ LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, p->ProbPrices); ++ LenPriceEnc_UpdateTables(&p->repLenEnc, 1 << p->pb, p->ProbPrices); ++} ++ ++static SRes LzmaEnc_AllocAndInit(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ UInt32 i; ++ for (i = 0; i < (UInt32)kDicLogSizeMaxCompress; i++) ++ if (p->dictSize <= ((UInt32)1 << i)) ++ break; ++ p->distTableSize = i * 2; ++ ++ p->finished = False; ++ p->result = SZ_OK; ++ RINOK(LzmaEnc_Alloc(p, keepWindowSize, alloc, allocBig)); ++ LzmaEnc_Init(p); ++ LzmaEnc_InitPrices(p); ++ p->nowPos64 = 0; ++ return SZ_OK; ++} ++ ++static SRes LzmaEnc_Prepare(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ++ ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ p->matchFinderBase.stream = inStream; ++ p->needInit = 1; ++ p->rc.outStream = outStream; ++ return LzmaEnc_AllocAndInit(p, 0, alloc, allocBig); ++} ++ ++SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp, ++ ISeqInStream *inStream, UInt32 keepWindowSize, ++ ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ p->matchFinderBase.stream = inStream; ++ p->needInit = 1; ++ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig); ++} ++ ++static void LzmaEnc_SetInputBuf(CLzmaEnc *p, const Byte *src, SizeT srcLen) ++{ ++ p->matchFinderBase.directInput = 1; ++ p->matchFinderBase.bufferBase = (Byte *)src; ++ p->matchFinderBase.directInputRem = srcLen; ++} 
++ ++SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen, ++ UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ LzmaEnc_SetInputBuf(p, src, srcLen); ++ p->needInit = 1; ++ ++ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig); ++} ++ ++void LzmaEnc_Finish(CLzmaEncHandle pp) ++{ ++ #ifndef _7ZIP_ST ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ if (p->mtMode) ++ MatchFinderMt_ReleaseStream(&p->matchFinderMt); ++ #else ++ pp = pp; ++ #endif ++} ++ ++typedef struct ++{ ++ ISeqOutStream funcTable; ++ Byte *data; ++ SizeT rem; ++ Bool overflow; ++} CSeqOutStreamBuf; ++ ++static size_t MyWrite(void *pp, const void *data, size_t size) ++{ ++ CSeqOutStreamBuf *p = (CSeqOutStreamBuf *)pp; ++ if (p->rem < size) ++ { ++ size = p->rem; ++ p->overflow = True; ++ } ++ memcpy(p->data, data, size); ++ p->rem -= size; ++ p->data += size; ++ return size; ++} ++ ++ ++UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp) ++{ ++ const CLzmaEnc *p = (CLzmaEnc *)pp; ++ return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj); ++} ++ ++const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp) ++{ ++ const CLzmaEnc *p = (CLzmaEnc *)pp; ++ return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset; ++} ++ ++SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit, ++ Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize) ++{ ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ UInt64 nowPos64; ++ SRes res; ++ CSeqOutStreamBuf outStream; ++ ++ outStream.funcTable.Write = MyWrite; ++ outStream.data = dest; ++ outStream.rem = *destLen; ++ outStream.overflow = False; ++ ++ p->writeEndMark = False; ++ p->finished = False; ++ p->result = SZ_OK; ++ ++ if (reInit) ++ LzmaEnc_Init(p); ++ LzmaEnc_InitPrices(p); ++ nowPos64 = p->nowPos64; ++ RangeEnc_Init(&p->rc); ++ p->rc.outStream = &outStream.funcTable; ++ ++ res = LzmaEnc_CodeOneBlock(p, True, desiredPackSize, *unpackSize); ++ ++ *unpackSize = (UInt32)(p->nowPos64 - nowPos64); ++ *destLen -= outStream.rem; ++ if (outStream.overflow) ++ return SZ_ERROR_OUTPUT_EOF; ++ ++ return res; ++} ++ ++static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress) ++{ ++ SRes res = SZ_OK; ++ ++ #ifndef _7ZIP_ST ++ Byte allocaDummy[0x300]; ++ int i = 0; ++ for (i = 0; i < 16; i++) ++ allocaDummy[i] = (Byte)i; ++ #endif ++ ++ for (;;) ++ { ++ res = LzmaEnc_CodeOneBlock(p, False, 0, 0); ++ if (res != SZ_OK || p->finished != 0) ++ break; ++ if (progress != 0) ++ { ++ res = progress->Progress(progress, p->nowPos64, RangeEnc_GetProcessed(&p->rc)); ++ if (res != SZ_OK) ++ { ++ res = SZ_ERROR_PROGRESS; ++ break; ++ } ++ } ++ } ++ LzmaEnc_Finish(p); ++ return res; ++} ++ ++SRes LzmaEnc_Encode(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress, ++ ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ RINOK(LzmaEnc_Prepare(pp, outStream, inStream, alloc, allocBig)); ++ return LzmaEnc_Encode2((CLzmaEnc *)pp, progress); ++} ++ ++SRes LzmaEnc_WriteProperties(CLzmaEncHandle pp, Byte *props, SizeT *size) ++{ ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ int i; ++ UInt32 dictSize = p->dictSize; ++ if (*size < LZMA_PROPS_SIZE) ++ return SZ_ERROR_PARAM; ++ *size = LZMA_PROPS_SIZE; ++ props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc); ++ ++ for (i = 11; i <= 30; i++) ++ { ++ if (dictSize <= ((UInt32)2 << i)) ++ { ++ dictSize = (2 << i); ++ break; ++ } ++ if (dictSize <= ((UInt32)3 << i)) ++ { ++ dictSize = (3 << i); ++ break; ++ } ++ } ++ ++ for (i = 0; i < 4; i++) ++ 
props[1 + i] = (Byte)(dictSize >> (8 * i)); ++ return SZ_OK; ++} ++ ++SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, ++ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ SRes res; ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ ++ CSeqOutStreamBuf outStream; ++ ++ LzmaEnc_SetInputBuf(p, src, srcLen); ++ ++ outStream.funcTable.Write = MyWrite; ++ outStream.data = dest; ++ outStream.rem = *destLen; ++ outStream.overflow = False; ++ ++ p->writeEndMark = writeEndMark; ++ ++ p->rc.outStream = &outStream.funcTable; ++ res = LzmaEnc_MemPrepare(pp, src, srcLen, 0, alloc, allocBig); ++ if (res == SZ_OK) ++ res = LzmaEnc_Encode2(p, progress); ++ ++ *destLen -= outStream.rem; ++ if (outStream.overflow) ++ return SZ_ERROR_OUTPUT_EOF; ++ return res; ++} ++ ++SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, ++ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark, ++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ CLzmaEnc *p = (CLzmaEnc *)LzmaEnc_Create(alloc); ++ SRes res; ++ if (p == 0) ++ return SZ_ERROR_MEM; ++ ++ res = LzmaEnc_SetProps(p, props); ++ if (res == SZ_OK) ++ { ++ res = LzmaEnc_WriteProperties(p, propsEncoded, propsSize); ++ if (res == SZ_OK) ++ res = LzmaEnc_MemEncode(p, dest, destLen, src, srcLen, ++ writeEndMark, progress, alloc, allocBig); ++ } ++ ++ LzmaEnc_Destroy(p, alloc, allocBig); ++ return res; ++} +--- /dev/null ++++ b/lib/lzma/Makefile +@@ -0,0 +1,7 @@ ++lzma_compress-objs := LzFind.o LzmaEnc.o ++lzma_decompress-objs := LzmaDec.o ++ ++obj-$(CONFIG_LZMA_COMPRESS) += lzma_compress.o ++obj-$(CONFIG_LZMA_DECOMPRESS) += lzma_decompress.o ++ ++EXTRA_CFLAGS += -Iinclude/linux -Iinclude/linux/lzma -include types.h diff --git a/target/linux/generic/pending-5.15/532-jffs2_eofdetect.patch b/target/linux/generic/pending-5.15/532-jffs2_eofdetect.patch new file mode 100644 index 0000000000..a1d7dc1a69 --- /dev/null +++ b/target/linux/generic/pending-5.15/532-jffs2_eofdetect.patch @@ -0,0 +1,65 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: fs: jffs2: EOF marker + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + fs/jffs2/build.c | 10 ++++++++++ + fs/jffs2/scan.c | 21 +++++++++++++++++++-- + 2 files changed, 29 insertions(+), 2 deletions(-) + +--- a/fs/jffs2/build.c ++++ b/fs/jffs2/build.c +@@ -117,6 +117,16 @@ static int jffs2_build_filesystem(struct + dbg_fsbuild("scanned flash completely\n"); + jffs2_dbg_dump_block_lists_nolock(c); + ++ if (c->flags & (1 << 7)) { ++ printk("%s(): unlocking the mtd device... ", __func__); ++ mtd_unlock(c->mtd, 0, c->mtd->size); ++ printk("done.\n"); ++ ++ printk("%s(): erasing all blocks after the end marker... ", __func__); ++ jffs2_erase_pending_blocks(c, -1); ++ printk("done.\n"); ++ } ++ + dbg_fsbuild("pass 1 starting\n"); + c->flags |= JFFS2_SB_FLAG_BUILDING; + /* Now scan the directory tree, increasing nlink according to every dirent found. 
*/ +--- a/fs/jffs2/scan.c ++++ b/fs/jffs2/scan.c +@@ -148,8 +148,14 @@ int jffs2_scan_medium(struct jffs2_sb_in + /* reset summary info for next eraseblock scan */ + jffs2_sum_reset_collected(s); + +- ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), +- buf_size, s); ++ if (c->flags & (1 << 7)) { ++ if (mtd_block_isbad(c->mtd, jeb->offset)) ++ ret = BLK_STATE_BADBLOCK; ++ else ++ ret = BLK_STATE_ALLFF; ++ } else ++ ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), ++ buf_size, s); + + if (ret < 0) + goto out; +@@ -565,6 +571,17 @@ full_scan: + return err; + } + ++ if ((buf[0] == 0xde) && ++ (buf[1] == 0xad) && ++ (buf[2] == 0xc0) && ++ (buf[3] == 0xde)) { ++ /* end of filesystem. erase everything after this point */ ++ printk("%s(): End of filesystem marker found at 0x%x\n", __func__, jeb->offset); ++ c->flags |= (1 << 7); ++ ++ return BLK_STATE_ALLFF; ++ } ++ + /* We temporarily use 'ofs' as a pointer into the buffer/jeb */ + ofs = 0; + max_ofs = EMPTY_SCAN_SIZE(c->sector_size); diff --git a/target/linux/generic/pending-5.15/600-netfilter_conntrack_flush.patch b/target/linux/generic/pending-5.15/600-netfilter_conntrack_flush.patch new file mode 100644 index 0000000000..ab6953e557 --- /dev/null +++ b/target/linux/generic/pending-5.15/600-netfilter_conntrack_flush.patch @@ -0,0 +1,88 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: netfilter: add support for flushing conntrack via /proc + +lede-commit 8193bbe59a74d34d6a26d4a8cb857b1952905314 +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + net/netfilter/nf_conntrack_standalone.c | 59 ++++++++++++++++++++++++++++++++- + 1 file changed, 58 insertions(+), 1 deletion(-) + +--- a/net/netfilter/nf_conntrack_standalone.c ++++ b/net/netfilter/nf_conntrack_standalone.c +@@ -9,6 +9,7 @@ + #include <linux/percpu.h> + #include <linux/netdevice.h> + #include <linux/security.h> ++#include <linux/inet.h> + #include <net/net_namespace.h> + #ifdef CONFIG_SYSCTL + #include <linux/sysctl.h> +@@ -457,6 +458,56 @@ static int ct_cpu_seq_show(struct seq_fi + return 0; + } + ++struct kill_request { ++ u16 family; ++ union nf_inet_addr addr; ++}; ++ ++static int kill_matching(struct nf_conn *i, void *data) ++{ ++ struct kill_request *kr = data; ++ struct nf_conntrack_tuple *t1 = &i->tuplehash[IP_CT_DIR_ORIGINAL].tuple; ++ struct nf_conntrack_tuple *t2 = &i->tuplehash[IP_CT_DIR_REPLY].tuple; ++ ++ if (!kr->family) ++ return 1; ++ ++ if (t1->src.l3num != kr->family) ++ return 0; ++ ++ return (nf_inet_addr_cmp(&kr->addr, &t1->src.u3) || ++ nf_inet_addr_cmp(&kr->addr, &t1->dst.u3) || ++ nf_inet_addr_cmp(&kr->addr, &t2->src.u3) || ++ nf_inet_addr_cmp(&kr->addr, &t2->dst.u3)); ++} ++ ++static int ct_file_write(struct file *file, char *buf, size_t count) ++{ ++ struct seq_file *seq = file->private_data; ++ struct net *net = seq_file_net(seq); ++ struct kill_request kr = { }; ++ ++ if (count == 0) ++ return 0; ++ ++ if (count >= INET6_ADDRSTRLEN) ++ count = INET6_ADDRSTRLEN - 1; ++ ++ if (strnchr(buf, count, ':')) { ++ kr.family = AF_INET6; ++ if (!in6_pton(buf, count, (void *)&kr.addr, '\n', NULL)) ++ return -EINVAL; ++ } else if (strnchr(buf, count, '.')) { ++ kr.family = AF_INET; ++ if (!in4_pton(buf, count, (void *)&kr.addr, '\n', NULL)) ++ return -EINVAL; ++ } ++ ++ nf_ct_iterate_cleanup_net(net, kill_matching, &kr, 0, 0); ++ ++ return 0; ++} ++ + static const struct seq_operations ct_cpu_seq_ops = { + .start = ct_cpu_seq_start, + .next = ct_cpu_seq_next, +@@ -470,8 +521,9 @@ static int 
nf_conntrack_standalone_init_ + kuid_t root_uid; + kgid_t root_gid; + +- pde = proc_create_net("nf_conntrack", 0440, net->proc_net, &ct_seq_ops, +- sizeof(struct ct_iter_state)); ++ pde = proc_create_net_data_write("nf_conntrack", 0440, net->proc_net, ++ &ct_seq_ops, &ct_file_write, ++ sizeof(struct ct_iter_state), NULL); + if (!pde) + goto out_nf_conntrack; + diff --git a/target/linux/generic/pending-5.15/610-netfilter_match_bypass_default_checks.patch b/target/linux/generic/pending-5.15/610-netfilter_match_bypass_default_checks.patch new file mode 100644 index 0000000000..457703121c --- /dev/null +++ b/target/linux/generic/pending-5.15/610-netfilter_match_bypass_default_checks.patch @@ -0,0 +1,110 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: kernel: add a new version of my netfilter speedup patches for linux 2.6.39 and 3.0 + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + include/uapi/linux/netfilter_ipv4/ip_tables.h | 1 + + net/ipv4/netfilter/ip_tables.c | 37 +++++++++++++++++++++++++++ + 2 files changed, 38 insertions(+) + +--- a/include/uapi/linux/netfilter_ipv4/ip_tables.h ++++ b/include/uapi/linux/netfilter_ipv4/ip_tables.h +@@ -89,6 +89,7 @@ struct ipt_ip { + #define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */ + #define IPT_F_GOTO 0x02 /* Set if jump is a goto */ + #define IPT_F_MASK 0x03 /* All possible flag bits mask. */ ++#define IPT_F_NO_DEF_MATCH 0x80 /* Internal: no default match rules present */ + + /* Values for "inv" field in struct ipt_ip. */ + #define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */ +--- a/net/ipv4/netfilter/ip_tables.c ++++ b/net/ipv4/netfilter/ip_tables.c +@@ -50,6 +50,9 @@ ip_packet_match(const struct iphdr *ip, + { + unsigned long ret; + ++ if (ipinfo->flags & IPT_F_NO_DEF_MATCH) ++ return true; ++ + if (NF_INVF(ipinfo, IPT_INV_SRCIP, + (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) || + NF_INVF(ipinfo, IPT_INV_DSTIP, +@@ -80,6 +83,29 @@ ip_packet_match(const struct iphdr *ip, + return true; + } + ++static void ++ip_checkdefault(struct ipt_ip *ip) ++{ ++ static const char iface_mask[IFNAMSIZ] = {}; ++ ++ if (ip->invflags || ip->flags & IPT_F_FRAG) ++ return; ++ ++ if (memcmp(ip->iniface_mask, iface_mask, IFNAMSIZ) != 0) ++ return; ++ ++ if (memcmp(ip->outiface_mask, iface_mask, IFNAMSIZ) != 0) ++ return; ++ ++ if (ip->smsk.s_addr || ip->dmsk.s_addr) ++ return; ++ ++ if (ip->proto) ++ return; ++ ++ ip->flags |= IPT_F_NO_DEF_MATCH; ++} ++ + static bool + ip_checkentry(const struct ipt_ip *ip) + { +@@ -524,6 +550,8 @@ find_check_entry(struct ipt_entry *e, st + struct xt_mtchk_param mtpar; + struct xt_entry_match *ematch; + ++ ip_checkdefault(&e->ip); ++ + if (!xt_percpu_counter_alloc(alloc_state, &e->counters)) + return -ENOMEM; + +@@ -818,6 +846,7 @@ copy_entries_to_user(unsigned int total_ + const struct xt_table_info *private = table->private; + int ret = 0; + const void *loc_cpu_entry; ++ u8 flags; + + counters = alloc_counters(table); + if (IS_ERR(counters)) +@@ -845,6 +874,14 @@ copy_entries_to_user(unsigned int total_ + goto free_counters; + } + ++ flags = e->ip.flags & IPT_F_MASK; ++ if (copy_to_user(userptr + off ++ + offsetof(struct ipt_entry, ip.flags), ++ &flags, sizeof(flags)) != 0) { ++ ret = -EFAULT; ++ goto free_counters; ++ } ++ + for (i = sizeof(struct ipt_entry); + i < e->target_offset; + i += m->u.match_size) { +@@ -1223,12 +1260,15 @@ compat_copy_entry_to_user(struct ipt_ent + compat_uint_t origsize; + const struct xt_entry_match *ematch; + int ret = 0; ++ u8 flags = e->ip.flags & IPT_F_MASK; + + 
origsize = *size; + ce = *dstptr; + if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 || + copy_to_user(&ce->counters, &counters[i], +- sizeof(counters[i])) != 0) ++ sizeof(counters[i])) != 0 || ++ copy_to_user(&ce->ip.flags, &flags, ++ sizeof(flags)) != 0) + return -EFAULT; + + *dstptr += sizeof(struct compat_ipt_entry); diff --git a/target/linux/generic/pending-5.15/611-netfilter_match_bypass_default_table.patch b/target/linux/generic/pending-5.15/611-netfilter_match_bypass_default_table.patch new file mode 100644 index 0000000000..baf738a8d2 --- /dev/null +++ b/target/linux/generic/pending-5.15/611-netfilter_match_bypass_default_table.patch @@ -0,0 +1,106 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: netfilter: match bypass default table + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + net/ipv4/netfilter/ip_tables.c | 79 +++++++++++++++++++++++++++++++----------- + 1 file changed, 58 insertions(+), 21 deletions(-) + +--- a/net/ipv4/netfilter/ip_tables.c ++++ b/net/ipv4/netfilter/ip_tables.c +@@ -246,6 +246,33 @@ struct ipt_entry *ipt_next_entry(const s + return (void *)entry + entry->next_offset; + } + ++static bool ++ipt_handle_default_rule(struct ipt_entry *e, unsigned int *verdict) ++{ ++ struct xt_entry_target *t; ++ struct xt_standard_target *st; ++ ++ if (e->target_offset != sizeof(struct ipt_entry)) ++ return false; ++ ++ if (!(e->ip.flags & IPT_F_NO_DEF_MATCH)) ++ return false; ++ ++ t = ipt_get_target(e); ++ if (t->u.kernel.target->target) ++ return false; ++ ++ st = (struct xt_standard_target *) t; ++ if (st->verdict == XT_RETURN) ++ return false; ++ ++ if (st->verdict >= 0) ++ return false; ++ ++ *verdict = (unsigned)(-st->verdict) - 1; ++ return true; ++} ++ + /* Returns one of the generic firewall policies, like NF_ACCEPT. */ + unsigned int + ipt_do_table(struct sk_buff *skb, +@@ -266,27 +293,28 @@ ipt_do_table(struct sk_buff *skb, + unsigned int addend; + + /* Initialization */ ++ WARN_ON(!(table->valid_hooks & (1 << hook))); ++ local_bh_disable(); ++ private = READ_ONCE(table->private); /* Address dependency. */ ++ cpu = smp_processor_id(); ++ table_base = private->entries; ++ ++ e = get_entry(table_base, private->hook_entry[hook]); ++ if (ipt_handle_default_rule(e, &verdict)) { ++ struct xt_counters *counter; ++ ++ counter = xt_get_this_cpu_counter(&e->counters); ++ ADD_COUNTER(*counter, skb->len, 1); ++ local_bh_enable(); ++ return verdict; ++ } ++ + stackidx = 0; + ip = ip_hdr(skb); + indev = state->in ? state->in->name : nulldevname; + outdev = state->out ? state->out->name : nulldevname; +- /* We handle fragments by dealing with the first fragment as +- * if it was a normal packet. All other fragments are treated +- * normally, except that they will NEVER match rules that ask +- * things we don't know, ie. tcp syn flag or ports). If the +- * rule is also a fragment-specific rule, non-fragments won't +- * match it. */ +- acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET; +- acpar.thoff = ip_hdrlen(skb); +- acpar.hotdrop = false; +- acpar.state = state; + +- WARN_ON(!(table->valid_hooks & (1 << hook))); +- local_bh_disable(); + addend = xt_write_recseq_begin(); +- private = READ_ONCE(table->private); /* Address dependency. */ +- cpu = smp_processor_id(); +- table_base = private->entries; + jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; + + /* Switch to alternate jumpstack if we're being invoked via TEE. 
+@@ -299,7 +327,16 @@ ipt_do_table(struct sk_buff *skb, + if (static_key_false(&xt_tee_enabled)) + jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated); + +- e = get_entry(table_base, private->hook_entry[hook]); ++ /* We handle fragments by dealing with the first fragment as ++ * if it was a normal packet. All other fragments are treated ++ * normally, except that they will NEVER match rules that ask ++ * things we don't know, ie. tcp syn flag or ports). If the ++ * rule is also a fragment-specific rule, non-fragments won't ++ * match it. */ ++ acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET; ++ acpar.thoff = ip_hdrlen(skb); ++ acpar.hotdrop = false; ++ acpar.state = state; + + do { + const struct xt_entry_target *t; diff --git a/target/linux/generic/pending-5.15/612-netfilter_match_reduce_memory_access.patch b/target/linux/generic/pending-5.15/612-netfilter_match_reduce_memory_access.patch new file mode 100644 index 0000000000..79da6778b6 --- /dev/null +++ b/target/linux/generic/pending-5.15/612-netfilter_match_reduce_memory_access.patch @@ -0,0 +1,22 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: netfilter: reduce match memory access + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + net/ipv4/netfilter/ip_tables.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/net/ipv4/netfilter/ip_tables.c ++++ b/net/ipv4/netfilter/ip_tables.c +@@ -53,9 +53,9 @@ ip_packet_match(const struct iphdr *ip, + if (ipinfo->flags & IPT_F_NO_DEF_MATCH) + return true; + +- if (NF_INVF(ipinfo, IPT_INV_SRCIP, ++ if (NF_INVF(ipinfo, IPT_INV_SRCIP, ipinfo->smsk.s_addr && + (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) || +- NF_INVF(ipinfo, IPT_INV_DSTIP, ++ NF_INVF(ipinfo, IPT_INV_DSTIP, ipinfo->dmsk.s_addr && + (ip->daddr & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr)) + return false; + diff --git a/target/linux/generic/pending-5.15/613-netfilter_optional_tcp_window_check.patch b/target/linux/generic/pending-5.15/613-netfilter_optional_tcp_window_check.patch new file mode 100644 index 0000000000..97e449510d --- /dev/null +++ b/target/linux/generic/pending-5.15/613-netfilter_optional_tcp_window_check.patch @@ -0,0 +1,73 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: netfilter: optional tcp window check + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + net/netfilter/nf_conntrack_proto_tcp.c | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +--- a/net/netfilter/nf_conntrack_proto_tcp.c ++++ b/net/netfilter/nf_conntrack_proto_tcp.c +@@ -31,6 +31,9 @@ + #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> + #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> + ++/* Do not check the TCP window for incoming packets */ ++static int nf_ct_tcp_no_window_check __read_mostly = 1; ++ + /* "Be conservative in what you do, + be liberal in what you accept from others." + If it's non-zero, we mark only out of window RST segments as INVALID. */ +@@ -476,6 +479,9 @@ static bool tcp_in_window(const struct n + s32 receiver_offset; + bool res, in_recv_win; + ++ if (nf_ct_tcp_no_window_check) ++ return true; ++ + /* + * Get the required data from the packet. 
+ */ +@@ -1130,7 +1136,7 @@ int nf_conntrack_tcp_packet(struct nf_co + IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED && + timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK]) + timeout = timeouts[TCP_CONNTRACK_UNACK]; +- else if (ct->proto.tcp.last_win == 0 && ++ else if (!nf_ct_tcp_no_window_check && ct->proto.tcp.last_win == 0 && + timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS]) + timeout = timeouts[TCP_CONNTRACK_RETRANS]; + else +--- a/net/netfilter/nf_conntrack_standalone.c ++++ b/net/netfilter/nf_conntrack_standalone.c +@@ -25,6 +25,9 @@ + #include <net/netfilter/nf_conntrack_timestamp.h> + #include <linux/rculist_nulls.h> + ++/* Do not check the TCP window for incoming packets */ ++static int nf_ct_tcp_no_window_check __read_mostly = 1; ++ + static bool enable_hooks __read_mostly; + MODULE_PARM_DESC(enable_hooks, "Always enable conntrack hooks"); + module_param(enable_hooks, bool, 0000); +@@ -660,6 +663,7 @@ enum nf_ct_sysctl_index { + NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM, + #endif + ++ NF_SYSCTL_CT_PROTO_TCP_NO_WINDOW_CHECK, + __NF_SYSCTL_CT_LAST_SYSCTL, + }; + +@@ -1014,6 +1018,13 @@ static struct ctl_table nf_ct_sysctl_tab + .proc_handler = proc_dointvec_jiffies, + }, + #endif ++ [NF_SYSCTL_CT_PROTO_TCP_NO_WINDOW_CHECK] = { ++ .procname = "nf_conntrack_tcp_no_window_check", ++ .data = &nf_ct_tcp_no_window_check, ++ .maxlen = sizeof(unsigned int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec, ++ }, + {} + }; + diff --git a/target/linux/generic/pending-5.15/620-net_sched-codel-do-not-defer-queue-length-update.patch b/target/linux/generic/pending-5.15/620-net_sched-codel-do-not-defer-queue-length-update.patch new file mode 100644 index 0000000000..4b4825ae3b --- /dev/null +++ b/target/linux/generic/pending-5.15/620-net_sched-codel-do-not-defer-queue-length-update.patch @@ -0,0 +1,86 @@ +From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru> +Date: Mon, 21 Aug 2017 11:14:14 +0300 +Subject: [PATCH] net_sched/codel: do not defer queue length update + +When codel wants to drop last packet in ->dequeue() it cannot call +qdisc_tree_reduce_backlog() right away - it will notify parent qdisc +about zero qlen and HTB/HFSC will deactivate class. The same class will +be deactivated second time by caller of ->dequeue(). Currently codel and +fq_codel defer update. This triggers warning in HFSC when it's qlen != 0 +but there is no active classes. + +This patch update parent queue length immediately: just temporary increase +qlen around qdisc_tree_reduce_backlog() to prevent first class deactivation +if we have skb to return. + +This might open another problem in HFSC - now operation peek could fail and +deactivate parent class. + +Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru> +Link: https://bugzilla.kernel.org/show_bug.cgi?id=109581 +--- + +--- a/net/sched/sch_codel.c ++++ b/net/sched/sch_codel.c +@@ -95,11 +95,17 @@ static struct sk_buff *codel_qdisc_deque + &q->stats, qdisc_pkt_len, codel_get_enqueue_time, + drop_func, dequeue_func); + +- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0, +- * or HTB crashes. Defer it for next round. ++ /* If our qlen is 0 qdisc_tree_reduce_backlog() will deactivate ++ * parent class, dequeue in parent qdisc will do the same if we ++ * return skb. Temporary increment qlen if we have skb. 
+ */ +- if (q->stats.drop_count && sch->q.qlen) { +- qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len); ++ if (q->stats.drop_count) { ++ if (skb) ++ sch->q.qlen++; ++ qdisc_tree_reduce_backlog(sch, q->stats.drop_count, ++ q->stats.drop_len); ++ if (skb) ++ sch->q.qlen--; + q->stats.drop_count = 0; + q->stats.drop_len = 0; + } +--- a/net/sched/sch_fq_codel.c ++++ b/net/sched/sch_fq_codel.c +@@ -304,6 +304,21 @@ begin: + &flow->cvars, &q->cstats, qdisc_pkt_len, + codel_get_enqueue_time, drop_func, dequeue_func); + ++ /* If our qlen is 0 qdisc_tree_reduce_backlog() will deactivate ++ * parent class, dequeue in parent qdisc will do the same if we ++ * return skb. Temporary increment qlen if we have skb. ++ */ ++ if (q->cstats.drop_count) { ++ if (skb) ++ sch->q.qlen++; ++ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, ++ q->cstats.drop_len); ++ if (skb) ++ sch->q.qlen--; ++ q->cstats.drop_count = 0; ++ q->cstats.drop_len = 0; ++ } ++ + if (!skb) { + /* force a pass through old_flows to prevent starvation */ + if ((head == &q->new_flows) && !list_empty(&q->old_flows)) +@@ -314,15 +329,6 @@ begin: + } + qdisc_bstats_update(sch, skb); + flow->deficit -= qdisc_pkt_len(skb); +- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0, +- * or HTB crashes. Defer it for next round. +- */ +- if (q->cstats.drop_count && sch->q.qlen) { +- qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, +- q->cstats.drop_len); +- q->cstats.drop_count = 0; +- q->cstats.drop_len = 0; +- } + return skb; + } + diff --git a/target/linux/generic/pending-5.15/630-packet_socket_type.patch b/target/linux/generic/pending-5.15/630-packet_socket_type.patch new file mode 100644 index 0000000000..5b396c3a8c --- /dev/null +++ b/target/linux/generic/pending-5.15/630-packet_socket_type.patch @@ -0,0 +1,138 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: net: add an optimization for dealing with raw sockets + +lede-commit: 4898039703d7315f0f3431c860123338ec3be0f6 +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + include/uapi/linux/if_packet.h | 3 +++ + net/packet/af_packet.c | 34 +++++++++++++++++++++++++++------- + net/packet/internal.h | 1 + + 3 files changed, 31 insertions(+), 7 deletions(-) + +--- a/include/uapi/linux/if_packet.h ++++ b/include/uapi/linux/if_packet.h +@@ -33,6 +33,8 @@ struct sockaddr_ll { + #define PACKET_KERNEL 7 /* To kernel space */ + /* Unused, PACKET_FASTROUTE and PACKET_LOOPBACK are invisible to user space */ + #define PACKET_FASTROUTE 6 /* Fastrouted frame */ ++#define PACKET_MASK_ANY 0xffffffff /* mask for packet type bits */ ++ + + /* Packet socket options */ + +@@ -59,6 +61,7 @@ struct sockaddr_ll { + #define PACKET_ROLLOVER_STATS 21 + #define PACKET_FANOUT_DATA 22 + #define PACKET_IGNORE_OUTGOING 23 ++#define PACKET_RECV_TYPE 24 + + #define PACKET_FANOUT_HASH 0 + #define PACKET_FANOUT_LB 1 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -1822,6 +1822,7 @@ static int packet_rcv_spkt(struct sk_buf + { + struct sock *sk; + struct sockaddr_pkt *spkt; ++ struct packet_sock *po; + + /* + * When we registered the protocol we saved the socket in the data +@@ -1829,6 +1830,7 @@ static int packet_rcv_spkt(struct sk_buf + */ + + sk = pt->af_packet_priv; ++ po = pkt_sk(sk); + + /* + * Yank back the headers [hope the device set this +@@ -1841,7 +1843,7 @@ static int packet_rcv_spkt(struct sk_buf + * so that this procedure is noop. 
+ */ + +- if (skb->pkt_type == PACKET_LOOPBACK) ++ if (!(po->pkt_type & (1 << skb->pkt_type))) + goto out; + + if (!net_eq(dev_net(dev), sock_net(sk))) +@@ -2079,12 +2081,12 @@ static int packet_rcv(struct sk_buff *sk + unsigned int snaplen, res; + bool is_drop_n_account = false; + +- if (skb->pkt_type == PACKET_LOOPBACK) +- goto drop; +- + sk = pt->af_packet_priv; + po = pkt_sk(sk); + ++ if (!(po->pkt_type & (1 << skb->pkt_type))) ++ goto drop; ++ + if (!net_eq(dev_net(dev), sock_net(sk))) + goto drop; + +@@ -2210,12 +2212,12 @@ static int tpacket_rcv(struct sk_buff *s + BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); + BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); + +- if (skb->pkt_type == PACKET_LOOPBACK) +- goto drop; +- + sk = pt->af_packet_priv; + po = pkt_sk(sk); + ++ if (!(po->pkt_type & (1 << skb->pkt_type))) ++ goto drop; ++ + if (!net_eq(dev_net(dev), sock_net(sk))) + goto drop; + +@@ -3325,6 +3327,7 @@ static int packet_create(struct net *net + mutex_init(&po->pg_vec_lock); + po->rollover = NULL; + po->prot_hook.func = packet_rcv; ++ po->pkt_type = PACKET_MASK_ANY & ~(1 << PACKET_LOOPBACK); + + if (sock->type == SOCK_PACKET) + po->prot_hook.func = packet_rcv_spkt; +@@ -3969,6 +3972,16 @@ packet_setsockopt(struct socket *sock, i + po->xmit = val ? packet_direct_xmit : dev_queue_xmit; + return 0; + } ++ case PACKET_RECV_TYPE: ++ { ++ unsigned int val; ++ if (optlen != sizeof(val)) ++ return -EINVAL; ++ if (copy_from_sockptr(&val, optval, sizeof(val))) ++ return -EFAULT; ++ po->pkt_type = val & ~BIT(PACKET_LOOPBACK); ++ return 0; ++ } + default: + return -ENOPROTOOPT; + } +@@ -4025,6 +4038,13 @@ static int packet_getsockopt(struct sock + case PACKET_VNET_HDR: + val = po->has_vnet_hdr; + break; ++ case PACKET_RECV_TYPE: ++ if (len > sizeof(unsigned int)) ++ len = sizeof(unsigned int); ++ val = po->pkt_type; ++ ++ data = &val; ++ break; + case PACKET_VERSION: + val = po->tp_version; + break; +--- a/net/packet/internal.h ++++ b/net/packet/internal.h +@@ -137,6 +137,7 @@ struct packet_sock { + int (*xmit)(struct sk_buff *skb); + struct packet_type prot_hook ____cacheline_aligned_in_smp; + atomic_t tp_drops ____cacheline_aligned_in_smp; ++ unsigned int pkt_type; + }; + + static struct packet_sock *pkt_sk(struct sock *sk) diff --git a/target/linux/generic/pending-5.15/655-increase_skb_pad.patch b/target/linux/generic/pending-5.15/655-increase_skb_pad.patch new file mode 100644 index 0000000000..ff39ddb8b3 --- /dev/null +++ b/target/linux/generic/pending-5.15/655-increase_skb_pad.patch @@ -0,0 +1,20 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: kernel: add a few patches for avoiding unnecessary skb reallocations - significantly improves ethernet<->wireless performance + +lede-commit: 6f89cffc9add6939d44a6b54cf9a5e77849aa7fd +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + include/linux/skbuff.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -2676,7 +2676,7 @@ static inline int pskb_network_may_pull( + * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) + */ + #ifndef NET_SKB_PAD +-#define NET_SKB_PAD max(32, L1_CACHE_BYTES) ++#define NET_SKB_PAD max(64, L1_CACHE_BYTES) + #endif + + int ___pskb_trim(struct sk_buff *skb, unsigned int len); diff --git a/target/linux/generic/pending-5.15/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch b/target/linux/generic/pending-5.15/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch new file mode 100644 index 0000000000..89eb7d61b3 --- /dev/null +++ 
b/target/linux/generic/pending-5.15/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch @@ -0,0 +1,511 @@ +From: Steven Barth <steven@midlink.org> +Subject: Add support for MAP-E FMRs (mesh mode) + +MAP-E FMRs (draft-ietf-softwire-map-10) are rules for IPv4-communication +between MAP CEs (mesh mode) without the need to forward such data to a +border relay. This is similar to how 6rd works but for IPv4 over IPv6. + +Signed-off-by: Steven Barth <cyrus@openwrt.org> +--- + include/net/ip6_tunnel.h | 13 ++ + include/uapi/linux/if_tunnel.h | 13 ++ + net/ipv6/ip6_tunnel.c | 276 +++++++++++++++++++++++++++++++++++++++-- + 3 files changed, 291 insertions(+), 11 deletions(-) + +--- a/include/net/ip6_tunnel.h ++++ b/include/net/ip6_tunnel.h +@@ -18,6 +18,18 @@ + /* determine capability on a per-packet basis */ + #define IP6_TNL_F_CAP_PER_PACKET 0x40000 + ++/* IPv6 tunnel FMR */ ++struct __ip6_tnl_fmr { ++ struct __ip6_tnl_fmr *next; /* next fmr in list */ ++ struct in6_addr ip6_prefix; ++ struct in_addr ip4_prefix; ++ ++ __u8 ip6_prefix_len; ++ __u8 ip4_prefix_len; ++ __u8 ea_len; ++ __u8 offset; ++}; ++ + struct __ip6_tnl_parm { + char name[IFNAMSIZ]; /* name of tunnel device */ + int link; /* ifindex of underlying L2 interface */ +@@ -29,6 +41,7 @@ struct __ip6_tnl_parm { + __u32 flags; /* tunnel flags */ + struct in6_addr laddr; /* local tunnel end-point address */ + struct in6_addr raddr; /* remote tunnel end-point address */ ++ struct __ip6_tnl_fmr *fmrs; /* FMRs */ + + __be16 i_flags; + __be16 o_flags; +--- a/include/uapi/linux/if_tunnel.h ++++ b/include/uapi/linux/if_tunnel.h +@@ -77,10 +77,23 @@ enum { + IFLA_IPTUN_ENCAP_DPORT, + IFLA_IPTUN_COLLECT_METADATA, + IFLA_IPTUN_FWMARK, ++ IFLA_IPTUN_FMRS, + __IFLA_IPTUN_MAX, + }; + #define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1) + ++enum { ++ IFLA_IPTUN_FMR_UNSPEC, ++ IFLA_IPTUN_FMR_IP6_PREFIX, ++ IFLA_IPTUN_FMR_IP4_PREFIX, ++ IFLA_IPTUN_FMR_IP6_PREFIX_LEN, ++ IFLA_IPTUN_FMR_IP4_PREFIX_LEN, ++ IFLA_IPTUN_FMR_EA_LEN, ++ IFLA_IPTUN_FMR_OFFSET, ++ __IFLA_IPTUN_FMR_MAX, ++}; ++#define IFLA_IPTUN_FMR_MAX (__IFLA_IPTUN_FMR_MAX - 1) ++ + enum tunnel_encap_types { + TUNNEL_ENCAP_NONE, + TUNNEL_ENCAP_FOU, +--- a/net/ipv6/ip6_tunnel.c ++++ b/net/ipv6/ip6_tunnel.c +@@ -11,6 +11,9 @@ + * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c + * + * RFC 2473 ++ * ++ * Changes: ++ * Steven Barth <cyrus@openwrt.org>: MAP-E FMR support + */ + + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +@@ -67,9 +70,9 @@ static bool log_ecn_error = true; + module_param(log_ecn_error, bool, 0644); + MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); + +-static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2) ++static u32 HASH(const struct in6_addr *addr) + { +- u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2); ++ u32 hash = ipv6_addr_hash(addr); + + return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT); + } +@@ -144,17 +147,33 @@ static struct ip6_tnl * + ip6_tnl_lookup(struct net *net, int link, + const struct in6_addr *remote, const struct in6_addr *local) + { +- unsigned int hash = HASH(remote, local); ++ unsigned int hash = HASH(local); + struct ip6_tnl *t, *cand = NULL; + struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); + struct in6_addr any; + + for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { + if (!ipv6_addr_equal(local, &t->parms.laddr) || +- !ipv6_addr_equal(remote, &t->parms.raddr) || + !(t->dev->flags & IFF_UP)) + continue; + ++ if (!ipv6_addr_equal(remote, &t->parms.raddr)) { ++ struct __ip6_tnl_fmr *fmr; ++ bool found = 
false; ++ ++ for (fmr = t->parms.fmrs; fmr; fmr = fmr->next) { ++ if (!ipv6_prefix_equal(remote, &fmr->ip6_prefix, ++ fmr->ip6_prefix_len)) ++ continue; ++ ++ found = true; ++ break; ++ } ++ ++ if (!found) ++ continue; ++ } ++ + if (link == t->parms.link) + return t; + else +@@ -162,7 +181,7 @@ ip6_tnl_lookup(struct net *net, int link + } + + memset(&any, 0, sizeof(any)); +- hash = HASH(&any, local); ++ hash = HASH(local); + for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { + if (!ipv6_addr_equal(local, &t->parms.laddr) || + !ipv6_addr_any(&t->parms.raddr) || +@@ -175,7 +194,7 @@ ip6_tnl_lookup(struct net *net, int link + cand = t; + } + +- hash = HASH(remote, &any); ++ hash = HASH(&any); + for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { + if (!ipv6_addr_equal(remote, &t->parms.raddr) || + !ipv6_addr_any(&t->parms.laddr) || +@@ -223,7 +242,7 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n, + + if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) { + prio = 1; +- h = HASH(remote, local); ++ h = HASH(local); + } + return &ip6n->tnls[prio][h]; + } +@@ -405,6 +424,12 @@ ip6_tnl_dev_uninit(struct net_device *de + struct net *net = t->net; + struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); + ++ while (t->parms.fmrs) { ++ struct __ip6_tnl_fmr *next = t->parms.fmrs->next; ++ kfree(t->parms.fmrs); ++ t->parms.fmrs = next; ++ } ++ + if (dev == ip6n->fb_tnl_dev) + RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL); + else +@@ -821,6 +846,107 @@ int ip6_tnl_rcv_ctl(struct ip6_tnl *t, + } + EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl); + ++/** ++ * ip4ip6_fmr_calc - calculate target / source IPv6-address based on FMR ++ * @dest: destination IPv6 address buffer ++ * @skb: received socket buffer ++ * @fmr: MAP FMR ++ * @xmit: Calculate for xmit or rcv ++ **/ ++static void ip4ip6_fmr_calc(struct in6_addr *dest, ++ const struct iphdr *iph, const uint8_t *end, ++ const struct __ip6_tnl_fmr *fmr, bool xmit) ++{ ++ int psidlen = fmr->ea_len - (32 - fmr->ip4_prefix_len); ++ u8 *portp = NULL; ++ bool use_dest_addr; ++ const struct iphdr *dsth = iph; ++ ++ if ((u8*)dsth >= end) ++ return; ++ ++ /* find significant IP header */ ++ if (iph->protocol == IPPROTO_ICMP) { ++ struct icmphdr *ih = (struct icmphdr*)(((u8*)dsth) + dsth->ihl * 4); ++ if (ih && ((u8*)&ih[1]) <= end && ( ++ ih->type == ICMP_DEST_UNREACH || ++ ih->type == ICMP_SOURCE_QUENCH || ++ ih->type == ICMP_TIME_EXCEEDED || ++ ih->type == ICMP_PARAMETERPROB || ++ ih->type == ICMP_REDIRECT)) ++ dsth = (const struct iphdr*)&ih[1]; ++ } ++ ++ /* in xmit-path use dest port by default and source port only if ++ this is an ICMP reply to something else; vice versa in rcv-path */ ++ use_dest_addr = (xmit && dsth == iph) || (!xmit && dsth != iph); ++ ++ /* get dst port */ ++ if (((u8*)&dsth[1]) <= end && ( ++ dsth->protocol == IPPROTO_UDP || ++ dsth->protocol == IPPROTO_TCP || ++ dsth->protocol == IPPROTO_SCTP || ++ dsth->protocol == IPPROTO_DCCP)) { ++ /* for UDP, TCP, SCTP and DCCP source and dest port ++ follow IPv4 header directly */ ++ portp = ((u8*)dsth) + dsth->ihl * 4; ++ ++ if (use_dest_addr) ++ portp += sizeof(u16); ++ } else if (iph->protocol == IPPROTO_ICMP) { ++ struct icmphdr *ih = (struct icmphdr*)(((u8*)dsth) + dsth->ihl * 4); ++ ++ /* use icmp identifier as port */ ++ if (((u8*)&ih) <= end && ( ++ (use_dest_addr && ( ++ ih->type == ICMP_ECHOREPLY || ++ ih->type == ICMP_TIMESTAMPREPLY || ++ ih->type == ICMP_INFO_REPLY || ++ ih->type == ICMP_ADDRESSREPLY)) || ++ (!use_dest_addr && ( ++ ih->type == ICMP_ECHO || ++ ih->type == ICMP_TIMESTAMP || ++ ih->type == 
ICMP_INFO_REQUEST || ++ ih->type == ICMP_ADDRESS) ++ ))) ++ portp = (u8*)&ih->un.echo.id; ++ } ++ ++ if ((portp && &portp[2] <= end) || psidlen == 0) { ++ int frombyte = fmr->ip6_prefix_len / 8; ++ int fromrem = fmr->ip6_prefix_len % 8; ++ int bytes = sizeof(struct in6_addr) - frombyte; ++ const u32 *addr = (use_dest_addr) ? &iph->daddr : &iph->saddr; ++ u64 eabits = ((u64)ntohl(*addr)) << (32 + fmr->ip4_prefix_len); ++ u64 t = 0; ++ ++ /* extract PSID from port and add it to eabits */ ++ u16 psidbits = 0; ++ if (psidlen > 0) { ++ psidbits = ((u16)portp[0]) << 8 | ((u16)portp[1]); ++ psidbits >>= 16 - psidlen - fmr->offset; ++ psidbits = (u16)(psidbits << (16 - psidlen)); ++ eabits |= ((u64)psidbits) << (48 - (fmr->ea_len - psidlen)); ++ } ++ ++ /* rewrite destination address */ ++ *dest = fmr->ip6_prefix; ++ memcpy(&dest->s6_addr[10], addr, sizeof(*addr)); ++ dest->s6_addr16[7] = htons(psidbits >> (16 - psidlen)); ++ ++ if (bytes > sizeof(u64)) ++ bytes = sizeof(u64); ++ ++ /* insert eabits */ ++ memcpy(&t, &dest->s6_addr[frombyte], bytes); ++ t = be64_to_cpu(t) & ~(((((u64)1) << fmr->ea_len) - 1) ++ << (64 - fmr->ea_len - fromrem)); ++ t = cpu_to_be64(t | (eabits >> fromrem)); ++ memcpy(&dest->s6_addr[frombyte], &t, bytes); ++ } ++} ++ ++ + static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb, + const struct tnl_ptk_info *tpi, + struct metadata_dst *tun_dst, +@@ -873,6 +999,27 @@ static int __ip6_tnl_rcv(struct ip6_tnl + skb_reset_network_header(skb); + memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); + ++ if (tpi->proto == htons(ETH_P_IP) && tunnel->parms.fmrs && ++ !ipv6_addr_equal(&ipv6h->saddr, &tunnel->parms.raddr)) { ++ /* Packet didn't come from BR, so lookup FMR */ ++ struct __ip6_tnl_fmr *fmr; ++ struct in6_addr expected = tunnel->parms.raddr; ++ for (fmr = tunnel->parms.fmrs; fmr; fmr = fmr->next) ++ if (ipv6_prefix_equal(&ipv6h->saddr, ++ &fmr->ip6_prefix, fmr->ip6_prefix_len)) ++ break; ++ ++ /* Check that IPv6 matches IPv4 source to prevent spoofing */ ++ if (fmr) ++ ip4ip6_fmr_calc(&expected, ip_hdr(skb), ++ skb_tail_pointer(skb), fmr, false); ++ ++ if (!ipv6_addr_equal(&ipv6h->saddr, &expected)) { ++ rcu_read_unlock(); ++ goto drop; ++ } ++ } ++ + __skb_tunnel_rx(skb, tunnel->dev, tunnel->net); + + err = dscp_ecn_decapsulate(tunnel, ipv6h, skb); +@@ -1024,6 +1171,7 @@ static void init_tel_txopt(struct ipv6_t + opt->ops.opt_nflen = 8; + } + ++ + /** + * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own + * @t: the outgoing tunnel device +@@ -1304,6 +1452,7 @@ ipxip6_tnl_xmit(struct sk_buff *skb, str + u8 protocol) + { + struct ip6_tnl *t = netdev_priv(dev); ++ struct __ip6_tnl_fmr *fmr; + struct ipv6hdr *ipv6h; + const struct iphdr *iph; + int encap_limit = -1; +@@ -1403,6 +1552,18 @@ ipxip6_tnl_xmit(struct sk_buff *skb, str + fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); + dsfield = INET_ECN_encapsulate(dsfield, orig_dsfield); + ++ /* try to find matching FMR */ ++ for (fmr = t->parms.fmrs; fmr; fmr = fmr->next) { ++ unsigned mshift = 32 - fmr->ip4_prefix_len; ++ if (ntohl(fmr->ip4_prefix.s_addr) >> mshift == ++ ntohl(ip_hdr(skb)->daddr) >> mshift) ++ break; ++ } ++ ++ /* change dstaddr according to FMR */ ++ if (fmr) ++ ip4ip6_fmr_calc(&fl6.daddr, ip_hdr(skb), skb_tail_pointer(skb), fmr, true); ++ + if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) + return -1; + +@@ -1555,6 +1716,14 @@ ip6_tnl_change(struct ip6_tnl *t, const + t->parms.link = p->link; + t->parms.proto = p->proto; + t->parms.fwmark = p->fwmark; ++ ++ while 
(t->parms.fmrs) { ++ struct __ip6_tnl_fmr *next = t->parms.fmrs->next; ++ kfree(t->parms.fmrs); ++ t->parms.fmrs = next; ++ } ++ t->parms.fmrs = p->fmrs; ++ + dst_cache_reset(&t->dst_cache); + ip6_tnl_link_config(t); + return 0; +@@ -1593,6 +1762,7 @@ ip6_tnl_parm_from_user(struct __ip6_tnl_ + p->flowinfo = u->flowinfo; + p->link = u->link; + p->proto = u->proto; ++ p->fmrs = NULL; + memcpy(p->name, u->name, sizeof(u->name)); + } + +@@ -1978,6 +2148,15 @@ static int ip6_tnl_validate(struct nlatt + return 0; + } + ++static const struct nla_policy ip6_tnl_fmr_policy[IFLA_IPTUN_FMR_MAX + 1] = { ++ [IFLA_IPTUN_FMR_IP6_PREFIX] = { .len = sizeof(struct in6_addr) }, ++ [IFLA_IPTUN_FMR_IP4_PREFIX] = { .len = sizeof(struct in_addr) }, ++ [IFLA_IPTUN_FMR_IP6_PREFIX_LEN] = { .type = NLA_U8 }, ++ [IFLA_IPTUN_FMR_IP4_PREFIX_LEN] = { .type = NLA_U8 }, ++ [IFLA_IPTUN_FMR_EA_LEN] = { .type = NLA_U8 }, ++ [IFLA_IPTUN_FMR_OFFSET] = { .type = NLA_U8 } ++}; ++ + static void ip6_tnl_netlink_parms(struct nlattr *data[], + struct __ip6_tnl_parm *parms) + { +@@ -2015,6 +2194,46 @@ static void ip6_tnl_netlink_parms(struct + + if (data[IFLA_IPTUN_FWMARK]) + parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]); ++ ++ if (data[IFLA_IPTUN_FMRS]) { ++ unsigned rem; ++ struct nlattr *fmr; ++ nla_for_each_nested(fmr, data[IFLA_IPTUN_FMRS], rem) { ++ struct nlattr *fmrd[IFLA_IPTUN_FMR_MAX + 1], *c; ++ struct __ip6_tnl_fmr *nfmr; ++ ++ nla_parse_nested(fmrd, IFLA_IPTUN_FMR_MAX, ++ fmr, ip6_tnl_fmr_policy, NULL); ++ ++ if (!(nfmr = kzalloc(sizeof(*nfmr), GFP_KERNEL))) ++ continue; ++ ++ nfmr->offset = 6; ++ ++ if ((c = fmrd[IFLA_IPTUN_FMR_IP6_PREFIX])) ++ nla_memcpy(&nfmr->ip6_prefix, fmrd[IFLA_IPTUN_FMR_IP6_PREFIX], ++ sizeof(nfmr->ip6_prefix)); ++ ++ if ((c = fmrd[IFLA_IPTUN_FMR_IP4_PREFIX])) ++ nla_memcpy(&nfmr->ip4_prefix, fmrd[IFLA_IPTUN_FMR_IP4_PREFIX], ++ sizeof(nfmr->ip4_prefix)); ++ ++ if ((c = fmrd[IFLA_IPTUN_FMR_IP6_PREFIX_LEN])) ++ nfmr->ip6_prefix_len = nla_get_u8(c); ++ ++ if ((c = fmrd[IFLA_IPTUN_FMR_IP4_PREFIX_LEN])) ++ nfmr->ip4_prefix_len = nla_get_u8(c); ++ ++ if ((c = fmrd[IFLA_IPTUN_FMR_EA_LEN])) ++ nfmr->ea_len = nla_get_u8(c); ++ ++ if ((c = fmrd[IFLA_IPTUN_FMR_OFFSET])) ++ nfmr->offset = nla_get_u8(c); ++ ++ nfmr->next = parms->fmrs; ++ parms->fmrs = nfmr; ++ } ++ } + } + + static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[], +@@ -2130,6 +2349,12 @@ static void ip6_tnl_dellink(struct net_d + + static size_t ip6_tnl_get_size(const struct net_device *dev) + { ++ const struct ip6_tnl *t = netdev_priv(dev); ++ struct __ip6_tnl_fmr *c; ++ int fmrs = 0; ++ for (c = t->parms.fmrs; c; c = c->next) ++ ++fmrs; ++ + return + /* IFLA_IPTUN_LINK */ + nla_total_size(4) + +@@ -2159,6 +2384,24 @@ static size_t ip6_tnl_get_size(const str + nla_total_size(0) + + /* IFLA_IPTUN_FWMARK */ + nla_total_size(4) + ++ /* IFLA_IPTUN_FMRS */ ++ nla_total_size(0) + ++ ( ++ /* nest */ ++ nla_total_size(0) + ++ /* IFLA_IPTUN_FMR_IP6_PREFIX */ ++ nla_total_size(sizeof(struct in6_addr)) + ++ /* IFLA_IPTUN_FMR_IP4_PREFIX */ ++ nla_total_size(sizeof(struct in_addr)) + ++ /* IFLA_IPTUN_FMR_EA_LEN */ ++ nla_total_size(1) + ++ /* IFLA_IPTUN_FMR_IP6_PREFIX_LEN */ ++ nla_total_size(1) + ++ /* IFLA_IPTUN_FMR_IP4_PREFIX_LEN */ ++ nla_total_size(1) + ++ /* IFLA_IPTUN_FMR_OFFSET */ ++ nla_total_size(1) ++ ) * fmrs + + 0; + } + +@@ -2166,6 +2409,9 @@ static int ip6_tnl_fill_info(struct sk_b + { + struct ip6_tnl *tunnel = netdev_priv(dev); + struct __ip6_tnl_parm *parm = &tunnel->parms; ++ struct __ip6_tnl_fmr *c; ++ int fmrcnt = 0; ++ 
struct nlattr *fmrs; + + if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) || + nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) || +@@ -2175,9 +2421,27 @@ static int ip6_tnl_fill_info(struct sk_b + nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) || + nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) || + nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) || +- nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark)) ++ nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark) || ++ !(fmrs = nla_nest_start(skb, IFLA_IPTUN_FMRS))) + goto nla_put_failure; + ++ for (c = parm->fmrs; c; c = c->next) { ++ struct nlattr *fmr = nla_nest_start(skb, ++fmrcnt); ++ if (!fmr || ++ nla_put(skb, IFLA_IPTUN_FMR_IP6_PREFIX, ++ sizeof(c->ip6_prefix), &c->ip6_prefix) || ++ nla_put(skb, IFLA_IPTUN_FMR_IP4_PREFIX, ++ sizeof(c->ip4_prefix), &c->ip4_prefix) || ++ nla_put_u8(skb, IFLA_IPTUN_FMR_IP6_PREFIX_LEN, c->ip6_prefix_len) || ++ nla_put_u8(skb, IFLA_IPTUN_FMR_IP4_PREFIX_LEN, c->ip4_prefix_len) || ++ nla_put_u8(skb, IFLA_IPTUN_FMR_EA_LEN, c->ea_len) || ++ nla_put_u8(skb, IFLA_IPTUN_FMR_OFFSET, c->offset)) ++ goto nla_put_failure; ++ ++ nla_nest_end(skb, fmr); ++ } ++ nla_nest_end(skb, fmrs); ++ + if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) || + nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) || + nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) || +@@ -2217,6 +2481,7 @@ static const struct nla_policy ip6_tnl_p + [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 }, + [IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG }, + [IFLA_IPTUN_FWMARK] = { .type = NLA_U32 }, ++ [IFLA_IPTUN_FMRS] = { .type = NLA_NESTED }, + }; + + static struct rtnl_link_ops ip6_link_ops __read_mostly = { diff --git a/target/linux/generic/pending-5.15/670-ipv6-allow-rejecting-with-source-address-failed-policy.patch b/target/linux/generic/pending-5.15/670-ipv6-allow-rejecting-with-source-address-failed-policy.patch new file mode 100644 index 0000000000..d85735531b --- /dev/null +++ b/target/linux/generic/pending-5.15/670-ipv6-allow-rejecting-with-source-address-failed-policy.patch @@ -0,0 +1,263 @@ +From: Jonas Gorski <jogo@openwrt.org> +Subject: ipv6: allow rejecting with "source address failed policy" + +RFC6204 L-14 requires rejecting traffic from invalid addresses with +ICMPv6 Destination Unreachable, Code 5 (Source address failed ingress/ +egress policy) on the LAN side, so add an appropriate rule for that. 
+ +Signed-off-by: Jonas Gorski <jogo@openwrt.org> +--- + include/net/netns/ipv6.h | 1 + + include/uapi/linux/fib_rules.h | 4 +++ + include/uapi/linux/rtnetlink.h | 1 + + net/ipv4/fib_semantics.c | 4 +++ + net/ipv4/fib_trie.c | 1 + + net/ipv4/ipmr.c | 1 + + net/ipv6/fib6_rules.c | 4 +++ + net/ipv6/ip6mr.c | 2 ++ + net/ipv6/route.c | 58 +++++++++++++++++++++++++++++++++++++++++- + 9 files changed, 75 insertions(+), 1 deletion(-) + +--- a/include/net/netns/ipv6.h ++++ b/include/net/netns/ipv6.h +@@ -88,6 +88,7 @@ struct netns_ipv6 { + unsigned int fib6_routes_require_src; + #endif + struct rt6_info *ip6_prohibit_entry; ++ struct rt6_info *ip6_policy_failed_entry; + struct rt6_info *ip6_blk_hole_entry; + struct fib6_table *fib6_local_tbl; + struct fib_rules_ops *fib6_rules_ops; +--- a/include/uapi/linux/fib_rules.h ++++ b/include/uapi/linux/fib_rules.h +@@ -82,6 +82,10 @@ enum { + FR_ACT_BLACKHOLE, /* Drop without notification */ + FR_ACT_UNREACHABLE, /* Drop with ENETUNREACH */ + FR_ACT_PROHIBIT, /* Drop with EACCES */ ++ FR_ACT_RES9, ++ FR_ACT_RES10, ++ FR_ACT_RES11, ++ FR_ACT_POLICY_FAILED, /* Drop with EACCES */ + __FR_ACT_MAX, + }; + +--- a/include/uapi/linux/rtnetlink.h ++++ b/include/uapi/linux/rtnetlink.h +@@ -249,6 +249,7 @@ enum { + RTN_THROW, /* Not in this table */ + RTN_NAT, /* Translate this address */ + RTN_XRESOLVE, /* Use external resolver */ ++ RTN_POLICY_FAILED, /* Failed ingress/egress policy */ + __RTN_MAX + }; + +--- a/net/ipv4/fib_semantics.c ++++ b/net/ipv4/fib_semantics.c +@@ -142,6 +142,10 @@ const struct fib_prop fib_props[RTN_MAX + .error = -EINVAL, + .scope = RT_SCOPE_NOWHERE, + }, ++ [RTN_POLICY_FAILED] = { ++ .error = -EACCES, ++ .scope = RT_SCOPE_UNIVERSE, ++ }, + }; + + static void rt_fibinfo_free(struct rtable __rcu **rtp) +--- a/net/ipv4/fib_trie.c ++++ b/net/ipv4/fib_trie.c +@@ -2734,6 +2734,7 @@ static const char *const rtn_type_names[ + [RTN_THROW] = "THROW", + [RTN_NAT] = "NAT", + [RTN_XRESOLVE] = "XRESOLVE", ++ [RTN_POLICY_FAILED] = "POLICY_FAILED", + }; + + static inline const char *rtn_type(char *buf, size_t len, unsigned int t) +--- a/net/ipv4/ipmr.c ++++ b/net/ipv4/ipmr.c +@@ -175,6 +175,7 @@ static int ipmr_rule_action(struct fib_r + case FR_ACT_UNREACHABLE: + return -ENETUNREACH; + case FR_ACT_PROHIBIT: ++ case FR_ACT_POLICY_FAILED: + return -EACCES; + case FR_ACT_BLACKHOLE: + default: +--- a/net/ipv6/fib6_rules.c ++++ b/net/ipv6/fib6_rules.c +@@ -220,6 +220,10 @@ static int __fib6_rule_action(struct fib + err = -EACCES; + rt = net->ipv6.ip6_prohibit_entry; + goto discard_pkt; ++ case FR_ACT_POLICY_FAILED: ++ err = -EACCES; ++ rt = net->ipv6.ip6_policy_failed_entry; ++ goto discard_pkt; + } + + tb_id = fib_rule_get_table(rule, arg); +--- a/net/ipv6/ip6mr.c ++++ b/net/ipv6/ip6mr.c +@@ -163,6 +163,8 @@ static int ip6mr_rule_action(struct fib_ + return -ENETUNREACH; + case FR_ACT_PROHIBIT: + return -EACCES; ++ case FR_ACT_POLICY_FAILED: ++ return -EACCES; + case FR_ACT_BLACKHOLE: + default: + return -EINVAL; +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -95,6 +95,8 @@ static int ip6_pkt_discard(struct sk_bu + static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb); + static int ip6_pkt_prohibit(struct sk_buff *skb); + static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb); ++static int ip6_pkt_policy_failed(struct sk_buff *skb); ++static int ip6_pkt_policy_failed_out(struct net *net, struct sock *sk, struct sk_buff *skb); + static void ip6_link_failure(struct sk_buff *skb); + static 
void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, + struct sk_buff *skb, u32 mtu, +@@ -310,6 +312,18 @@ static const struct rt6_info ip6_prohibi + .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), + }; + ++static const struct rt6_info ip6_policy_failed_entry_template = { ++ .dst = { ++ .__refcnt = ATOMIC_INIT(1), ++ .__use = 1, ++ .obsolete = DST_OBSOLETE_FORCE_CHK, ++ .error = -EACCES, ++ .input = ip6_pkt_policy_failed, ++ .output = ip6_pkt_policy_failed_out, ++ }, ++ .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), ++}; ++ + static const struct rt6_info ip6_blk_hole_entry_template = { + .dst = { + .__refcnt = ATOMIC_INIT(1), +@@ -1031,6 +1045,7 @@ static const int fib6_prop[RTN_MAX + 1] + [RTN_BLACKHOLE] = -EINVAL, + [RTN_UNREACHABLE] = -EHOSTUNREACH, + [RTN_PROHIBIT] = -EACCES, ++ [RTN_POLICY_FAILED] = -EACCES, + [RTN_THROW] = -EAGAIN, + [RTN_NAT] = -EINVAL, + [RTN_XRESOLVE] = -EINVAL, +@@ -1066,6 +1081,10 @@ static void ip6_rt_init_dst_reject(struc + rt->dst.output = ip6_pkt_prohibit_out; + rt->dst.input = ip6_pkt_prohibit; + break; ++ case RTN_POLICY_FAILED: ++ rt->dst.output = ip6_pkt_policy_failed_out; ++ rt->dst.input = ip6_pkt_policy_failed; ++ break; + case RTN_THROW: + case RTN_UNREACHABLE: + default: +@@ -4448,6 +4467,17 @@ static int ip6_pkt_prohibit_out(struct n + return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); + } + ++static int ip6_pkt_policy_failed(struct sk_buff *skb) ++{ ++ return ip6_pkt_drop(skb, ICMPV6_POLICY_FAIL, IPSTATS_MIB_INNOROUTES); ++} ++ ++static int ip6_pkt_policy_failed_out(struct net *net, struct sock *sk, struct sk_buff *skb) ++{ ++ skb->dev = skb_dst(skb)->dev; ++ return ip6_pkt_drop(skb, ICMPV6_POLICY_FAIL, IPSTATS_MIB_OUTNOROUTES); ++} ++ + /* + * Allocate a dst for local (unicast / anycast) address. 
+ */ +@@ -4928,7 +4958,8 @@ static int rtm_to_fib6_config(struct sk_ + if (rtm->rtm_type == RTN_UNREACHABLE || + rtm->rtm_type == RTN_BLACKHOLE || + rtm->rtm_type == RTN_PROHIBIT || +- rtm->rtm_type == RTN_THROW) ++ rtm->rtm_type == RTN_THROW || ++ rtm->rtm_type == RTN_POLICY_FAILED) + cfg->fc_flags |= RTF_REJECT; + + if (rtm->rtm_type == RTN_LOCAL) +@@ -6127,6 +6158,8 @@ static int ip6_route_dev_notify(struct n + #ifdef CONFIG_IPV6_MULTIPLE_TABLES + net->ipv6.ip6_prohibit_entry->dst.dev = dev; + net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev); ++ net->ipv6.ip6_policy_failed_entry->dst.dev = dev; ++ net->ipv6.ip6_policy_failed_entry->rt6i_idev = in6_dev_get(dev); + net->ipv6.ip6_blk_hole_entry->dst.dev = dev; + net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); + #endif +@@ -6138,6 +6171,7 @@ static int ip6_route_dev_notify(struct n + in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev); + #ifdef CONFIG_IPV6_MULTIPLE_TABLES + in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev); ++ in6_dev_put_clear(&net->ipv6.ip6_policy_failed_entry->rt6i_idev); + in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev); + #endif + } +@@ -6329,6 +6363,8 @@ static int __net_init ip6_route_net_init + + #ifdef CONFIG_IPV6_MULTIPLE_TABLES + net->ipv6.fib6_has_custom_rules = false; ++ ++ + net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template, + sizeof(*net->ipv6.ip6_prohibit_entry), + GFP_KERNEL); +@@ -6339,11 +6375,21 @@ static int __net_init ip6_route_net_init + ip6_template_metrics, true); + INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached); + ++ net->ipv6.ip6_policy_failed_entry = ++ kmemdup(&ip6_policy_failed_entry_template, ++ sizeof(*net->ipv6.ip6_policy_failed_entry), GFP_KERNEL); ++ if (!net->ipv6.ip6_policy_failed_entry) ++ goto out_ip6_prohibit_entry; ++ net->ipv6.ip6_policy_failed_entry->dst.ops = &net->ipv6.ip6_dst_ops; ++ dst_init_metrics(&net->ipv6.ip6_policy_failed_entry->dst, ++ ip6_template_metrics, true); ++ INIT_LIST_HEAD(&net->ipv6.ip6_policy_failed_entry->rt6i_uncached); ++ + net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template, + sizeof(*net->ipv6.ip6_blk_hole_entry), + GFP_KERNEL); + if (!net->ipv6.ip6_blk_hole_entry) +- goto out_ip6_prohibit_entry; ++ goto out_ip6_policy_failed_entry; + net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops; + dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst, + ip6_template_metrics, true); +@@ -6370,6 +6416,8 @@ out: + return ret; + + #ifdef CONFIG_IPV6_MULTIPLE_TABLES ++out_ip6_policy_failed_entry: ++ kfree(net->ipv6.ip6_policy_failed_entry); + out_ip6_prohibit_entry: + kfree(net->ipv6.ip6_prohibit_entry); + out_ip6_null_entry: +@@ -6389,6 +6437,7 @@ static void __net_exit ip6_route_net_exi + kfree(net->ipv6.ip6_null_entry); + #ifdef CONFIG_IPV6_MULTIPLE_TABLES + kfree(net->ipv6.ip6_prohibit_entry); ++ kfree(net->ipv6.ip6_policy_failed_entry); + kfree(net->ipv6.ip6_blk_hole_entry); + #endif + dst_entries_destroy(&net->ipv6.ip6_dst_ops); +@@ -6466,6 +6515,9 @@ void __init ip6_route_init_special_entri + init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); + init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev; + init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); ++ init_net.ipv6.ip6_policy_failed_entry->dst.dev = init_net.loopback_dev; ++ init_net.ipv6.ip6_policy_failed_entry->rt6i_idev = ++ in6_dev_get(init_net.loopback_dev); + #endif + } + diff --git 
a/target/linux/generic/pending-5.15/671-net-provide-defines-for-_POLICY_FAILED-until-all-cod.patch b/target/linux/generic/pending-5.15/671-net-provide-defines-for-_POLICY_FAILED-until-all-cod.patch new file mode 100644 index 0000000000..0e7f35db89 --- /dev/null +++ b/target/linux/generic/pending-5.15/671-net-provide-defines-for-_POLICY_FAILED-until-all-cod.patch @@ -0,0 +1,50 @@ +From: Jonas Gorski <jogo@openwrt.org> +Subject: net: provide defines for _POLICY_FAILED until all code is updated + +Upstream introduced ICMPV6_POLICY_FAIL for code 5 of destination +unreachable, conflicting with our name. + +Add appropriate defines to allow our code to build with the new +name until we have updated our local patches for older kernels +and userspace packages. + +Signed-off-by: Jonas Gorski <jogo@openwrt.org> +--- + include/uapi/linux/fib_rules.h | 2 ++ + include/uapi/linux/icmpv6.h | 2 ++ + include/uapi/linux/rtnetlink.h | 2 ++ + 3 files changed, 6 insertions(+) + +--- a/include/uapi/linux/fib_rules.h ++++ b/include/uapi/linux/fib_rules.h +@@ -89,6 +89,8 @@ enum { + __FR_ACT_MAX, + }; + ++#define FR_ACT_FAILED_POLICY FR_ACT_POLICY_FAILED ++ + #define FR_ACT_MAX (__FR_ACT_MAX - 1) + + #endif +--- a/include/uapi/linux/icmpv6.h ++++ b/include/uapi/linux/icmpv6.h +@@ -126,6 +126,8 @@ struct icmp6hdr { + #define ICMPV6_POLICY_FAIL 5 + #define ICMPV6_REJECT_ROUTE 6 + ++#define ICMPV6_FAILED_POLICY ICMPV6_POLICY_FAIL ++ + /* + * Codes for Time Exceeded + */ +--- a/include/uapi/linux/rtnetlink.h ++++ b/include/uapi/linux/rtnetlink.h +@@ -253,6 +253,8 @@ enum { + __RTN_MAX + }; + ++#define RTN_FAILED_POLICY RTN_POLICY_FAILED ++ + #define RTN_MAX (__RTN_MAX - 1) + + diff --git a/target/linux/generic/pending-5.15/680-NET-skip-GRO-for-foreign-MAC-addresses.patch b/target/linux/generic/pending-5.15/680-NET-skip-GRO-for-foreign-MAC-addresses.patch new file mode 100644 index 0000000000..e64e0421e1 --- /dev/null +++ b/target/linux/generic/pending-5.15/680-NET-skip-GRO-for-foreign-MAC-addresses.patch @@ -0,0 +1,149 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: net: replace GRO optimization patch with a new one that supports VLANs/bridges with different MAC addresses + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + include/linux/netdevice.h | 2 ++ + include/linux/skbuff.h | 3 ++- + net/core/dev.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++ + net/ethernet/eth.c | 18 +++++++++++++++++- + 4 files changed, 69 insertions(+), 2 deletions(-) + +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -2036,6 +2036,8 @@ struct net_device { + struct netdev_hw_addr_list mc; + struct netdev_hw_addr_list dev_addrs; + ++ unsigned char local_addr_mask[MAX_ADDR_LEN]; ++ + #ifdef CONFIG_SYSFS + struct kset *queues_kset; + #endif +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -858,6 +858,7 @@ struct sk_buff { + #ifdef CONFIG_TLS_DEVICE + __u8 decrypted:1; + #endif ++ __u8 gro_skip:1; + + #ifdef CONFIG_NET_SCHED + __u16 tc_index; /* traffic control index */ +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -6062,6 +6062,9 @@ static enum gro_result dev_gro_receive(s + int same_flow; + int grow; + ++ if (skb->gro_skip) ++ goto normal; ++ + if (netif_elide_gro(skb->dev)) + goto normal; + +@@ -8039,6 +8042,48 @@ static void __netdev_adjacent_dev_unlink + &upper_dev->adj_list.lower); + } + ++static void __netdev_addr_mask(unsigned char *mask, const unsigned char *addr, ++ struct net_device *dev) ++{ ++ int i; ++ ++ for (i = 0; i < dev->addr_len; i++) ++ mask[i] |= addr[i] ^ dev->dev_addr[i]; 
++} ++ ++static void __netdev_upper_mask(unsigned char *mask, struct net_device *dev, ++ struct net_device *lower) ++{ ++ struct net_device *cur; ++ struct list_head *iter; ++ ++ netdev_for_each_upper_dev_rcu(dev, cur, iter) { ++ __netdev_addr_mask(mask, cur->dev_addr, lower); ++ __netdev_upper_mask(mask, cur, lower); ++ } ++} ++ ++static void __netdev_update_addr_mask(struct net_device *dev) ++{ ++ unsigned char mask[MAX_ADDR_LEN]; ++ struct net_device *cur; ++ struct list_head *iter; ++ ++ memset(mask, 0, sizeof(mask)); ++ __netdev_upper_mask(mask, dev, dev); ++ memcpy(dev->local_addr_mask, mask, dev->addr_len); ++ ++ netdev_for_each_lower_dev(dev, cur, iter) ++ __netdev_update_addr_mask(cur); ++} ++ ++static void netdev_update_addr_mask(struct net_device *dev) ++{ ++ rcu_read_lock(); ++ __netdev_update_addr_mask(dev); ++ rcu_read_unlock(); ++} ++ + static int __netdev_upper_dev_link(struct net_device *dev, + struct net_device *upper_dev, bool master, + void *upper_priv, void *upper_info, +@@ -8090,6 +8135,7 @@ static int __netdev_upper_dev_link(struc + if (ret) + return ret; + ++ netdev_update_addr_mask(dev); + ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, + &changeupper_info.info); + ret = notifier_to_errno(ret); +@@ -8186,6 +8232,7 @@ static void __netdev_upper_dev_unlink(st + + __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); + ++ netdev_update_addr_mask(dev); + call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, + &changeupper_info.info); + +@@ -8972,6 +9019,7 @@ int dev_set_mac_address(struct net_devic + if (err) + return err; + dev->addr_assign_type = NET_ADDR_SET; ++ netdev_update_addr_mask(dev); + call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); + add_device_randomness(dev->dev_addr, dev->addr_len); + return 0; +--- a/net/ethernet/eth.c ++++ b/net/ethernet/eth.c +@@ -143,6 +143,18 @@ u32 eth_get_headlen(const struct net_dev + } + EXPORT_SYMBOL(eth_get_headlen); + ++static inline bool ++eth_check_local_mask(const void *addr1, const void *addr2, const void *mask) ++{ ++ const u16 *a1 = addr1; ++ const u16 *a2 = addr2; ++ const u16 *m = mask; ++ ++ return (((a1[0] ^ a2[0]) & ~m[0]) | ++ ((a1[1] ^ a2[1]) & ~m[1]) | ++ ((a1[2] ^ a2[2]) & ~m[2])); ++} ++ + /** + * eth_type_trans - determine the packet's protocol ID. + * @skb: received socket data +@@ -174,6 +186,10 @@ __be16 eth_type_trans(struct sk_buff *sk + } else { + skb->pkt_type = PACKET_OTHERHOST; + } ++ ++ if (eth_check_local_mask(eth->h_dest, dev->dev_addr, ++ dev->local_addr_mask)) ++ skb->gro_skip = 1; + } + + /* diff --git a/target/linux/generic/pending-5.15/682-of_net-add-mac-address-increment-support.patch b/target/linux/generic/pending-5.15/682-of_net-add-mac-address-increment-support.patch new file mode 100644 index 0000000000..82e81f37c5 --- /dev/null +++ b/target/linux/generic/pending-5.15/682-of_net-add-mac-address-increment-support.patch @@ -0,0 +1,89 @@ +From 844c273286f328acf0dab5fbd5d864366b4904dc Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Tue, 30 Mar 2021 18:21:14 +0200 +Subject: [PATCH] of_net: add mac-address-increment support + +Lots of embedded devices use the mac-address of other interface +extracted from nvmem cells and increments it by one or two. Add two +bindings to integrate this and directly use the right mac-address for +the interface. Some example are some routers that use the gmac +mac-address stored in the art partition and increments it by one for the +wifi. 
mac-address-increment-byte bindings is used to tell what byte of +the mac-address has to be increased (if not defined the last byte is +increased) and mac-address-increment tells how much the byte decided +early has to be increased. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +--- + drivers/of/of_net.c | 43 +++++++++++++++++++++++++++++++++++++++---- + 1 file changed, 39 insertions(+), 4 deletions(-) + +--- a/drivers/of/of_net.c ++++ b/drivers/of/of_net.c +@@ -115,27 +115,62 @@ static int of_get_mac_addr_nvmem(struct + * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists + * but is all zeros. + * ++ * DT can tell the system to increment the mac-address after is extracted by ++ * using: ++ * - mac-address-increment-byte to decide what byte to increase ++ * (if not defined is increased the last byte) ++ * - mac-address-increment to decide how much to increase. The value WILL ++ * overflow to other bytes if the increment is over 255 or the total ++ * increment will exceed 255 of the current byte. ++ * (example 00:01:02:03:04:ff + 1 == 00:01:02:03:05:00) ++ * (example 00:01:02:03:04:fe + 5 == 00:01:02:03:05:03) ++ * + * Return: 0 on success and errno in case of error. + */ + int of_get_mac_address(struct device_node *np, u8 *addr) + { ++ u32 inc_idx, mac_inc, mac_val; + int ret; + ++ /* Check first if the increment byte is present and valid. ++ * If not set assume to increment the last byte if found. ++ */ ++ if (of_property_read_u32(np, "mac-address-increment-byte", &inc_idx)) ++ inc_idx = 5; ++ if (inc_idx < 3 || inc_idx > 5) ++ return -EINVAL; ++ + if (!np) + return -ENODEV; + + ret = of_get_mac_addr(np, "mac-address", addr); + if (!ret) +- return 0; ++ goto found; + + ret = of_get_mac_addr(np, "local-mac-address", addr); + if (!ret) +- return 0; ++ goto found; + + ret = of_get_mac_addr(np, "address", addr); + if (!ret) +- return 0; ++ goto found; ++ ++ ret = of_get_mac_addr_nvmem(np, addr); ++ if (ret) ++ return ret; ++ ++found: ++ if (!of_property_read_u32(np, "mac-address-increment", &mac_inc)) { ++ /* Convert to a contiguous value */ ++ mac_val = (addr[3] << 16) + (addr[4] << 8) + addr[5]; ++ mac_val += mac_inc << 8 * (5-inc_idx); ++ ++ /* Apply the incremented value handling overflow case */ ++ addr[3] = (mac_val >> 16) & 0xff; ++ addr[4] = (mac_val >> 8) & 0xff; ++ addr[5] = (mac_val >> 0) & 0xff; ++ } + +- return of_get_mac_addr_nvmem(np, addr); ++ return ret; + } + EXPORT_SYMBOL(of_get_mac_address); diff --git a/target/linux/generic/pending-5.15/683-of_net-add-mac-address-to-of-tree.patch b/target/linux/generic/pending-5.15/683-of_net-add-mac-address-to-of-tree.patch new file mode 100644 index 0000000000..03cd763d9d --- /dev/null +++ b/target/linux/generic/pending-5.15/683-of_net-add-mac-address-to-of-tree.patch @@ -0,0 +1,38 @@ +--- a/drivers/of/of_net.c ++++ b/drivers/of/of_net.c +@@ -95,6 +95,27 @@ static int of_get_mac_addr_nvmem(struct + return 0; + } + ++static int of_add_mac_address(struct device_node *np, u8* addr) ++{ ++ struct property *prop; ++ ++ prop = kzalloc(sizeof(*prop), GFP_KERNEL); ++ if (!prop) ++ return -ENOMEM; ++ ++ prop->name = "mac-address"; ++ prop->length = ETH_ALEN; ++ prop->value = kmemdup(addr, ETH_ALEN, GFP_KERNEL); ++ if (!prop->value || of_update_property(np, prop)) ++ goto free; ++ ++ return 0; ++free: ++ kfree(prop->value); ++ kfree(prop); ++ return -ENOMEM; ++} ++ + /** + * Search the device tree for the best MAC address to use. 
'mac-address' is + * checked first, because that is supposed to contain to "most recent" MAC +@@ -171,6 +192,7 @@ found: + addr[5] = (mac_val >> 0) & 0xff; + } + ++ of_add_mac_address(np, addr); + return ret; + } + EXPORT_SYMBOL(of_get_mac_address); diff --git a/target/linux/generic/pending-5.15/700-net-ethernet-mtk_eth_soc-avoid-creating-duplicate-of.patch b/target/linux/generic/pending-5.15/700-net-ethernet-mtk_eth_soc-avoid-creating-duplicate-of.patch new file mode 100644 index 0000000000..ff090f07f1 --- /dev/null +++ b/target/linux/generic/pending-5.15/700-net-ethernet-mtk_eth_soc-avoid-creating-duplicate-of.patch @@ -0,0 +1,26 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Thu, 8 Jul 2021 07:08:29 +0200 +Subject: [PATCH] net: ethernet: mtk_eth_soc: avoid creating duplicate offload + entries + +Sometimes multiple CLS_REPLACE calls are issued for the same connection. +rhashtable_insert_fast does not check for these duplicates, so multiple +hardware flow entries can be created. +Fix this by checking for an existing entry early + +Fixes: 502e84e2382d ("net: ethernet: mtk_eth_soc: add flow offloading support") +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -187,6 +187,9 @@ mtk_flow_offload_replace(struct mtk_eth + int hash; + int i; + ++ if (rhashtable_lookup(ð->flow_table, &f->cookie, mtk_flow_ht_params)) ++ return -EEXIST; ++ + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) { + struct flow_match_meta match; + diff --git a/target/linux/generic/pending-5.15/701-net-ethernet-mtk_eth_soc-add-ipv6-flow-offloading-support.patch b/target/linux/generic/pending-5.15/701-net-ethernet-mtk_eth_soc-add-ipv6-flow-offloading-support.patch new file mode 100644 index 0000000000..7140c6877e --- /dev/null +++ b/target/linux/generic/pending-5.15/701-net-ethernet-mtk_eth_soc-add-ipv6-flow-offloading-support.patch @@ -0,0 +1,65 @@ +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -7,6 +7,7 @@ + #include <linux/rhashtable.h> + #include <linux/if_ether.h> + #include <linux/ip.h> ++#include <linux/ipv6.h> + #include <net/flow_offload.h> + #include <net/pkt_cls.h> + #include <net/dsa.h> +@@ -20,6 +21,11 @@ struct mtk_flow_data { + __be32 src_addr; + __be32 dst_addr; + } v4; ++ ++ struct { ++ struct in6_addr src_addr; ++ struct in6_addr dst_addr; ++ } v6; + }; + + __be16 src_port; +@@ -64,6 +70,14 @@ mtk_flow_set_ipv4_addr(struct mtk_foe_en + data->v4.dst_addr, data->dst_port); + } + ++static int ++mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data) ++{ ++ return mtk_foe_entry_set_ipv6_tuple(foe, ++ data->v6.src_addr.s6_addr32, data->src_port, ++ data->v6.dst_addr.s6_addr32, data->dst_port); ++} ++ + static void + mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth) + { +@@ -254,6 +268,9 @@ mtk_flow_offload_replace(struct mtk_eth + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; + break; ++ case FLOW_DISSECTOR_KEY_IPV6_ADDRS: ++ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; ++ break; + default: + return -EOPNOTSUPP; + } +@@ -289,6 +306,17 @@ mtk_flow_offload_replace(struct mtk_eth + mtk_flow_set_ipv4_addr(&foe, &data, false); + } + ++ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { ++ struct flow_match_ipv6_addrs addrs; ++ ++ flow_rule_match_ipv6_addrs(rule, &addrs); ++ ++ data.v6.src_addr = addrs.key->src; ++ data.v6.dst_addr = 
addrs.key->dst; ++ ++ mtk_flow_set_ipv6_addr(&foe, &data); ++ } ++ + flow_action_for_each(i, act, &rule->action) { + if (act->id != FLOW_ACTION_MANGLE) + continue; diff --git a/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch b/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch new file mode 100644 index 0000000000..da9c040322 --- /dev/null +++ b/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch @@ -0,0 +1,41 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Mon, 21 Mar 2022 20:39:59 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: enable threaded NAPI + +This can improve performance under load by ensuring that NAPI processing is +not pinned on CPU 0. + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -2171,8 +2171,8 @@ static irqreturn_t mtk_handle_irq_rx(int + + eth->rx_events++; + if (likely(napi_schedule_prep(ð->rx_napi))) { +- __napi_schedule(ð->rx_napi); + mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); ++ __napi_schedule(ð->rx_napi); + } + + return IRQ_HANDLED; +@@ -2184,8 +2184,8 @@ static irqreturn_t mtk_handle_irq_tx(int + + eth->tx_events++; + if (likely(napi_schedule_prep(ð->tx_napi))) { +- __napi_schedule(ð->tx_napi); + mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); ++ __napi_schedule(ð->tx_napi); + } + + return IRQ_HANDLED; +@@ -3229,6 +3229,8 @@ static int mtk_probe(struct platform_dev + * for NAPI to work + */ + init_dummy_netdev(ð->dummy_dev); ++ eth->dummy_dev.threaded = 1; ++ strcpy(eth->dummy_dev.name, "mtk_eth"); + netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx, + MTK_NAPI_WEIGHT); + netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx, diff --git a/target/linux/generic/pending-5.15/703-phy-add-detach-callback-to-struct-phy_driver.patch b/target/linux/generic/pending-5.15/703-phy-add-detach-callback-to-struct-phy_driver.patch new file mode 100644 index 0000000000..52350ecccc --- /dev/null +++ b/target/linux/generic/pending-5.15/703-phy-add-detach-callback-to-struct-phy_driver.patch @@ -0,0 +1,38 @@ +From: Gabor Juhos <juhosg@openwrt.org> +Subject: generic: add detach callback to struct phy_driver + +lede-commit: fe61fc2d7d0b3fb348b502f68f98243b3ddf5867 + +Signed-off-by: Gabor Juhos <juhosg@openwrt.org> +--- + drivers/net/phy/phy_device.c | 3 +++ + include/linux/phy.h | 6 ++++++ + 2 files changed, 9 insertions(+) + +--- a/drivers/net/phy/phy_device.c ++++ b/drivers/net/phy/phy_device.c +@@ -1651,6 +1651,9 @@ void phy_detach(struct phy_device *phyde + struct module *ndev_owner = NULL; + struct mii_bus *bus; + ++ if (phydev->drv && phydev->drv->detach) ++ phydev->drv->detach(phydev); ++ + if (phydev->sysfs_links) { + if (dev) + sysfs_remove_link(&dev->dev.kobj, "phydev"); +--- a/include/linux/phy.h ++++ b/include/linux/phy.h +@@ -765,6 +765,12 @@ struct phy_driver { + /** @handle_interrupt: Override default interrupt handling */ + irqreturn_t (*handle_interrupt)(struct phy_device *phydev); + ++ /* ++ * Called before an ethernet device is detached ++ * from the PHY. 
++ */ ++ void (*detach)(struct phy_device *phydev); ++ + /** @remove: Clears up any memory if needed */ + void (*remove)(struct phy_device *phydev); + diff --git a/target/linux/generic/pending-5.15/710-bridge-add-knob-for-filtering-rx-tx-BPDU-pack.patch b/target/linux/generic/pending-5.15/710-bridge-add-knob-for-filtering-rx-tx-BPDU-pack.patch new file mode 100644 index 0000000000..ecd6e9e7a2 --- /dev/null +++ b/target/linux/generic/pending-5.15/710-bridge-add-knob-for-filtering-rx-tx-BPDU-pack.patch @@ -0,0 +1,177 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Fri, 27 Aug 2021 12:22:32 +0200 +Subject: [PATCH] bridge: add knob for filtering rx/tx BPDU packets on a port + +Some devices (e.g. wireless APs) can't have devices behind them be part of +a bridge topology with redundant links, due to address limitations. +Additionally, broadcast traffic on these devices is somewhat expensive, due to +the low data rate and wakeups of clients in powersave mode. +This knob can be used to ensure that BPDU packets are never sent or forwarded +to/from these devices + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + +--- a/include/linux/if_bridge.h ++++ b/include/linux/if_bridge.h +@@ -56,6 +56,7 @@ struct br_ip_list { + #define BR_MRP_AWARE BIT(17) + #define BR_MRP_LOST_CONT BIT(18) + #define BR_MRP_LOST_IN_CONT BIT(19) ++#define BR_BPDU_FILTER BIT(20) + + #define BR_DEFAULT_AGEING_TIME (300 * HZ) + +--- a/net/bridge/br_forward.c ++++ b/net/bridge/br_forward.c +@@ -191,6 +191,7 @@ out: + void br_flood(struct net_bridge *br, struct sk_buff *skb, + enum br_pkt_type pkt_type, bool local_rcv, bool local_orig) + { ++ const unsigned char *dest = eth_hdr(skb)->h_dest; + struct net_bridge_port *prev = NULL; + struct net_bridge_port *p; + +@@ -206,6 +207,10 @@ void br_flood(struct net_bridge *br, str + case BR_PKT_MULTICAST: + if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev) + continue; ++ if ((p->flags & BR_BPDU_FILTER) && ++ unlikely(is_link_local_ether_addr(dest) && ++ dest[5] == 0)) ++ continue; + break; + case BR_PKT_BROADCAST: + if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev) +--- a/net/bridge/br_input.c ++++ b/net/bridge/br_input.c +@@ -305,6 +305,8 @@ static rx_handler_result_t br_handle_fra + fwd_mask |= p->group_fwd_mask; + switch (dest[5]) { + case 0x00: /* Bridge Group Address */ ++ if (p->flags & BR_BPDU_FILTER) ++ goto drop; + /* If STP is turned off, + then must forward to keep loop detection */ + if (p->br->stp_enabled == BR_NO_STP || +--- a/net/bridge/br_sysfs_if.c ++++ b/net/bridge/br_sysfs_if.c +@@ -233,6 +233,7 @@ BRPORT_ATTR_FLAG(multicast_flood, BR_MCA + BRPORT_ATTR_FLAG(broadcast_flood, BR_BCAST_FLOOD); + BRPORT_ATTR_FLAG(neigh_suppress, BR_NEIGH_SUPPRESS); + BRPORT_ATTR_FLAG(isolated, BR_ISOLATED); ++BRPORT_ATTR_FLAG(bpdu_filter, BR_BPDU_FILTER); + + #ifdef CONFIG_BRIDGE_IGMP_SNOOPING + static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf) +@@ -285,6 +286,7 @@ static const struct brport_attribute *br + &brport_attr_group_fwd_mask, + &brport_attr_neigh_suppress, + &brport_attr_isolated, ++ &brport_attr_bpdu_filter, + &brport_attr_backup_port, + NULL + }; +--- a/net/bridge/br_stp_bpdu.c ++++ b/net/bridge/br_stp_bpdu.c +@@ -80,7 +80,8 @@ void br_send_config_bpdu(struct net_brid + { + unsigned char buf[35]; + +- if (p->br->stp_enabled != BR_KERNEL_STP) ++ if (p->br->stp_enabled != BR_KERNEL_STP || ++ (p->flags & BR_BPDU_FILTER)) + return; + + buf[0] = 0; +@@ -127,7 +128,8 @@ void br_send_tcn_bpdu(struct net_bridge_ + { + unsigned char buf[4]; + +- if 
(p->br->stp_enabled != BR_KERNEL_STP) ++ if (p->br->stp_enabled != BR_KERNEL_STP || ++ (p->flags & BR_BPDU_FILTER)) + return; + + buf[0] = 0; +@@ -172,6 +174,9 @@ void br_stp_rcv(const struct stp_proto * + if (!(br->dev->flags & IFF_UP)) + goto out; + ++ if (p->flags & BR_BPDU_FILTER) ++ goto out; ++ + if (p->state == BR_STATE_DISABLED) + goto out; + +--- a/include/uapi/linux/if_link.h ++++ b/include/uapi/linux/if_link.h +@@ -524,6 +524,7 @@ enum { + IFLA_BRPORT_BACKUP_PORT, + IFLA_BRPORT_MRP_RING_OPEN, + IFLA_BRPORT_MRP_IN_OPEN, ++ IFLA_BRPORT_BPDU_FILTER, + __IFLA_BRPORT_MAX + }; + #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) +--- a/net/bridge/br_netlink.c ++++ b/net/bridge/br_netlink.c +@@ -137,6 +137,7 @@ static inline size_t br_port_info_size(v + + nla_total_size(1) /* IFLA_BRPORT_VLAN_TUNNEL */ + + nla_total_size(1) /* IFLA_BRPORT_NEIGH_SUPPRESS */ + + nla_total_size(1) /* IFLA_BRPORT_ISOLATED */ ++ + nla_total_size(1) /* IFLA_BRPORT_BPDU_FILTER */ + + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_ROOT_ID */ + + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_BRIDGE_ID */ + + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_PORT */ +@@ -220,7 +221,8 @@ static int br_port_fill_attrs(struct sk_ + BR_MRP_LOST_CONT)) || + nla_put_u8(skb, IFLA_BRPORT_MRP_IN_OPEN, + !!(p->flags & BR_MRP_LOST_IN_CONT)) || +- nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED))) ++ nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)) || ++ nla_put_u8(skb, IFLA_BRPORT_BPDU_FILTER, !!(p->flags & BR_BPDU_FILTER))) + return -EMSGSIZE; + + timerval = br_timer_value(&p->message_age_timer); +@@ -728,6 +730,7 @@ static const struct nla_policy br_port_p + [IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 }, + [IFLA_BRPORT_ISOLATED] = { .type = NLA_U8 }, + [IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 }, ++ [IFLA_BRPORT_BPDU_FILTER] = { .type = NLA_U8 }, + }; + + /* Change the state of the port and notify spanning tree */ +@@ -826,6 +829,10 @@ static int br_setport(struct net_bridge_ + if (err) + return err; + ++ err = br_set_port_flag(p, tb, IFLA_BRPORT_BPDU_FILTER, BR_BPDU_FILTER); ++ if (err) ++ return err; ++ + br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? 
true : false; + err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL); + if (err) +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -55,7 +55,7 @@ + #include <net/net_namespace.h> + + #define RTNL_MAX_TYPE 50 +-#define RTNL_SLAVE_MAX_TYPE 36 ++#define RTNL_SLAVE_MAX_TYPE 37 + + struct rtnl_link { + rtnl_doit_func doit; +@@ -4684,7 +4684,9 @@ int ndo_dflt_bridge_getlink(struct sk_bu + brport_nla_put_flag(skb, flags, mask, + IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) || + brport_nla_put_flag(skb, flags, mask, +- IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) { ++ IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD) || ++ brport_nla_put_flag(skb, flags, mask, ++ IFLA_BRPORT_BPDU_FILTER, BR_BPDU_FILTER)) { + nla_nest_cancel(skb, protinfo); + goto nla_put_failure; + } diff --git a/target/linux/generic/pending-5.15/730-net-phy-at803x-fix-feature-detection.patch b/target/linux/generic/pending-5.15/730-net-phy-at803x-fix-feature-detection.patch new file mode 100644 index 0000000000..c49bb19601 --- /dev/null +++ b/target/linux/generic/pending-5.15/730-net-phy-at803x-fix-feature-detection.patch @@ -0,0 +1,66 @@ +From 97ca310aa18a93329ef5cd68c20de89761962f45 Mon Sep 17 00:00:00 2001 +From: David Bauer <mail@david-bauer.net> +Date: Sun, 13 Jun 2021 12:19:36 +0200 +Subject: [PATCH] net: phy: at803x: fix feature detection + +AR8031/AR8033 have different status registers for copper +and fiber operation. However, the extended status register +is the same for both operation modes. + +As a result of that, ESTATUS_1000_XFULL is set to 1 even when +operating in copper TP mode. + +Remove this mode from the supported link modes, as this driver +currently only supports copper operation. + +Signed-off-by: David Bauer <mail@david-bauer.net> +--- + drivers/net/phy/at803x.c | 30 +++++++++++++++++++++++++++++- + 1 file changed, 29 insertions(+), 1 deletion(-) + +--- a/drivers/net/phy/at803x.c ++++ b/drivers/net/phy/at803x.c +@@ -1032,6 +1032,34 @@ static int at803x_set_tunable(struct phy + } + } + ++static int at803x_get_features(struct phy_device *phydev) ++{ ++ int err; ++ ++ err = genphy_read_abilities(phydev); ++ if (err) ++ return err; ++ ++ if (!at803x_match_phy_id(phydev, ATH8031_PHY_ID)) ++ return 0; ++ ++ /* AR8031/AR8033 have different status registers ++ * for copper and fiber operation. However, the ++ * extended status register is the same for both ++ * operation modes. ++ * ++ * As a result of that, ESTATUS_1000_XFULL is set ++ * to 1 even when operating in copper TP mode. ++ * ++ * Remove this mode from the supported link modes, ++ * as this driver currently only supports copper ++ * operation. 
++ */ ++ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, ++ phydev->supported); ++ return 0; ++} ++ + static int at803x_cable_test_result_trans(u16 status) + { + switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) { +@@ -1364,7 +1392,7 @@ static struct phy_driver at803x_driver[] + .resume = at803x_resume, + .read_page = at803x_read_page, + .write_page = at803x_write_page, +- /* PHY_GBIT_FEATURES */ ++ .get_features = at803x_get_features, + .read_status = at803x_read_status, + .aneg_done = at803x_aneg_done, + .ack_interrupt = &at803x_ack_interrupt, diff --git a/target/linux/generic/pending-5.15/760-net-dsa-mv88e6xxx-fix-vlan-setup.patch b/target/linux/generic/pending-5.15/760-net-dsa-mv88e6xxx-fix-vlan-setup.patch new file mode 100644 index 0000000000..16be95ea38 --- /dev/null +++ b/target/linux/generic/pending-5.15/760-net-dsa-mv88e6xxx-fix-vlan-setup.patch @@ -0,0 +1,27 @@ +From a1b291f3f6c80a6c5ccad7283fc472d77a2a4763 Mon Sep 17 00:00:00 2001 +From: Russell King <rmk+kernel@armlinux.org.uk> +Date: Sun, 22 Dec 2019 12:40:11 +0000 +Subject: [PATCH] net: dsa: mv88e6xxx: fix vlan setup + +Provide an option that drivers can set to indicate they want to receive +vlan configuration even when vlan filtering is disabled. This is safe +for Marvell DSA bridges, which do not look up ingress traffic in the +VTU if the port is in 8021Q disabled state. Whether this change is +suitable for all DSA bridges is not known. + +Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk> +Signed-off-by: DENG Qingfang <dqfext@gmail.com> +--- + drivers/net/dsa/mv88e6xxx/chip.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/net/dsa/mv88e6xxx/chip.c ++++ b/drivers/net/dsa/mv88e6xxx/chip.c +@@ -2869,6 +2869,7 @@ static int mv88e6xxx_setup(struct dsa_sw + + chip->ds = ds; + ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip); ++ ds->configure_vlan_while_not_filtering = true; + + mv88e6xxx_reg_lock(chip); + diff --git a/target/linux/generic/pending-5.15/762-net-bridge-switchdev-Refactor-br_switchdev_fdb_notif.patch b/target/linux/generic/pending-5.15/762-net-bridge-switchdev-Refactor-br_switchdev_fdb_notif.patch new file mode 100644 index 0000000000..fbc8342b0e --- /dev/null +++ b/target/linux/generic/pending-5.15/762-net-bridge-switchdev-Refactor-br_switchdev_fdb_notif.patch @@ -0,0 +1,77 @@ +From 46fe6cecb296d850c1ee2b333e57093ac4b733f3 Mon Sep 17 00:00:00 2001 +From: Tobias Waldekranz <tobias@waldekranz.com> +Date: Sat, 16 Jan 2021 02:25:09 +0100 +Subject: [PATCH] net: bridge: switchdev: Refactor br_switchdev_fdb_notify + +Instead of having to add more and more arguments to +br_switchdev_fdb_call_notifiers, get rid of it and build the info +struct directly in br_switchdev_fdb_notify. + +Signed-off-by: Tobias Waldekranz <tobias@waldekranz.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +--- + net/bridge/br_switchdev.c | 41 +++++++++++---------------------------- + 1 file changed, 11 insertions(+), 30 deletions(-) + +--- a/net/bridge/br_switchdev.c ++++ b/net/bridge/br_switchdev.c +@@ -102,25 +102,16 @@ int br_switchdev_set_port_flag(struct ne + return 0; + } + +-static void +-br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac, +- u16 vid, struct net_device *dev, +- bool added_by_user, bool offloaded) +-{ +- struct switchdev_notifier_fdb_info info; +- unsigned long notifier_type; +- +- info.addr = mac; +- info.vid = vid; +- info.added_by_user = added_by_user; +- info.offloaded = offloaded; +- notifier_type = adding ? 
SWITCHDEV_FDB_ADD_TO_DEVICE : SWITCHDEV_FDB_DEL_TO_DEVICE; +- call_switchdev_notifiers(notifier_type, dev, &info.info, NULL); +-} +- + void + br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type) + { ++ struct switchdev_notifier_fdb_info info = { ++ .addr = fdb->key.addr.addr, ++ .vid = fdb->key.vlan_id, ++ .added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags), ++ .offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags), ++ }; ++ + if (!fdb->dst) + return; + if (test_bit(BR_FDB_LOCAL, &fdb->flags)) +@@ -128,22 +119,12 @@ br_switchdev_fdb_notify(const struct net + + switch (type) { + case RTM_DELNEIGH: +- br_switchdev_fdb_call_notifiers(false, fdb->key.addr.addr, +- fdb->key.vlan_id, +- fdb->dst->dev, +- test_bit(BR_FDB_ADDED_BY_USER, +- &fdb->flags), +- test_bit(BR_FDB_OFFLOADED, +- &fdb->flags)); ++ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE, ++ fdb->dst->dev, &info.info, NULL); + break; + case RTM_NEWNEIGH: +- br_switchdev_fdb_call_notifiers(true, fdb->key.addr.addr, +- fdb->key.vlan_id, +- fdb->dst->dev, +- test_bit(BR_FDB_ADDED_BY_USER, +- &fdb->flags), +- test_bit(BR_FDB_OFFLOADED, +- &fdb->flags)); ++ call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE, ++ fdb->dst->dev, &info.info, NULL); + break; + } + } diff --git a/target/linux/generic/pending-5.15/763-net-bridge-switchdev-Include-local-flag-in-FDB-notif.patch b/target/linux/generic/pending-5.15/763-net-bridge-switchdev-Include-local-flag-in-FDB-notif.patch new file mode 100644 index 0000000000..41374c88df --- /dev/null +++ b/target/linux/generic/pending-5.15/763-net-bridge-switchdev-Include-local-flag-in-FDB-notif.patch @@ -0,0 +1,42 @@ +From ec5be4f79026282925ae383caa431a8d41e3456a Mon Sep 17 00:00:00 2001 +From: Tobias Waldekranz <tobias@waldekranz.com> +Date: Sat, 16 Jan 2021 02:25:10 +0100 +Subject: [PATCH] net: bridge: switchdev: Include local flag in FDB + notifications + +Some switchdev drivers, notably DSA, ignore all dynamically learned +address notifications (!added_by_user) as these are autonomously added +by the switch. Previously, such a notification was indistinguishable +from a local address notification. Include a local bit in the +notification so that the two classes can be discriminated. + +This allows DSA-like devices to add local addresses to the hardware +FDB (with the CPU as the destination), thereby avoiding flows towards +the CPU being flooded by the switch as unknown unicast. 
+ +Signed-off-by: Tobias Waldekranz <tobias@waldekranz.com> +--- + include/net/switchdev.h | 1 + + net/bridge/br_switchdev.c | 1 + + 2 files changed, 2 insertions(+) + +--- a/include/net/switchdev.h ++++ b/include/net/switchdev.h +@@ -224,6 +224,7 @@ struct switchdev_notifier_fdb_info { + const unsigned char *addr; + u16 vid; + u8 added_by_user:1, ++ local:1, + offloaded:1; + }; + +--- a/net/bridge/br_switchdev.c ++++ b/net/bridge/br_switchdev.c +@@ -109,6 +109,7 @@ br_switchdev_fdb_notify(const struct net + .addr = fdb->key.addr.addr, + .vid = fdb->key.vlan_id, + .added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags), ++ .local = test_bit(BR_FDB_LOCAL, &fdb->flags), + .offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags), + }; + diff --git a/target/linux/generic/pending-5.15/764-net-bridge-switchdev-Send-FDB-notifications-for-host.patch b/target/linux/generic/pending-5.15/764-net-bridge-switchdev-Send-FDB-notifications-for-host.patch new file mode 100644 index 0000000000..630e03bbfd --- /dev/null +++ b/target/linux/generic/pending-5.15/764-net-bridge-switchdev-Send-FDB-notifications-for-host.patch @@ -0,0 +1,96 @@ +From 2e50fd9322047253c327550b4485cf8761035a8c Mon Sep 17 00:00:00 2001 +From: Tobias Waldekranz <tobias@waldekranz.com> +Date: Sat, 16 Jan 2021 02:25:11 +0100 +Subject: [PATCH] net: bridge: switchdev: Send FDB notifications for host + addresses + +Treat addresses added to the bridge itself in the same way as regular +ports and send out a notification so that drivers may sync it down to +the hardware FDB. + +Signed-off-by: Tobias Waldekranz <tobias@waldekranz.com> +--- + net/bridge/br_fdb.c | 4 ++-- + net/bridge/br_private.h | 7 ++++--- + net/bridge/br_switchdev.c | 11 +++++------ + 3 files changed, 11 insertions(+), 11 deletions(-) + +--- a/net/bridge/br_fdb.c ++++ b/net/bridge/br_fdb.c +@@ -602,7 +602,7 @@ void br_fdb_update(struct net_bridge *br + /* fastpath: update of existing entry */ + if (unlikely(source != fdb->dst && + !test_bit(BR_FDB_STICKY, &fdb->flags))) { +- br_switchdev_fdb_notify(fdb, RTM_DELNEIGH); ++ br_switchdev_fdb_notify(br, fdb, RTM_DELNEIGH); + fdb->dst = source; + fdb_modified = true; + /* Take over HW learned entry */ +@@ -735,7 +735,7 @@ static void fdb_notify(struct net_bridge + int err = -ENOBUFS; + + if (swdev_notify) +- br_switchdev_fdb_notify(fdb, type); ++ br_switchdev_fdb_notify(br, fdb, type); + + skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC); + if (skb == NULL) +--- a/net/bridge/br_private.h ++++ b/net/bridge/br_private.h +@@ -1525,8 +1525,8 @@ bool nbp_switchdev_allowed_egress(const + int br_switchdev_set_port_flag(struct net_bridge_port *p, + unsigned long flags, + unsigned long mask); +-void br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, +- int type); ++void br_switchdev_fdb_notify(struct net_bridge *br, ++ const struct net_bridge_fdb_entry *fdb, int type); + int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags, + struct netlink_ext_ack *extack); + int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid); +@@ -1572,7 +1572,8 @@ static inline int br_switchdev_port_vlan + } + + static inline void +-br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type) ++br_switchdev_fdb_notify(struct net_bridge *br, ++ const struct net_bridge_fdb_entry *fdb, int type) + { + } + +--- a/net/bridge/br_switchdev.c ++++ b/net/bridge/br_switchdev.c +@@ -103,7 +103,8 @@ int br_switchdev_set_port_flag(struct ne + } + + void +-br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type) 
++br_switchdev_fdb_notify(struct net_bridge *br, ++ const struct net_bridge_fdb_entry *fdb, int type) + { + struct switchdev_notifier_fdb_info info = { + .addr = fdb->key.addr.addr, +@@ -112,20 +113,19 @@ br_switchdev_fdb_notify(const struct net + .local = test_bit(BR_FDB_LOCAL, &fdb->flags), + .offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags), + }; ++ struct net_device *dev = fdb->dst ? fdb->dst->dev : br->dev; + +- if (!fdb->dst) +- return; + if (test_bit(BR_FDB_LOCAL, &fdb->flags)) + return; + + switch (type) { + case RTM_DELNEIGH: + call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE, +- fdb->dst->dev, &info.info, NULL); ++ dev, &info.info, NULL); + break; + case RTM_NEWNEIGH: + call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE, +- fdb->dst->dev, &info.info, NULL); ++ dev, &info.info, NULL); + break; + } + } diff --git a/target/linux/generic/pending-5.15/765-net-dsa-Include-local-addresses-in-assisted-CPU-port.patch b/target/linux/generic/pending-5.15/765-net-dsa-Include-local-addresses-in-assisted-CPU-port.patch new file mode 100644 index 0000000000..c0b4fd1d14 --- /dev/null +++ b/target/linux/generic/pending-5.15/765-net-dsa-Include-local-addresses-in-assisted-CPU-port.patch @@ -0,0 +1,36 @@ +From dd082716b43a3684b2f473ae5d1e76d1c076d86d Mon Sep 17 00:00:00 2001 +From: Tobias Waldekranz <tobias@waldekranz.com> +Date: Sat, 16 Jan 2021 02:25:12 +0100 +Subject: [PATCH] net: dsa: Include local addresses in assisted CPU port + learning + +Add local addresses (i.e. the ports' MAC addresses) to the hardware +FDB when assisted CPU port learning is enabled. + +NOTE: The bridge's own MAC address is also "local". If that address is +not shared with any port, the bridge's MAC is not be added by this +functionality - but the following commit takes care of that case. + +Signed-off-by: Tobias Waldekranz <tobias@waldekranz.com> +--- + net/dsa/slave.c | 8 +++++--- + 1 file changed, 5 insertions(+), 3 deletions(-) + +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -2192,10 +2192,12 @@ static int dsa_slave_switchdev_event(str + fdb_info = ptr; + + if (dsa_slave_dev_check(dev)) { +- if (!fdb_info->added_by_user) +- return NOTIFY_OK; +- + dp = dsa_slave_to_port(dev); ++ ++ if (fdb_info->local && dp->ds->assisted_learning_on_cpu_port) ++ dp = dp->cpu_dp; ++ else if (!fdb_info->added_by_user) ++ return NOTIFY_OK; + } else { + /* Snoop addresses learnt on foreign interfaces + * bridged with us, for switches that don't diff --git a/target/linux/generic/pending-5.15/766-net-dsa-Include-bridge-addresses-in-assisted-CPU-por.patch b/target/linux/generic/pending-5.15/766-net-dsa-Include-bridge-addresses-in-assisted-CPU-por.patch new file mode 100644 index 0000000000..0b644d8300 --- /dev/null +++ b/target/linux/generic/pending-5.15/766-net-dsa-Include-bridge-addresses-in-assisted-CPU-por.patch @@ -0,0 +1,30 @@ +From 0663ebde114a6fb2c28c622ba5212b302d4d2581 Mon Sep 17 00:00:00 2001 +From: Tobias Waldekranz <tobias@waldekranz.com> +Date: Sat, 16 Jan 2021 02:25:13 +0100 +Subject: [PATCH] net: dsa: Include bridge addresses in assisted CPU port + learning + +Now that notifications are sent out for addresses added to the bridge +itself, extend DSA to include those addresses in the hardware FDB when +assisted CPU port learning is enabled. 
+ +Signed-off-by: Tobias Waldekranz <tobias@waldekranz.com> +--- + net/dsa/slave.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -2206,7 +2206,11 @@ static int dsa_slave_switchdev_event(str + struct net_device *br_dev; + struct dsa_slave_priv *p; + +- br_dev = netdev_master_upper_dev_get_rcu(dev); ++ if (netif_is_bridge_master(dev)) ++ br_dev = dev; ++ else ++ br_dev = netdev_master_upper_dev_get_rcu(dev); ++ + if (!br_dev) + return NOTIFY_DONE; + diff --git a/target/linux/generic/pending-5.15/767-net-dsa-Sync-static-FDB-entries-on-foreign-interface.patch b/target/linux/generic/pending-5.15/767-net-dsa-Sync-static-FDB-entries-on-foreign-interface.patch new file mode 100644 index 0000000000..f71ea16b97 --- /dev/null +++ b/target/linux/generic/pending-5.15/767-net-dsa-Sync-static-FDB-entries-on-foreign-interface.patch @@ -0,0 +1,56 @@ +From 81e39fd78db82fb51b05fff309b9c521f1a0bc5a Mon Sep 17 00:00:00 2001 +From: Tobias Waldekranz <tobias@waldekranz.com> +Date: Sat, 16 Jan 2021 02:25:14 +0100 +Subject: [PATCH] net: dsa: Sync static FDB entries on foreign interfaces to + hardware + +Reuse the "assisted_learning_on_cpu_port" functionality to always add +entries for user-configured entries on foreign interfaces, even if +assisted_learning_on_cpu_port is not enabled. E.g. in this situation: + + br0 + / \ +swp0 dummy0 + +$ bridge fdb add 02:00:de:ad:00:01 dev dummy0 vlan 1 master + +Results in DSA adding an entry in the hardware FDB, pointing this +address towards the CPU port. + +The same is true for entries added to the bridge itself, e.g: + +$ bridge fdb add 02:00:de:ad:00:01 dev br0 vlan 1 self + +Signed-off-by: Tobias Waldekranz <tobias@waldekranz.com> +--- + net/dsa/slave.c | 12 ++++++++---- + 1 file changed, 8 insertions(+), 4 deletions(-) + +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -2199,9 +2199,12 @@ static int dsa_slave_switchdev_event(str + else if (!fdb_info->added_by_user) + return NOTIFY_OK; + } else { +- /* Snoop addresses learnt on foreign interfaces +- * bridged with us, for switches that don't +- * automatically learn SA from CPU-injected traffic ++ /* Snoop addresses added to foreign interfaces ++ * bridged with us, or the bridge ++ * itself. Dynamically learned addresses can ++ * also be added for switches that don't ++ * automatically learn SA from CPU-injected ++ * traffic. 
+ */ + struct net_device *br_dev; + struct dsa_slave_priv *p; +@@ -2223,7 +2226,8 @@ static int dsa_slave_switchdev_event(str + + dp = p->dp->cpu_dp; + +- if (!dp->ds->assisted_learning_on_cpu_port) ++ if (!fdb_info->added_by_user && ++ !dp->ds->assisted_learning_on_cpu_port) + return NOTIFY_DONE; + } + diff --git a/target/linux/generic/pending-5.15/768-net-dsa-mv88e6xxx-Request-assisted-learning-on-CPU-port.patch b/target/linux/generic/pending-5.15/768-net-dsa-mv88e6xxx-Request-assisted-learning-on-CPU-port.patch new file mode 100644 index 0000000000..9247401140 --- /dev/null +++ b/target/linux/generic/pending-5.15/768-net-dsa-mv88e6xxx-Request-assisted-learning-on-CPU-port.patch @@ -0,0 +1,27 @@ +From: Tobias Waldekranz <tobias@waldekranz.com> +Subject: [RFC net-next 7/7] net: dsa: mv88e6xxx: Request assisted learning on CPU port +Date: Sat, 16 Jan 2021 02:25:15 +0100 +Archived-At: <https://lore.kernel.org/netdev/20210116012515.3152-8-tobias@waldekranz.com/> + +While the hardware is capable of performing learning on the CPU port, +it requires alot of additions to the bridge's forwarding path in order +to handle multi-destination traffic correctly. + +Until that is in place, opt for the next best thing and let DSA sync +the relevant addresses down to the hardware FDB. + +Signed-off-by: Tobias Waldekranz <tobias@waldekranz.com> +--- + drivers/net/dsa/mv88e6xxx/chip.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/net/dsa/mv88e6xxx/chip.c ++++ b/drivers/net/dsa/mv88e6xxx/chip.c +@@ -5436,6 +5436,7 @@ static int mv88e6xxx_register_switch(str + ds->ops = &mv88e6xxx_switch_ops; + ds->ageing_time_min = chip->info->age_time_coeff; + ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX; ++ ds->assisted_learning_on_cpu_port = true; + + dev_set_drvdata(dev, ds); + diff --git a/target/linux/generic/pending-5.15/780-ARM-kirkwood-add-missing-linux-if_ether.h-for-ETH_AL.patch b/target/linux/generic/pending-5.15/780-ARM-kirkwood-add-missing-linux-if_ether.h-for-ETH_AL.patch new file mode 100644 index 0000000000..fcf7892c04 --- /dev/null +++ b/target/linux/generic/pending-5.15/780-ARM-kirkwood-add-missing-linux-if_ether.h-for-ETH_AL.patch @@ -0,0 +1,61 @@ +From patchwork Thu Aug 5 22:23:30 2021 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +X-Patchwork-Submitter: Daniel Golle <daniel@makrotopia.org> +X-Patchwork-Id: 12422209 +Date: Thu, 5 Aug 2021 23:23:30 +0100 +From: Daniel Golle <daniel@makrotopia.org> +To: linux-arm-kernel@lists.infradead.org, netdev@vger.kernel.org, + linux-kernel@vger.kernel.org +Cc: "David S. Miller" <davem@davemloft.net>, Andrew Lunn <andrew@lunn.ch>, + Michael Walle <michael@walle.cc> +Subject: [PATCH] ARM: kirkwood: add missing <linux/if_ether.h> for ETH_ALEN +Message-ID: <YQxk4jrbm31NM1US@makrotopia.org> +MIME-Version: 1.0 +Content-Disposition: inline +X-BeenThere: linux-arm-kernel@lists.infradead.org +X-Mailman-Version: 2.1.34 +Precedence: list +List-Id: <linux-arm-kernel.lists.infradead.org> +List-Archive: <http://lists.infradead.org/pipermail/linux-arm-kernel/> +Sender: "linux-arm-kernel" <linux-arm-kernel-bounces@lists.infradead.org> + +After commit 83216e3988cd1 ("of: net: pass the dst buffer to +of_get_mac_address()") build fails for kirkwood as ETH_ALEN is not +defined. + +arch/arm/mach-mvebu/kirkwood.c: In function 'kirkwood_dt_eth_fixup': +arch/arm/mach-mvebu/kirkwood.c:87:13: error: 'ETH_ALEN' undeclared (first use in this function); did you mean 'ESTALE'? 
+ u8 tmpmac[ETH_ALEN]; + ^~~~~~~~ + ESTALE +arch/arm/mach-mvebu/kirkwood.c:87:13: note: each undeclared identifier is reported only once for each function it appears in +arch/arm/mach-mvebu/kirkwood.c:87:6: warning: unused variable 'tmpmac' [-Wunused-variable] + u8 tmpmac[ETH_ALEN]; + ^~~~~~ +make[5]: *** [scripts/Makefile.build:262: arch/arm/mach-mvebu/kirkwood.o] Error 1 +make[5]: *** Waiting for unfinished jobs.... + +Add missing #include <linux/if_ether.h> to fix this. + +Cc: David S. Miller <davem@davemloft.net> +Cc: Andrew Lunn <andrew@lunn.ch> +Cc: Michael Walle <michael@walle.cc> +Reported-by: https://buildbot.openwrt.org/master/images/#/builders/56/builds/220/steps/44/logs/stdio +Fixes: 83216e3988cd1 ("of: net: pass the dst buffer to of_get_mac_address()") +Signed-off-by: Daniel Golle <daniel@makrotopia.org> +--- + arch/arm/mach-mvebu/kirkwood.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/arch/arm/mach-mvebu/kirkwood.c ++++ b/arch/arm/mach-mvebu/kirkwood.c +@@ -14,6 +14,7 @@ + #include <linux/kernel.h> + #include <linux/init.h> + #include <linux/mbus.h> ++#include <linux/if_ether.h> + #include <linux/of.h> + #include <linux/of_address.h> + #include <linux/of_net.h> diff --git a/target/linux/generic/pending-5.15/800-bcma-get-SoC-device-struct-copy-its-DMA-params-to-th.patch b/target/linux/generic/pending-5.15/800-bcma-get-SoC-device-struct-copy-its-DMA-params-to-th.patch new file mode 100644 index 0000000000..511a9f7555 --- /dev/null +++ b/target/linux/generic/pending-5.15/800-bcma-get-SoC-device-struct-copy-its-DMA-params-to-th.patch @@ -0,0 +1,64 @@ +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Subject: [PATCH] bcma: get SoC device struct & copy its DMA params to the + subdevices +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +For bus devices to be fully usable it's required to set their DMA +parameters. + +For years it has been missing and remained unnoticed because of +mips_dma_alloc_coherent() silently handling the empty coherent_dma_mask. +Kernel 4.19 came with a lot of DMA changes and caused a regression on +the bcm47xx. Starting with the commit f8c55dc6e828 ("MIPS: use generic +dma noncoherent ops for simple noncoherent platforms") DMA coherent +allocations just fail. Example: +[ 1.114914] bgmac_bcma bcma0:2: Allocation of TX ring 0x200 failed +[ 1.121215] bgmac_bcma bcma0:2: Unable to alloc memory for DMA +[ 1.127626] bgmac_bcma: probe of bcma0:2 failed with error -12 +[ 1.133838] bgmac_bcma: Broadcom 47xx GBit MAC driver loaded + +This change fixes above regression in addition to the MIPS bcm47xx +commit 321c46b91550 ("MIPS: BCM47XX: Setup struct device for the SoC"). + +It also fixes another *old* GPIO regression caused by a parent pointing +to the NULL: +[ 0.157054] missing gpiochip .dev parent pointer +[ 0.157287] bcma: bus0: Error registering GPIO driver: -22 +introduced by the commit 74f4e0cc6108 ("bcma: switch GPIO portions to +use GPIOLIB_IRQCHIP"). 
+ +Fixes: f8c55dc6e828 ("MIPS: use generic dma noncoherent ops for simple noncoherent platforms") +Fixes: 74f4e0cc6108 ("bcma: switch GPIO portions to use GPIOLIB_IRQCHIP") +Cc: linux-mips@linux-mips.org +Cc: Christoph Hellwig <hch@lst.de> +Cc: Linus Walleij <linus.walleij@linaro.org> +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +--- + +--- a/drivers/bcma/host_soc.c ++++ b/drivers/bcma/host_soc.c +@@ -191,6 +191,8 @@ int __init bcma_host_soc_init(struct bcm + struct bcma_bus *bus = &soc->bus; + int err; + ++ bus->dev = soc->dev; ++ + /* Scan bus and initialize it */ + err = bcma_bus_early_register(bus); + if (err) +--- a/drivers/bcma/main.c ++++ b/drivers/bcma/main.c +@@ -241,8 +241,10 @@ void bcma_prepare_core(struct bcma_bus * + core->dev.bus = &bcma_bus_type; + dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index); + core->dev.parent = bus->dev; +- if (bus->dev) ++ if (bus->dev) { + bcma_of_fill_device(bus->dev, core); ++ dma_coerce_mask_and_coherent(&core->dev, bus->dev->coherent_dma_mask); ++ } + + switch (bus->hosttype) { + case BCMA_HOSTTYPE_PCI: diff --git a/target/linux/generic/pending-5.15/801-gpio-gpio-cascade-add-generic-GPIO-cascade.patch b/target/linux/generic/pending-5.15/801-gpio-gpio-cascade-add-generic-GPIO-cascade.patch new file mode 100644 index 0000000000..af44ff24ac --- /dev/null +++ b/target/linux/generic/pending-5.15/801-gpio-gpio-cascade-add-generic-GPIO-cascade.patch @@ -0,0 +1,222 @@ +From fc23ea48ba52c24f201fe5ca0132ee1a3de5a70a Mon Sep 17 00:00:00 2001 +From: Mauri Sandberg <maukka@ext.kapsi.fi> +Date: Thu, 25 Mar 2021 11:48:05 +0200 +Subject: [PATCH 2/2] gpio: gpio-cascade: add generic GPIO cascade + +Adds support for building cascades of GPIO lines. That is, it allows +setups when there is one upstream line and multiple cascaded lines, out +of which one can be chosen at a time. The status of the upstream line +can be conveyed to the selected cascaded line or, vice versa, the status +of the cascaded line can be conveyed to the upstream line. + +A multiplexer is being used to select, which cascaded GPIO line is being +used at any given time. + +At the moment only input direction is supported. In future it should be +possible to add support for output direction, too. + +Signed-off-by: Mauri Sandberg <maukka@ext.kapsi.fi> +Reviewed-by: Linus Walleij <linus.walleij@linaro.org> +Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com> +--- +v7 -> v8: + - rearrange members in struct gpio_cascade + - cosmetic changes in file header and in one function declaration + - added Reviewed-by tags by Linus and Andy +v6 -> v7: + - In Kconfig add info about module name + - adhere to new convention that allows lines longer than 80 chars + - use dev_probe_err with upstream gpio line too + - refactor for cleaner exit of probe function. +v5 -> v6: + - In Kconfig, remove dependency to OF_GPIO and select only MULTIPLEXER + - refactor code preferring one-liners + - clean up prints, removing them from success-path. + - don't explicitly set gpio_chip.of_node as it's done in the GPIO library + - use devm_gpiochip_add_data instead of gpiochip_add +v4 -> v5: + - renamed gpio-mux-input -> gpio-cascade. 
refactored code accordingly + here and there and changed to use new bindings and compatible string + - ambigious and vague 'pin' was rename to 'upstream_line' + - dropped Tested-by and Reviewed-by due to changes in bindings + - dropped Reported-by suggested by an automatic bot as it was not really + appropriate to begin with + - functionally it's the same as v4 +v3 -> v4: + - Changed author email + - Included Tested-by and Reviewed-by from Drew +v2 -> v3: + - use managed device resources + - update Kconfig description +v1 -> v2: + - removed .owner from platform_driver as per test bot's instruction + - added MODULE_AUTHOR, MODULE_DESCRIPTION, MODULE_LICENSE + - added gpio_mux_input_get_direction as it's recommended for all chips + - removed because this is input only chip: gpio_mux_input_set_value + - removed because they are not needed for input/output only chips: + gpio_mux_input_direction_input + gpio_mux_input_direction_output + - fixed typo in an error message + - added info message about successful registration + - removed can_sleep flag as this does not sleep while getting GPIO value + like I2C or SPI do + - Updated description in Kconfig +--- + drivers/gpio/Kconfig | 15 +++++ + drivers/gpio/Makefile | 1 + + drivers/gpio/gpio-cascade.c | 117 ++++++++++++++++++++++++++++++++++++ + 3 files changed, 133 insertions(+) + create mode 100644 drivers/gpio/gpio-cascade.c + +--- a/drivers/gpio/Kconfig ++++ b/drivers/gpio/Kconfig +@@ -1617,4 +1617,19 @@ config GPIO_MOCKUP + tools/testing/selftests/gpio/gpio-mockup.sh. Reference the usage in + it. + ++comment "Other GPIO expanders" ++ ++config GPIO_CASCADE ++ tristate "General GPIO cascade" ++ select MULTIPLEXER ++ help ++ Say yes here to enable support for generic GPIO cascade. ++ ++ This allows building one-to-many cascades of GPIO lines using ++ different types of multiplexers readily available. At the ++ moment only input lines are supported. ++ ++ To build the driver as a module choose 'm' and the resulting module ++ will be called 'gpio-cascade'. ++ + endif +--- a/drivers/gpio/Makefile ++++ b/drivers/gpio/Makefile +@@ -44,6 +44,7 @@ obj-$(CONFIG_GPIO_BD9571MWV) += gpio-bd + obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o + obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o + obj-$(CONFIG_GPIO_CADENCE) += gpio-cadence.o ++obj-$(CONFIG_GPIO_CASCADE) += gpio-cascade.o + obj-$(CONFIG_GPIO_CLPS711X) += gpio-clps711x.o + obj-$(CONFIG_GPIO_SNPS_CREG) += gpio-creg-snps.o + obj-$(CONFIG_GPIO_CRYSTAL_COVE) += gpio-crystalcove.o +--- /dev/null ++++ b/drivers/gpio/gpio-cascade.c +@@ -0,0 +1,117 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * A generic GPIO cascade driver ++ * ++ * Copyright (C) 2021 Mauri Sandberg <maukka@ext.kapsi.fi> ++ * ++ * This allows building cascades of GPIO lines in a manner illustrated ++ * below: ++ * ++ * /|---- Cascaded GPIO line 0 ++ * Upstream | |---- Cascaded GPIO line 1 ++ * GPIO line ----+ | . ++ * | | . ++ * \|---- Cascaded GPIO line n ++ * ++ * A multiplexer is being used to select, which cascaded line is being ++ * addressed at any given time. ++ * ++ * At the moment only input mode is supported due to lack of means for ++ * testing output functionality. At least theoretically output should be ++ * possible with open drain constructions. 
++ */ ++ ++#include <linux/module.h> ++#include <linux/slab.h> ++#include <linux/platform_device.h> ++#include <linux/mux/consumer.h> ++ ++#include <linux/gpio/consumer.h> ++#include <linux/gpio/driver.h> ++ ++struct gpio_cascade { ++ struct gpio_chip gpio_chip; ++ struct device *parent; ++ struct mux_control *mux_control; ++ struct gpio_desc *upstream_line; ++}; ++ ++static struct gpio_cascade *chip_to_cascade(struct gpio_chip *gc) ++{ ++ return container_of(gc, struct gpio_cascade, gpio_chip); ++} ++ ++static int gpio_cascade_get_direction(struct gpio_chip *gc, unsigned int offset) ++{ ++ return GPIO_LINE_DIRECTION_IN; ++} ++ ++static int gpio_cascade_get_value(struct gpio_chip *gc, unsigned int offset) ++{ ++ struct gpio_cascade *cas = chip_to_cascade(gc); ++ int ret; ++ ++ ret = mux_control_select(cas->mux_control, offset); ++ if (ret) ++ return ret; ++ ++ ret = gpiod_get_value(cas->upstream_line); ++ mux_control_deselect(cas->mux_control); ++ return ret; ++} ++ ++static int gpio_cascade_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct gpio_cascade *cas; ++ struct mux_control *mc; ++ struct gpio_desc *upstream; ++ struct gpio_chip *gc; ++ ++ cas = devm_kzalloc(dev, sizeof(*cas), GFP_KERNEL); ++ if (!cas) ++ return -ENOMEM; ++ ++ mc = devm_mux_control_get(dev, NULL); ++ if (IS_ERR(mc)) ++ return dev_err_probe(dev, PTR_ERR(mc), "unable to get mux-control\n"); ++ ++ cas->mux_control = mc; ++ upstream = devm_gpiod_get(dev, "upstream", GPIOD_IN); ++ if (IS_ERR(upstream)) ++ return dev_err_probe(dev, PTR_ERR(upstream), "unable to claim upstream GPIO line\n"); ++ ++ cas->upstream_line = upstream; ++ cas->parent = dev; ++ ++ gc = &cas->gpio_chip; ++ gc->get = gpio_cascade_get_value; ++ gc->get_direction = gpio_cascade_get_direction; ++ gc->base = -1; ++ gc->ngpio = mux_control_states(mc); ++ gc->label = dev_name(cas->parent); ++ gc->parent = cas->parent; ++ gc->owner = THIS_MODULE; ++ ++ platform_set_drvdata(pdev, cas); ++ return devm_gpiochip_add_data(dev, &cas->gpio_chip, NULL); ++} ++ ++static const struct of_device_id gpio_cascade_id[] = { ++ { .compatible = "gpio-cascade" }, ++ { /* sentinel */ } ++}; ++MODULE_DEVICE_TABLE(of, gpio_cascade_id); ++ ++static struct platform_driver gpio_cascade_driver = { ++ .driver = { ++ .name = "gpio-cascade", ++ .of_match_table = gpio_cascade_id, ++ }, ++ .probe = gpio_cascade_probe, ++}; ++module_platform_driver(gpio_cascade_driver); ++ ++MODULE_AUTHOR("Mauri Sandberg <maukka@ext.kapsi.fi>"); ++MODULE_DESCRIPTION("Generic GPIO cascade"); ++MODULE_LICENSE("GPL"); diff --git a/target/linux/generic/pending-5.15/810-pci_disable_common_quirks.patch b/target/linux/generic/pending-5.15/810-pci_disable_common_quirks.patch new file mode 100644 index 0000000000..62f6fed126 --- /dev/null +++ b/target/linux/generic/pending-5.15/810-pci_disable_common_quirks.patch @@ -0,0 +1,62 @@ +From: Gabor Juhos <juhosg@openwrt.org> +Subject: debloat: add kernel config option to disabling common PCI quirks + +Signed-off-by: Gabor Juhos <juhosg@openwrt.org> +--- + drivers/pci/Kconfig | 6 ++++++ + drivers/pci/quirks.c | 6 ++++++ + 2 files changed, 12 insertions(+) + +--- a/drivers/pci/Kconfig ++++ b/drivers/pci/Kconfig +@@ -118,6 +118,13 @@ config XEN_PCIDEV_FRONTEND + The PCI device frontend driver allows the kernel to import arbitrary + PCI devices from a PCI backend to support PCI driver domains. + ++config PCI_DISABLE_COMMON_QUIRKS ++ bool "PCI disable common quirks" ++ depends on PCI ++ help ++ If you don't know what to do here, say N. 
++ ++ + config PCI_ATS + bool + +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -206,6 +206,7 @@ static void quirk_mmio_always_on(struct + DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on); + ++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS + /* + * The Mellanox Tavor device gives false positive parity errors. Mark this + * device with a broken_parity_status to allow PCI scanning code to "skip" +@@ -3323,6 +3324,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata); + ++#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */ ++ + /* + * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum. + * To work around this, query the size it should be configured to by the +@@ -3348,6 +3351,8 @@ static void quirk_intel_ntb(struct pci_d + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb); + ++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS ++ + /* + * Some BIOS implementations leave the Intel GPU interrupts enabled, even + * though no one is handling them (e.g., if the i915 driver is never +@@ -3386,6 +3391,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IN + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq); + ++#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */ ++ + /* + * PCI devices which are on Intel chips can skip the 10ms delay + * before entering D3 mode. diff --git a/target/linux/generic/pending-5.15/811-pci_disable_usb_common_quirks.patch b/target/linux/generic/pending-5.15/811-pci_disable_usb_common_quirks.patch new file mode 100644 index 0000000000..42a8397839 --- /dev/null +++ b/target/linux/generic/pending-5.15/811-pci_disable_usb_common_quirks.patch @@ -0,0 +1,115 @@ +From: Felix Fietkau <nbd@nbd.name> +Subject: debloat: disable common USB quirks + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + drivers/usb/host/pci-quirks.c | 16 ++++++++++++++++ + drivers/usb/host/pci-quirks.h | 18 +++++++++++++++++- + include/linux/usb/hcd.h | 7 +++++++ + 3 files changed, 40 insertions(+), 1 deletion(-) + +--- a/drivers/usb/host/pci-quirks.c ++++ b/drivers/usb/host/pci-quirks.c +@@ -128,6 +128,8 @@ struct amd_chipset_type { + u8 rev; + }; + ++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS ++ + static struct amd_chipset_info { + struct pci_dev *nb_dev; + struct pci_dev *smbus_dev; +@@ -633,6 +635,10 @@ bool usb_amd_pt_check_port(struct device + } + EXPORT_SYMBOL_GPL(usb_amd_pt_check_port); + ++#endif /* CONFIG_PCI_DISABLE_COMMON_QUIRKS */ ++ ++#if IS_ENABLED(CONFIG_USB_UHCI_HCD) ++ + /* + * Make sure the controller is completely inactive, unable to + * generate interrupts or do DMA. 
+@@ -712,8 +718,17 @@ reset_needed: + uhci_reset_hc(pdev, base); + return 1; + } ++#else ++int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base) ++{ ++ return 0; ++} ++ ++#endif + EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc); + ++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS ++ + static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask) + { + u16 cmd; +@@ -1285,3 +1300,4 @@ static void quirk_usb_early_handoff(stru + } + DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff); ++#endif +--- a/drivers/usb/host/pci-quirks.h ++++ b/drivers/usb/host/pci-quirks.h +@@ -5,6 +5,9 @@ + #ifdef CONFIG_USB_PCI + void uhci_reset_hc(struct pci_dev *pdev, unsigned long base); + int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base); ++#endif /* CONFIG_USB_PCI */ ++ ++#if defined(CONFIG_USB_PCI) && !defined(CONFIG_PCI_DISABLE_COMMON_QUIRKS) + int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev); + bool usb_amd_hang_symptom_quirk(void); + bool usb_amd_prefetch_quirk(void); +@@ -19,6 +22,18 @@ void sb800_prefetch(struct device *dev, + bool usb_amd_pt_check_port(struct device *device, int port); + #else + struct pci_dev; ++static inline int usb_amd_quirk_pll_check(void) ++{ ++ return 0; ++} ++static inline bool usb_amd_hang_symptom_quirk(void) ++{ ++ return false; ++} ++static inline bool usb_amd_prefetch_quirk(void) ++{ ++ return false; ++} + static inline void usb_amd_quirk_pll_disable(void) {} + static inline void usb_amd_quirk_pll_enable(void) {} + static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {} +@@ -29,6 +44,11 @@ static inline bool usb_amd_pt_check_port + { + return false; + } ++static inline void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev) {} ++static inline bool usb_xhci_needs_pci_reset(struct pci_dev *pdev) ++{ ++ return false; ++} + #endif /* CONFIG_USB_PCI */ + + #endif /* __LINUX_USB_PCI_QUIRKS_H */ +--- a/include/linux/usb/hcd.h ++++ b/include/linux/usb/hcd.h +@@ -484,7 +484,14 @@ extern int usb_hcd_pci_probe(struct pci_ + extern void usb_hcd_pci_remove(struct pci_dev *dev); + extern void usb_hcd_pci_shutdown(struct pci_dev *dev); + ++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS + extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev); ++#else ++static inline int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev) ++{ ++ return 0; ++} ++#endif + + #ifdef CONFIG_PM + extern const struct dev_pm_ops usb_hcd_pci_pm_ops; diff --git a/target/linux/generic/pending-5.15/820-w1-gpio-fix-problem-with-platfom-data-in-w1-gpio.patch b/target/linux/generic/pending-5.15/820-w1-gpio-fix-problem-with-platfom-data-in-w1-gpio.patch new file mode 100644 index 0000000000..33eb34c913 --- /dev/null +++ b/target/linux/generic/pending-5.15/820-w1-gpio-fix-problem-with-platfom-data-in-w1-gpio.patch @@ -0,0 +1,26 @@ +From d9c8bc8c1408f3e8529db6e4e04017b4c579c342 Mon Sep 17 00:00:00 2001 +From: Pawel Dembicki <paweldembicki@gmail.com> +Date: Sun, 18 Feb 2018 17:08:04 +0100 +Subject: [PATCH] w1: gpio: fix problem with platfom data in w1-gpio + +In devices, where fdt is used, is impossible to apply platform data +without proper fdt node. + +This patch allow to use platform data in devices with fdt. 
+ +Signed-off-by: Pawel Dembicki <paweldembicki@gmail.com> +--- + drivers/w1/masters/w1-gpio.c | 7 +++---- + 1 file changed, 3 insertions(+), 4 deletions(-) + +--- a/drivers/w1/masters/w1-gpio.c ++++ b/drivers/w1/masters/w1-gpio.c +@@ -76,7 +76,7 @@ static int w1_gpio_probe(struct platform + enum gpiod_flags gflags = GPIOD_OUT_LOW_OPEN_DRAIN; + int err; + +- if (of_have_populated_dt()) { ++ if (of_have_populated_dt() && !dev_get_platdata(&pdev->dev)) { + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; diff --git a/target/linux/generic/pending-5.15/834-ledtrig-libata.patch b/target/linux/generic/pending-5.15/834-ledtrig-libata.patch new file mode 100644 index 0000000000..25cffb1444 --- /dev/null +++ b/target/linux/generic/pending-5.15/834-ledtrig-libata.patch @@ -0,0 +1,149 @@ +From: Daniel Golle <daniel@makrotopia.org> +Subject: libata: add ledtrig support + +This adds a LED trigger for each ATA port indicating disk activity. + +As this is needed only on specific platforms (NAS SoCs and such), +these platforms should define ARCH_WANTS_LIBATA_LEDS if there +are boards with LED(s) intended to indicate ATA disk activity and +need the OS to take care of that. +In that way, if not selected, LED trigger support not will be +included in libata-core and both, codepaths and structures remain +untouched. + +Signed-off-by: Daniel Golle <daniel@makrotopia.org> +--- + drivers/ata/Kconfig | 16 ++++++++++++++++ + drivers/ata/libata-core.c | 41 +++++++++++++++++++++++++++++++++++++++++ + include/linux/libata.h | 9 +++++++++ + 3 files changed, 66 insertions(+) + +--- a/drivers/ata/Kconfig ++++ b/drivers/ata/Kconfig +@@ -67,6 +67,22 @@ config ATA_FORCE + + If unsure, say Y. + ++config ARCH_WANT_LIBATA_LEDS ++ bool ++ ++config ATA_LEDS ++ bool "support ATA port LED triggers" ++ depends on ARCH_WANT_LIBATA_LEDS ++ select NEW_LEDS ++ select LEDS_CLASS ++ select LEDS_TRIGGERS ++ default y ++ help ++ This option adds a LED trigger for each registered ATA port. ++ It is used to drive disk activity leds connected via GPIO. ++ ++ If unsure, say N. 
++ + config ATA_ACPI + bool "ATA ACPI Support" + depends on ACPI +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -650,6 +650,19 @@ u64 ata_tf_read_block(const struct ata_t + return block; + } + ++#ifdef CONFIG_ATA_LEDS ++#define LIBATA_BLINK_DELAY 20 /* ms */ ++static inline void ata_led_act(struct ata_port *ap) ++{ ++ unsigned long led_delay = LIBATA_BLINK_DELAY; ++ ++ if (unlikely(!ap->ledtrig)) ++ return; ++ ++ led_trigger_blink_oneshot(ap->ledtrig, &led_delay, &led_delay, 0); ++} ++#endif ++ + /** + * ata_build_rw_tf - Build ATA taskfile for given read/write request + * @tf: Target ATA taskfile +@@ -4548,6 +4561,9 @@ struct ata_queued_cmd *ata_qc_new_init(s + if (tag < 0) + return NULL; + } ++#ifdef CONFIG_ATA_LEDS ++ ata_led_act(ap); ++#endif + + qc = __ata_qc_from_tag(ap, tag); + qc->tag = qc->hw_tag = tag; +@@ -5326,6 +5342,9 @@ struct ata_port *ata_port_alloc(struct a + ap->stats.unhandled_irq = 1; + ap->stats.idle_irq = 1; + #endif ++#ifdef CONFIG_ATA_LEDS ++ ap->ledtrig = kzalloc(sizeof(struct led_trigger), GFP_KERNEL); ++#endif + ata_sff_port_init(ap); + + return ap; +@@ -5361,6 +5380,12 @@ static void ata_host_release(struct kref + + kfree(ap->pmp_link); + kfree(ap->slave_link); ++#ifdef CONFIG_ATA_LEDS ++ if (ap->ledtrig) { ++ led_trigger_unregister(ap->ledtrig); ++ kfree(ap->ledtrig); ++ }; ++#endif + kfree(ap); + host->ports[i] = NULL; + } +@@ -5767,7 +5792,23 @@ int ata_host_register(struct ata_host *h + host->ports[i]->print_id = atomic_inc_return(&ata_print_id); + host->ports[i]->local_port_no = i + 1; + } ++#ifdef CONFIG_ATA_LEDS ++ for (i = 0; i < host->n_ports; i++) { ++ if (unlikely(!host->ports[i]->ledtrig)) ++ continue; + ++ snprintf(host->ports[i]->ledtrig_name, ++ sizeof(host->ports[i]->ledtrig_name), "ata%u", ++ host->ports[i]->print_id); ++ ++ host->ports[i]->ledtrig->name = host->ports[i]->ledtrig_name; ++ ++ if (led_trigger_register(host->ports[i]->ledtrig)) { ++ kfree(host->ports[i]->ledtrig); ++ host->ports[i]->ledtrig = NULL; ++ } ++ } ++#endif + /* Create associated sysfs transport objects */ + for (i = 0; i < host->n_ports; i++) { + rc = ata_tport_add(host->dev,host->ports[i]); +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -23,6 +23,9 @@ + #include <linux/cdrom.h> + #include <linux/sched.h> + #include <linux/async.h> ++#ifdef CONFIG_ATA_LEDS ++#include <linux/leds.h> ++#endif + + /* + * Define if arch has non-standard setup. This is a _PCI_ standard +@@ -883,6 +886,12 @@ struct ata_port { + #ifdef CONFIG_ATA_ACPI + struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */ + #endif ++ ++#ifdef CONFIG_ATA_LEDS ++ struct led_trigger *ledtrig; ++ char ledtrig_name[8]; ++#endif ++ + /* owned by EH */ + u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned; + }; diff --git a/target/linux/generic/pending-5.15/840-hwrng-bcm2835-set-quality-to-1000.patch b/target/linux/generic/pending-5.15/840-hwrng-bcm2835-set-quality-to-1000.patch new file mode 100644 index 0000000000..580f0b1bfa --- /dev/null +++ b/target/linux/generic/pending-5.15/840-hwrng-bcm2835-set-quality-to-1000.patch @@ -0,0 +1,26 @@ +From d6988cf1d16faac56899918bb2b1be8d85155e3f Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?=C3=81lvaro=20Fern=C3=A1ndez=20Rojas?= <noltari@gmail.com> +Date: Sat, 20 Feb 2021 18:36:38 +0100 +Subject: [PATCH] hwrng: bcm2835: set quality to 1000 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This allows devices without a high precission timer to reduce boot from >100s +to <30s. 
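
For context, a simplified view of how the hw_random core turns this quality field into entropy credit (the exact crediting code varies between kernel versions, so treat this as an illustrative sketch, not the driver's own code): quality is expressed as bits of entropy per 1024 bits read from the device, and the hwrng kernel thread credits roughly

	/* bytes_read from the RNG; rng->quality == 1000 after this patch */
	entropy_bits = bytes_read * 8 * rng->quality / 1024;

so with quality 1000 nearly every bit read is credited, while the previous value of 0 credits nothing and leaves the CRNG waiting on other entropy sources - which is where the long boots on boards without a high-precision timer came from.
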
+ +Signed-off-by: Álvaro Fernández Rojas <noltari@gmail.com> +--- + drivers/char/hw_random/bcm2835-rng.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/char/hw_random/bcm2835-rng.c ++++ b/drivers/char/hw_random/bcm2835-rng.c +@@ -163,6 +163,7 @@ static int bcm2835_rng_probe(struct plat + priv->rng.init = bcm2835_rng_init; + priv->rng.read = bcm2835_rng_read; + priv->rng.cleanup = bcm2835_rng_cleanup; ++ priv->rng.quality = 1000; + + if (dev_of_node(dev)) { + rng_id = of_match_node(bcm2835_rng_of_match, dev->of_node); diff --git a/target/linux/generic/pending-5.15/842-net-qmi_wwan-add-ZTE-MF286D-modem-19d2-1485.patch b/target/linux/generic/pending-5.15/842-net-qmi_wwan-add-ZTE-MF286D-modem-19d2-1485.patch new file mode 100644 index 0000000000..f2f0e61089 --- /dev/null +++ b/target/linux/generic/pending-5.15/842-net-qmi_wwan-add-ZTE-MF286D-modem-19d2-1485.patch @@ -0,0 +1,59 @@ +From 078c6a1cbd4cd7496048786beec2e312577bebbf Mon Sep 17 00:00:00 2001 +From: Pawel Dembicki <paweldembicki@gmail.com> +Date: Tue, 11 Jan 2022 23:11:32 +0100 +Subject: [PATCH] net: qmi_wwan: add ZTE MF286D modem 19d2:1485 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Modem from ZTE MF286D is an Qualcomm MDM9250 based 3G/4G modem. + +T: Bus=02 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 3 Spd=5000 MxCh= 0 +D: Ver= 3.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 9 #Cfgs= 1 +P: Vendor=19d2 ProdID=1485 Rev=52.87 +S: Manufacturer=ZTE,Incorporated +S: Product=ZTE Technologies MSM +S: SerialNumber=MF286DZTED000000 +C:* #Ifs= 7 Cfg#= 1 Atr=80 MxPwr=896mA +A: FirstIf#= 0 IfCount= 2 Cls=02(comm.) Sub=06 Prot=00 +I:* If#= 0 Alt= 0 #EPs= 1 Cls=02(comm.) Sub=02 Prot=ff Driver=rndis_host +E: Ad=82(I) Atr=03(Int.) MxPS= 8 Ivl=32ms +I:* If#= 1 Alt= 0 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=rndis_host +E: Ad=81(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms +E: Ad=01(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms +I:* If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option +E: Ad=83(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms +E: Ad=02(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms +I:* If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=option +E: Ad=85(I) Atr=03(Int.) MxPS= 10 Ivl=32ms +E: Ad=84(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms +E: Ad=03(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms +I:* If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=option +E: Ad=87(I) Atr=03(Int.) MxPS= 10 Ivl=32ms +E: Ad=86(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms +E: Ad=04(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms +I:* If#= 5 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan +E: Ad=88(I) Atr=03(Int.) MxPS= 8 Ivl=32ms +E: Ad=8e(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms +E: Ad=0f(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms +I:* If#= 6 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=usbfs +E: Ad=05(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms +E: Ad=89(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms + +Signed-off-by: Pawel Dembicki <paweldembicki@gmail.com> +Acked-by: Bjørn Mork <bjorn@mork.no> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/usb/qmi_wwan.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -1252,6 +1252,7 @@ static const struct usb_device_id produc + {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ + {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ + {QMI_FIXED_INTF(0x19d2, 0x1432, 3)}, /* ZTE ME3620 */ ++ {QMI_FIXED_INTF(0x19d2, 0x1485, 5)}, /* ZTE MF286D */ + {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ + {QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */ + {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ diff --git a/target/linux/generic/pending-5.15/850-0001-PCI-aardvark-Replace-custom-PCIE_CORE_INT_-macros-wi.patch b/target/linux/generic/pending-5.15/850-0001-PCI-aardvark-Replace-custom-PCIE_CORE_INT_-macros-wi.patch new file mode 100644 index 0000000000..fecd19166c --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0001-PCI-aardvark-Replace-custom-PCIE_CORE_INT_-macros-wi.patch @@ -0,0 +1,40 @@ +From 43f3f187e6f62ca40802afe39495c8a3e20b4bfa Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Mon, 10 Jan 2022 01:50:50 +0100 +Subject: [PATCH] PCI: aardvark: Replace custom PCIE_CORE_INT_* macros with + PCI_INTERRUPT_* +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Header file linux/pci.h defines enum pci_interrupt_pin with corresponding +PCI_INTERRUPT_* values. + +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 6 +----- + 1 file changed, 1 insertion(+), 5 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -37,10 +37,6 @@ + #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6) + #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7) + #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8) +-#define PCIE_CORE_INT_A_ASSERT_ENABLE 1 +-#define PCIE_CORE_INT_B_ASSERT_ENABLE 2 +-#define PCIE_CORE_INT_C_ASSERT_ENABLE 3 +-#define PCIE_CORE_INT_D_ASSERT_ENABLE 4 + /* PIO registers base address and register offsets */ + #define PIO_BASE_ADDR 0x4000 + #define PIO_CTRL (PIO_BASE_ADDR + 0x0) +@@ -966,7 +962,7 @@ static int advk_sw_pci_bridge_init(struc + bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64); + + /* Support interrupt A for MSI feature */ +- bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE; ++ bridge->conf.intpin = PCI_INTERRUPT_INTA; + + /* Aardvark HW provides PCIe Capability structure in version 2 */ + bridge->pcie_conf.cap = cpu_to_le16(2); diff --git a/target/linux/generic/pending-5.15/850-0002-PCI-aardvark-Fix-reading-MSI-interrupt-number.patch b/target/linux/generic/pending-5.15/850-0002-PCI-aardvark-Fix-reading-MSI-interrupt-number.patch new file mode 100644 index 0000000000..72a432fc06 --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0002-PCI-aardvark-Fix-reading-MSI-interrupt-number.patch @@ -0,0 +1,57 @@ +From a29a7d01cd778854e08108461cba321a63d98871 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Fri, 2 Jul 2021 16:39:47 +0200 +Subject: [PATCH] PCI: aardvark: Fix reading MSI interrupt number +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +In advk_pcie_handle_msi() the authors expect that when bit i in the W1C +register PCIE_MSI_STATUS_REG is cleared, the PCIE_MSI_PAYLOAD_REG is +updated to contain the 
MSI number corresponding to index i. + +Experiments show that this is not so, and instead PCIE_MSI_PAYLOAD_REG +always contains the number of the last received MSI, overall. + +Do not read PCIE_MSI_PAYLOAD_REG register for determining MSI interrupt +number. Since Aardvark already forbids more than 32 interrupts and uses +own allocated hwirq numbers, the msi_idx already corresponds to the +received MSI number. + +Fixes: 8c39d710363c ("PCI: aardvark: Add Aardvark PCI host controller driver") +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 13 ++++++------- + 1 file changed, 6 insertions(+), 7 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1393,7 +1393,7 @@ static void advk_pcie_remove_irq_domain( + static void advk_pcie_handle_msi(struct advk_pcie *pcie) + { + u32 msi_val, msi_mask, msi_status, msi_idx; +- u16 msi_data; ++ int virq; + + msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG); + msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG); +@@ -1403,13 +1403,12 @@ static void advk_pcie_handle_msi(struct + if (!(BIT(msi_idx) & msi_status)) + continue; + +- /* +- * msi_idx contains bits [4:0] of the msi_data and msi_data +- * contains 16bit MSI interrupt number +- */ + advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG); +- msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & PCIE_MSI_DATA_MASK; +- generic_handle_irq(msi_data); ++ virq = irq_find_mapping(pcie->msi_inner_domain, msi_idx); ++ if (virq) ++ generic_handle_irq(virq); ++ else ++ dev_err_ratelimited(&pcie->pdev->dev, "unexpected MSI 0x%02x\n", msi_idx); + } + + advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING, diff --git a/target/linux/generic/pending-5.15/850-0003-PCI-aardvark-Fix-support-for-MSI-interrupts.patch b/target/linux/generic/pending-5.15/850-0003-PCI-aardvark-Fix-support-for-MSI-interrupts.patch new file mode 100644 index 0000000000..8b2d07089a --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0003-PCI-aardvark-Fix-support-for-MSI-interrupts.patch @@ -0,0 +1,72 @@ +From bb03b126ea6c9e57177b537dd022246fa5dbef16 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Fri, 12 Feb 2021 16:24:07 +0100 +Subject: [PATCH] PCI: aardvark: Fix support for MSI interrupts +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Aardvark hardware supports Multi-MSI and MSI_FLAG_MULTI_PCI_MSI is already +set for the MSI chip. But when allocating MSI interrupt numbers for +Multi-MSI, the numbers need to be properly aligned, otherwise endpoint +devices send MSI interrupt with incorrect numbers. + +Fix this issue by using function bitmap_find_free_region() instead of +bitmap_find_next_zero_area(). + +To ensure that aligned MSI interrupt numbers are used by endpoint devices, +we cannot use Linux virtual irq numbers (as they are random and not +properly aligned). Instead we need to use the aligned hwirq numbers. + +This change fixes receiving MSI interrupts on Armada 3720 boards and +allows using NVMe disks which use Multi-MSI feature with 3 interrupts. + +Without this NVMe disks freeze booting as linux nvme-core.c is waiting +60s for an interrupt. 
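
A minimal sketch of the aligned allocation this change relies on (illustrative only; msi_used, MSI_IRQ_NUM and the helpers are the ones used in the driver, error handling is trimmed):

	/* allocate nr_irqs hwirqs, aligned to the next power of two */
	int order = order_base_2(nr_irqs);	/* e.g. nr_irqs = 3 -> order = 2 */
	int hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM, order);

	if (hwirq < 0)
		return -ENOSPC;			/* no aligned block left */

	/* ... hwirq .. hwirq + nr_irqs - 1 are now reserved ... */

	/* on free, release the whole aligned block again */
	bitmap_release_region(pcie->msi_used, hwirq, order);

bitmap_find_free_region() reserves a block of 2^order bits whose base index is itself aligned to 2^order, which is exactly the alignment a Multi-MSI endpoint expects when it modifies the low bits of the MSI data value.
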
+ +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 16 ++++++---------- + 1 file changed, 6 insertions(+), 10 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1189,7 +1189,7 @@ static void advk_msi_irq_compose_msi_msg + + msg->address_lo = lower_32_bits(msi_msg); + msg->address_hi = upper_32_bits(msi_msg); +- msg->data = data->irq; ++ msg->data = data->hwirq; + } + + static int advk_msi_set_affinity(struct irq_data *irq_data, +@@ -1206,15 +1206,11 @@ static int advk_msi_irq_domain_alloc(str + int hwirq, i; + + mutex_lock(&pcie->msi_used_lock); +- hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM, +- 0, nr_irqs, 0); +- if (hwirq >= MSI_IRQ_NUM) { +- mutex_unlock(&pcie->msi_used_lock); +- return -ENOSPC; +- } +- +- bitmap_set(pcie->msi_used, hwirq, nr_irqs); ++ hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM, ++ order_base_2(nr_irqs)); + mutex_unlock(&pcie->msi_used_lock); ++ if (hwirq < 0) ++ return -ENOSPC; + + for (i = 0; i < nr_irqs; i++) + irq_domain_set_info(domain, virq + i, hwirq + i, +@@ -1232,7 +1228,7 @@ static void advk_msi_irq_domain_free(str + struct advk_pcie *pcie = domain->host_data; + + mutex_lock(&pcie->msi_used_lock); +- bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs); ++ bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs)); + mutex_unlock(&pcie->msi_used_lock); + } + diff --git a/target/linux/generic/pending-5.15/850-0004-PCI-aardvark-Rewrite-IRQ-code-to-chained-IRQ-handler.patch b/target/linux/generic/pending-5.15/850-0004-PCI-aardvark-Rewrite-IRQ-code-to-chained-IRQ-handler.patch new file mode 100644 index 0000000000..47a26ceeb8 --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0004-PCI-aardvark-Rewrite-IRQ-code-to-chained-IRQ-handler.patch @@ -0,0 +1,125 @@ +From 0cd5141d1866afb23286fe90cd846441fe7aeb39 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Sat, 27 Mar 2021 14:44:11 +0100 +Subject: [PATCH] PCI: aardvark: Rewrite IRQ code to chained IRQ handler +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Rewrite the code to use irq_set_chained_handler_and_data() handler with +chained_irq_enter() and chained_irq_exit() processing instead of using +devm_request_irq(). + +advk_pcie_irq_handler() reads IRQ status bits and calls other functions +based on which bits are set. These functions then read its own IRQ status +bits and calls other aardvark functions based on these bits. Finally +generic_handle_domain_irq() with translated linux IRQ numbers are called. 
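
A bare-bones sketch of the chained-handler pattern described above (only the enter/exit pairing is shown here; the full handler in the diff below also decodes the summary status register):

	static void advk_pcie_irq_handler(struct irq_desc *desc)
	{
		struct advk_pcie *pcie = irq_desc_get_handler_data(desc);
		struct irq_chip *chip = irq_desc_get_chip(desc);

		chained_irq_enter(chip, desc);	/* ack/mask the parent GIC interrupt */
		/* read summary status and dispatch to INTx/MSI handling here */
		chained_irq_exit(chip, desc);	/* eoi/unmask the parent GIC interrupt */
	}

	/* registered at probe time instead of devm_request_irq(): */
	irq_set_chained_handler_and_data(pcie->irq, advk_pcie_irq_handler, pcie);
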
+ +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 48 +++++++++++++++------------ + 1 file changed, 26 insertions(+), 22 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -275,6 +275,7 @@ struct advk_pcie { + u32 actions; + } wins[OB_WIN_COUNT]; + u8 wins_count; ++ int irq; + struct irq_domain *irq_domain; + struct irq_chip irq_chip; + raw_spinlock_t irq_lock; +@@ -1442,21 +1443,26 @@ static void advk_pcie_handle_int(struct + } + } + +-static irqreturn_t advk_pcie_irq_handler(int irq, void *arg) ++static void advk_pcie_irq_handler(struct irq_desc *desc) + { +- struct advk_pcie *pcie = arg; +- u32 status; ++ struct advk_pcie *pcie = irq_desc_get_handler_data(desc); ++ struct irq_chip *chip = irq_desc_get_chip(desc); ++ u32 val, mask, status; + +- status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG); +- if (!(status & PCIE_IRQ_CORE_INT)) +- return IRQ_NONE; ++ chained_irq_enter(chip, desc); + +- advk_pcie_handle_int(pcie); ++ val = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG); ++ mask = advk_readl(pcie, HOST_CTRL_INT_MASK_REG); ++ status = val & ((~mask) & PCIE_IRQ_ALL_MASK); + +- /* Clear interrupt */ +- advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG); ++ if (status & PCIE_IRQ_CORE_INT) { ++ advk_pcie_handle_int(pcie); + +- return IRQ_HANDLED; ++ /* Clear interrupt */ ++ advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG); ++ } ++ ++ chained_irq_exit(chip, desc); + } + + static void __maybe_unused advk_pcie_disable_phy(struct advk_pcie *pcie) +@@ -1523,7 +1529,7 @@ static int advk_pcie_probe(struct platfo + struct advk_pcie *pcie; + struct pci_host_bridge *bridge; + struct resource_entry *entry; +- int ret, irq; ++ int ret; + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie)); + if (!bridge) +@@ -1611,17 +1617,9 @@ static int advk_pcie_probe(struct platfo + if (IS_ERR(pcie->base)) + return PTR_ERR(pcie->base); + +- irq = platform_get_irq(pdev, 0); +- if (irq < 0) +- return irq; +- +- ret = devm_request_irq(dev, irq, advk_pcie_irq_handler, +- IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie", +- pcie); +- if (ret) { +- dev_err(dev, "Failed to register interrupt\n"); +- return ret; +- } ++ pcie->irq = platform_get_irq(pdev, 0); ++ if (pcie->irq < 0) ++ return pcie->irq; + + pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node, + "reset-gpios", 0, +@@ -1670,11 +1668,14 @@ static int advk_pcie_probe(struct platfo + return ret; + } + ++ irq_set_chained_handler_and_data(pcie->irq, advk_pcie_irq_handler, pcie); ++ + bridge->sysdata = pcie; + bridge->ops = &advk_pcie_ops; + + ret = pci_host_probe(bridge); + if (ret < 0) { ++ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL); + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); + return ret; +@@ -1722,6 +1723,9 @@ static int advk_pcie_remove(struct platf + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); + advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); + ++ /* Remove IRQ handler */ ++ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL); ++ + /* Remove IRQ domains */ + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); diff --git a/target/linux/generic/pending-5.15/850-0005-PCI-aardvark-Check-return-value-of-generic_handle_do.patch b/target/linux/generic/pending-5.15/850-0005-PCI-aardvark-Check-return-value-of-generic_handle_do.patch new file mode 100644 index 
0000000000..9cc37f727e --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0005-PCI-aardvark-Check-return-value-of-generic_handle_do.patch @@ -0,0 +1,31 @@ +From 69c1f2c6f45a556361fd8e8d2d4eb20e2c8d3d95 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Thu, 18 Mar 2021 17:04:32 +0100 +Subject: [PATCH] PCI: aardvark: Check return value of + generic_handle_domain_irq() when processing INTx IRQ +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +It is possible that we receive spurious INTx interrupt. Check for the +return value of generic_handle_domain_irq() when processing INTx IRQ. + +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1439,7 +1439,9 @@ static void advk_pcie_handle_int(struct + PCIE_ISR1_REG); + + virq = irq_find_mapping(pcie->irq_domain, i); +- generic_handle_irq(virq); ++ if (generic_handle_irq(virq) == -EINVAL) ++ dev_err_ratelimited(&pcie->pdev->dev, "unexpected INT%c IRQ\n", ++ (char)i + 'A'); + } + } + diff --git a/target/linux/generic/pending-5.15/850-0006-PCI-aardvark-Make-MSI-irq_chip-structures-static-dri.patch b/target/linux/generic/pending-5.15/850-0006-PCI-aardvark-Make-MSI-irq_chip-structures-static-dri.patch new file mode 100644 index 0000000000..154b11290a --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0006-PCI-aardvark-Make-MSI-irq_chip-structures-static-dri.patch @@ -0,0 +1,93 @@ +From 5eb36a6b9508da442aac80f4df23e3951bbfa7aa Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Marek=20Beh=C3=BAn?= <kabel@kernel.org> +Date: Mon, 10 Jan 2022 00:03:41 +0100 +Subject: [PATCH] PCI: aardvark: Make MSI irq_chip structures static driver + structures +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Marc Zyngier says [1] that we should use struct irq_chip as a global +static struct in the driver. Even though the structure currently +contains a dynamic member (parent_device), Marc says [2] that he plans +to kill it and make the structure completely static. + +Convert Aardvark's priv->msi_bottom_irq_chip and priv->msi_irq_chip to +static driver structure. 
+ +[1] https://lore.kernel.org/linux-pci/877dbcvngf.wl-maz@kernel.org/ +[2] https://lore.kernel.org/linux-pci/874k6gvkhz.wl-maz@kernel.org/ + +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 26 ++++++++++++-------------- + 1 file changed, 12 insertions(+), 14 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -281,8 +281,6 @@ struct advk_pcie { + raw_spinlock_t irq_lock; + struct irq_domain *msi_domain; + struct irq_domain *msi_inner_domain; +- struct irq_chip msi_bottom_irq_chip; +- struct irq_chip msi_irq_chip; + struct msi_domain_info msi_domain_info; + DECLARE_BITMAP(msi_used, MSI_IRQ_NUM); + struct mutex msi_used_lock; +@@ -1199,6 +1197,12 @@ static int advk_msi_set_affinity(struct + return -EINVAL; + } + ++static struct irq_chip advk_msi_bottom_irq_chip = { ++ .name = "MSI", ++ .irq_compose_msi_msg = advk_msi_irq_compose_msi_msg, ++ .irq_set_affinity = advk_msi_set_affinity, ++}; ++ + static int advk_msi_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs, void *args) +@@ -1215,7 +1219,7 @@ static int advk_msi_irq_domain_alloc(str + + for (i = 0; i < nr_irqs; i++) + irq_domain_set_info(domain, virq + i, hwirq + i, +- &pcie->msi_bottom_irq_chip, ++ &advk_msi_bottom_irq_chip, + domain->host_data, handle_simple_irq, + NULL, NULL); + +@@ -1285,29 +1289,23 @@ static const struct irq_domain_ops advk_ + .xlate = irq_domain_xlate_onecell, + }; + ++static struct irq_chip advk_msi_irq_chip = { ++ .name = "advk-MSI", ++}; ++ + static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) + { + struct device *dev = &pcie->pdev->dev; + struct device_node *node = dev->of_node; +- struct irq_chip *bottom_ic, *msi_ic; + struct msi_domain_info *msi_di; + phys_addr_t msi_msg_phys; + + mutex_init(&pcie->msi_used_lock); + +- bottom_ic = &pcie->msi_bottom_irq_chip; +- +- bottom_ic->name = "MSI"; +- bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg; +- bottom_ic->irq_set_affinity = advk_msi_set_affinity; +- +- msi_ic = &pcie->msi_irq_chip; +- msi_ic->name = "advk-MSI"; +- + msi_di = &pcie->msi_domain_info; + msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI; +- msi_di->chip = msi_ic; ++ msi_di->chip = &advk_msi_irq_chip; + + msi_msg_phys = virt_to_phys(&pcie->msi_msg); + diff --git a/target/linux/generic/pending-5.15/850-0007-PCI-aardvark-Make-msi_domain_info-structure-a-static.patch b/target/linux/generic/pending-5.15/850-0007-PCI-aardvark-Make-msi_domain_info-structure-a-static.patch new file mode 100644 index 0000000000..2f15b5b8d6 --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0007-PCI-aardvark-Make-msi_domain_info-structure-a-static.patch @@ -0,0 +1,64 @@ +From c092ab8994f1f777054c0179a9deb40b87ee606f Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Marek=20Beh=C3=BAn?= <kabel@kernel.org> +Date: Mon, 10 Jan 2022 00:10:46 +0100 +Subject: [PATCH] PCI: aardvark: Make msi_domain_info structure a static driver + structure +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Make Aardvark's msi_domain_info structure into a private driver structure. +Domain info is same for every potential instatination of a controller. 
+ +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 16 ++++++++-------- + 1 file changed, 8 insertions(+), 8 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -281,7 +281,6 @@ struct advk_pcie { + raw_spinlock_t irq_lock; + struct irq_domain *msi_domain; + struct irq_domain *msi_inner_domain; +- struct msi_domain_info msi_domain_info; + DECLARE_BITMAP(msi_used, MSI_IRQ_NUM); + struct mutex msi_used_lock; + u16 msi_msg; +@@ -1293,20 +1292,20 @@ static struct irq_chip advk_msi_irq_chip + .name = "advk-MSI", + }; + ++static struct msi_domain_info advk_msi_domain_info = { ++ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_MULTI_PCI_MSI, ++ .chip = &advk_msi_irq_chip, ++}; ++ + static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) + { + struct device *dev = &pcie->pdev->dev; + struct device_node *node = dev->of_node; +- struct msi_domain_info *msi_di; + phys_addr_t msi_msg_phys; + + mutex_init(&pcie->msi_used_lock); + +- msi_di = &pcie->msi_domain_info; +- msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | +- MSI_FLAG_MULTI_PCI_MSI; +- msi_di->chip = &advk_msi_irq_chip; +- + msi_msg_phys = virt_to_phys(&pcie->msi_msg); + + advk_writel(pcie, lower_32_bits(msi_msg_phys), +@@ -1322,7 +1321,8 @@ static int advk_pcie_init_msi_irq_domain + + pcie->msi_domain = + pci_msi_create_irq_domain(of_node_to_fwnode(node), +- msi_di, pcie->msi_inner_domain); ++ &advk_msi_domain_info, ++ pcie->msi_inner_domain); + if (!pcie->msi_domain) { + irq_domain_remove(pcie->msi_inner_domain); + return -ENOMEM; diff --git a/target/linux/generic/pending-5.15/850-0008-PCI-aardvark-Use-dev_fwnode-instead-of-of_node_to_fw.patch b/target/linux/generic/pending-5.15/850-0008-PCI-aardvark-Use-dev_fwnode-instead-of-of_node_to_fw.patch new file mode 100644 index 0000000000..73f7cb9476 --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0008-PCI-aardvark-Use-dev_fwnode-instead-of-of_node_to_fw.patch @@ -0,0 +1,40 @@ +From 59029739d42b439628e2f64f3d8f2db9be97deff Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Marek=20Beh=C3=BAn?= <kabel@kernel.org> +Date: Mon, 10 Jan 2022 00:15:17 +0100 +Subject: [PATCH] PCI: aardvark: Use dev_fwnode() instead of + of_node_to_fwnode(dev->of_node) +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Use simple + dev_fwnode(dev) +instead of + struct device_node *node = dev->of_node; + of_node_to_fwnode(node) +especially since the node variable is not used elsewhere in the function. 
+ +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1301,7 +1301,6 @@ static struct msi_domain_info advk_msi_d + static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) + { + struct device *dev = &pcie->pdev->dev; +- struct device_node *node = dev->of_node; + phys_addr_t msi_msg_phys; + + mutex_init(&pcie->msi_used_lock); +@@ -1320,7 +1319,7 @@ static int advk_pcie_init_msi_irq_domain + return -ENOMEM; + + pcie->msi_domain = +- pci_msi_create_irq_domain(of_node_to_fwnode(node), ++ pci_msi_create_irq_domain(dev_fwnode(dev), + &advk_msi_domain_info, + pcie->msi_inner_domain); + if (!pcie->msi_domain) { diff --git a/target/linux/generic/pending-5.15/850-0009-PCI-aardvark-Refactor-unmasking-summary-MSI-interrup.patch b/target/linux/generic/pending-5.15/850-0009-PCI-aardvark-Refactor-unmasking-summary-MSI-interrup.patch new file mode 100644 index 0000000000..6cdaddc51c --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0009-PCI-aardvark-Refactor-unmasking-summary-MSI-interrup.patch @@ -0,0 +1,44 @@ +From 98feaf97bc64fc640a6c5b1394cd18fc7cd7dac8 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Sun, 28 Mar 2021 14:34:49 +0200 +Subject: [PATCH] PCI: aardvark: Refactor unmasking summary MSI interrupt +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Refactor the masking of ISR0/1 Sources and unmasking of summary MSI interrupt +so that it corresponds to the comments: +- first mask all ISR0/1 +- then unmask all MSIs +- then unmask summary MSI interrupt + +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 10 ++++++---- + 1 file changed, 6 insertions(+), 4 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -578,15 +578,17 @@ static void advk_pcie_setup_hw(struct ad + advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); + + /* Disable All ISR0/1 Sources */ +- reg = PCIE_ISR0_ALL_MASK; +- reg &= ~PCIE_ISR0_MSI_INT_PENDING; +- advk_writel(pcie, reg, PCIE_ISR0_MASK_REG); +- ++ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG); + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); + + /* Unmask all MSIs */ + advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG); + ++ /* Unmask summary MSI interrupt */ ++ reg = advk_readl(pcie, PCIE_ISR0_MASK_REG); ++ reg &= ~PCIE_ISR0_MSI_INT_PENDING; ++ advk_writel(pcie, reg, PCIE_ISR0_MASK_REG); ++ + /* Enable summary interrupt for GIC SPI source */ + reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK); + advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG); diff --git a/target/linux/generic/pending-5.15/850-0010-PCI-aardvark-Add-support-for-masking-MSI-interrupts.patch b/target/linux/generic/pending-5.15/850-0010-PCI-aardvark-Add-support-for-masking-MSI-interrupts.patch new file mode 100644 index 0000000000..808094ebd8 --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0010-PCI-aardvark-Add-support-for-masking-MSI-interrupts.patch @@ -0,0 +1,117 @@ +From 7f353accca6e4a3222991c65b1a6801503973bd3 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Fri, 2 Jul 2021 16:44:10 +0200 +Subject: [PATCH] PCI: aardvark: Add support for masking MSI interrupts +MIME-Version: 1.0 
+Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +We should not unmask MSIs at setup, but only when kernel asks for them +to be unmasked. + +At setup, mask all MSIs, and implement IRQ chip callbacks for masking +and unmasking particular MSIs. + +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 54 ++++++++++++++++++++++++--- + 1 file changed, 49 insertions(+), 5 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -281,6 +281,7 @@ struct advk_pcie { + raw_spinlock_t irq_lock; + struct irq_domain *msi_domain; + struct irq_domain *msi_inner_domain; ++ raw_spinlock_t msi_irq_lock; + DECLARE_BITMAP(msi_used, MSI_IRQ_NUM); + struct mutex msi_used_lock; + u16 msi_msg; +@@ -577,12 +578,10 @@ static void advk_pcie_setup_hw(struct ad + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); + advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); + +- /* Disable All ISR0/1 Sources */ ++ /* Disable All ISR0/1 and MSI Sources */ + advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG); + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); +- +- /* Unmask all MSIs */ +- advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG); ++ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG); + + /* Unmask summary MSI interrupt */ + reg = advk_readl(pcie, PCIE_ISR0_MASK_REG); +@@ -1198,10 +1197,52 @@ static int advk_msi_set_affinity(struct + return -EINVAL; + } + ++static void advk_msi_irq_mask(struct irq_data *d) ++{ ++ struct advk_pcie *pcie = d->domain->host_data; ++ irq_hw_number_t hwirq = irqd_to_hwirq(d); ++ unsigned long flags; ++ u32 mask; ++ ++ raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags); ++ mask = advk_readl(pcie, PCIE_MSI_MASK_REG); ++ mask |= BIT(hwirq); ++ advk_writel(pcie, mask, PCIE_MSI_MASK_REG); ++ raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags); ++} ++ ++static void advk_msi_irq_unmask(struct irq_data *d) ++{ ++ struct advk_pcie *pcie = d->domain->host_data; ++ irq_hw_number_t hwirq = irqd_to_hwirq(d); ++ unsigned long flags; ++ u32 mask; ++ ++ raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags); ++ mask = advk_readl(pcie, PCIE_MSI_MASK_REG); ++ mask &= ~BIT(hwirq); ++ advk_writel(pcie, mask, PCIE_MSI_MASK_REG); ++ raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags); ++} ++ ++static void advk_msi_top_irq_mask(struct irq_data *d) ++{ ++ pci_msi_mask_irq(d); ++ irq_chip_mask_parent(d); ++} ++ ++static void advk_msi_top_irq_unmask(struct irq_data *d) ++{ ++ pci_msi_unmask_irq(d); ++ irq_chip_unmask_parent(d); ++} ++ + static struct irq_chip advk_msi_bottom_irq_chip = { + .name = "MSI", + .irq_compose_msi_msg = advk_msi_irq_compose_msi_msg, + .irq_set_affinity = advk_msi_set_affinity, ++ .irq_mask = advk_msi_irq_mask, ++ .irq_unmask = advk_msi_irq_unmask, + }; + + static int advk_msi_irq_domain_alloc(struct irq_domain *domain, +@@ -1291,7 +1332,9 @@ static const struct irq_domain_ops advk_ + }; + + static struct irq_chip advk_msi_irq_chip = { +- .name = "advk-MSI", ++ .name = "advk-MSI", ++ .irq_mask = advk_msi_top_irq_mask, ++ .irq_unmask = advk_msi_top_irq_unmask, + }; + + static struct msi_domain_info advk_msi_domain_info = { +@@ -1305,6 +1348,7 @@ static int advk_pcie_init_msi_irq_domain + struct device *dev = &pcie->pdev->dev; + phys_addr_t msi_msg_phys; + ++ raw_spin_lock_init(&pcie->msi_irq_lock); + mutex_init(&pcie->msi_used_lock); + + msi_msg_phys = virt_to_phys(&pcie->msi_msg); 
diff --git a/target/linux/generic/pending-5.15/850-0011-PCI-aardvark-Fix-setting-MSI-address.patch b/target/linux/generic/pending-5.15/850-0011-PCI-aardvark-Fix-setting-MSI-address.patch new file mode 100644 index 0000000000..4807f24a12 --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0011-PCI-aardvark-Fix-setting-MSI-address.patch @@ -0,0 +1,91 @@ +From fa73c200f181436eab859374657c53a73778d8ad Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Fri, 26 Mar 2021 17:35:44 +0100 +Subject: [PATCH] PCI: aardvark: Fix setting MSI address +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +MSI address for receiving MSI interrupts needs to be correctly set before +enabling processing of MSI interrupts. + +Move code for setting PCIE_MSI_ADDR_LOW_REG and PCIE_MSI_ADDR_HIGH_REG +from advk_pcie_init_msi_irq_domain() to advk_pcie_setup_hw(), before +enabling PCIE_CORE_CTRL2_MSI_ENABLE. + +After this we can remove the now unused member msi_msg, which was used +only for MSI doorbell address. MSI address can be any address which cannot +be used to DMA to. So change it to the address of the main struct advk_pcie. + +Fixes: 8c39d710363c ("PCI: aardvark: Add Aardvark PCI host controller driver") +Signed-off-by: Pali Rohár <pali@kernel.org> +Acked-by: Marc Zyngier <maz@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +Cc: stable@vger.kernel.org # f21a8b1b6837 ("PCI: aardvark: Move to MSI handling using generic MSI support") +--- + drivers/pci/controller/pci-aardvark.c | 21 +++++++++------------ + 1 file changed, 9 insertions(+), 12 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -284,7 +284,6 @@ struct advk_pcie { + raw_spinlock_t msi_irq_lock; + DECLARE_BITMAP(msi_used, MSI_IRQ_NUM); + struct mutex msi_used_lock; +- u16 msi_msg; + int link_gen; + struct pci_bridge_emul bridge; + struct gpio_desc *reset_gpio; +@@ -479,6 +478,7 @@ static void advk_pcie_disable_ob_win(str + + static void advk_pcie_setup_hw(struct advk_pcie *pcie) + { ++ phys_addr_t msi_addr; + u32 reg; + int i; + +@@ -567,6 +567,11 @@ static void advk_pcie_setup_hw(struct ad + reg |= LANE_COUNT_1; + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); + ++ /* Set MSI address */ ++ msi_addr = virt_to_phys(pcie); ++ advk_writel(pcie, lower_32_bits(msi_addr), PCIE_MSI_ADDR_LOW_REG); ++ advk_writel(pcie, upper_32_bits(msi_addr), PCIE_MSI_ADDR_HIGH_REG); ++ + /* Enable MSI */ + reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG); + reg |= PCIE_CORE_CTRL2_MSI_ENABLE; +@@ -1184,10 +1189,10 @@ static void advk_msi_irq_compose_msi_msg + struct msi_msg *msg) + { + struct advk_pcie *pcie = irq_data_get_irq_chip_data(data); +- phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg); ++ phys_addr_t msi_addr = virt_to_phys(pcie); + +- msg->address_lo = lower_32_bits(msi_msg); +- msg->address_hi = upper_32_bits(msi_msg); ++ msg->address_lo = lower_32_bits(msi_addr); ++ msg->address_hi = upper_32_bits(msi_addr); + msg->data = data->hwirq; + } + +@@ -1346,18 +1351,10 @@ static struct msi_domain_info advk_msi_d + static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) + { + struct device *dev = &pcie->pdev->dev; +- phys_addr_t msi_msg_phys; + + raw_spin_lock_init(&pcie->msi_irq_lock); + mutex_init(&pcie->msi_used_lock); + +- msi_msg_phys = virt_to_phys(&pcie->msi_msg); +- +- advk_writel(pcie, lower_32_bits(msi_msg_phys), +- PCIE_MSI_ADDR_LOW_REG); +- advk_writel(pcie, upper_32_bits(msi_msg_phys), +- 
PCIE_MSI_ADDR_HIGH_REG); +- + pcie->msi_inner_domain = + irq_domain_add_linear(NULL, MSI_IRQ_NUM, + &advk_msi_domain_ops, pcie); diff --git a/target/linux/generic/pending-5.15/850-0012-PCI-aardvark-Enable-MSI-X-support.patch b/target/linux/generic/pending-5.15/850-0012-PCI-aardvark-Enable-MSI-X-support.patch new file mode 100644 index 0000000000..20757c7a80 --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0012-PCI-aardvark-Enable-MSI-X-support.patch @@ -0,0 +1,38 @@ +From 735a4ac9782b96fbe1543c578aa8334364f21abd Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Fri, 2 Apr 2021 14:05:24 +0200 +Subject: [PATCH] PCI: aardvark: Enable MSI-X support +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +According to PCI 3.0 specification, sending both MSI and MSI-X interrupts +is done by DWORD memory write operation to doorbell message address. The +write operation for MSI has zero upper 16 bits and the MSI interrupt number +in the lower 16 bits, while the write operation for MSI-X contains a 32-bit +value from MSI-X table. + +Since the driver only uses interrupt numbers from range 0..31, the upper +16 bits of the DWORD memory write operation to doorbell message address +are zero even for MSI-X interrupts. Thus we can enable MSI-X interrupts. + +Testing proves that kernel can correctly receive MSI-X interrupts from PCIe +cards which supports both MSI and MSI-X interrupts. + +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1344,7 +1344,7 @@ static struct irq_chip advk_msi_irq_chip + + static struct msi_domain_info advk_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | +- MSI_FLAG_MULTI_PCI_MSI, ++ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, + .chip = &advk_msi_irq_chip, + }; + diff --git a/target/linux/generic/pending-5.15/850-0013-PCI-aardvark-Add-support-for-ERR-interrupt-on-emulat.patch b/target/linux/generic/pending-5.15/850-0013-PCI-aardvark-Add-support-for-ERR-interrupt-on-emulat.patch new file mode 100644 index 0000000000..eb27610aa3 --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0013-PCI-aardvark-Add-support-for-ERR-interrupt-on-emulat.patch @@ -0,0 +1,100 @@ +From 7f3e55a3890fa26d15e2e4e90213962d1a7f6df9 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Fri, 12 Feb 2021 20:32:55 +0100 +Subject: [PATCH] PCI: aardvark: Add support for ERR interrupt on emulated + bridge +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +ERR interrupt is triggered when corresponding bit is unmasked in both ISR0 +and PCI_EXP_DEVCTL registers. Unmasking ERR bits in PCI_EXP_DEVCTL register +is not enough. This means that currently the ERR interrupt is never +triggered. + +Unmask ERR bits in ISR0 register at driver probe time. ERR interrupt is not +triggered until ERR bits are unmasked also in PCI_EXP_DEVCTL register, +which is done by AER driver. So it is safe to unconditionally unmask all +ERR bits in aardvark probe. + +Aardvark HW sets PCI_ERR_ROOT_AER_IRQ to zero and when corresponding bits +in ISR0 and PCI_EXP_DEVCTL are enabled, the HW triggers a generic interrupt +on GIC. 
Chain this interrupt to PCIe interrupt 0 with +generic_handle_domain_irq() to allow processing of ERR interrupts. + +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 36 ++++++++++++++++++++++++++- + 1 file changed, 35 insertions(+), 1 deletion(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -97,6 +97,10 @@ + #define PCIE_MSG_PM_PME_MASK BIT(7) + #define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44) + #define PCIE_ISR0_MSI_INT_PENDING BIT(24) ++#define PCIE_ISR0_CORR_ERR BIT(11) ++#define PCIE_ISR0_NFAT_ERR BIT(12) ++#define PCIE_ISR0_FAT_ERR BIT(13) ++#define PCIE_ISR0_ERR_MASK GENMASK(13, 11) + #define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val)) + #define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val)) + #define PCIE_ISR0_ALL_MASK GENMASK(31, 0) +@@ -785,11 +789,15 @@ advk_pci_bridge_emul_base_conf_read(stru + case PCI_INTERRUPT_LINE: { + /* + * From the whole 32bit register we support reading from HW only +- * one bit: PCI_BRIDGE_CTL_BUS_RESET. ++ * two bits: PCI_BRIDGE_CTL_BUS_RESET and PCI_BRIDGE_CTL_SERR. + * Other bits are retrieved only from emulated config buffer. + */ + __le32 *cfgspace = (__le32 *)&bridge->conf; + u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]); ++ if (advk_readl(pcie, PCIE_ISR0_MASK_REG) & PCIE_ISR0_ERR_MASK) ++ val &= ~(PCI_BRIDGE_CTL_SERR << 16); ++ else ++ val |= PCI_BRIDGE_CTL_SERR << 16; + if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN) + val |= PCI_BRIDGE_CTL_BUS_RESET << 16; + else +@@ -815,6 +823,19 @@ advk_pci_bridge_emul_base_conf_write(str + break; + + case PCI_INTERRUPT_LINE: ++ /* ++ * According to Figure 6-3: Pseudo Logic Diagram for Error ++ * Message Controls in PCIe base specification, SERR# Enable bit ++ * in Bridge Control register enable receiving of ERR_* messages ++ */ ++ if (mask & (PCI_BRIDGE_CTL_SERR << 16)) { ++ u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG); ++ if (new & (PCI_BRIDGE_CTL_SERR << 16)) ++ val &= ~PCIE_ISR0_ERR_MASK; ++ else ++ val |= PCIE_ISR0_ERR_MASK; ++ advk_writel(pcie, val, PCIE_ISR0_MASK_REG); ++ } + if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) { + u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG); + if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16)) +@@ -1466,6 +1487,19 @@ static void advk_pcie_handle_int(struct + isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); + isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK); + ++ /* Process ERR interrupt */ ++ if (isr0_status & PCIE_ISR0_ERR_MASK) { ++ advk_writel(pcie, PCIE_ISR0_ERR_MASK, PCIE_ISR0_REG); ++ ++ /* ++ * Aardvark HW returns zero for PCI_ERR_ROOT_AER_IRQ, so use ++ * PCIe interrupt 0 ++ */ ++ virq = irq_find_mapping(pcie->irq_domain, 0); ++ if (generic_handle_irq(virq) == -EINVAL) ++ dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n"); ++ } ++ + /* Process MSI interrupts */ + if (isr0_status & PCIE_ISR0_MSI_INT_PENDING) + advk_pcie_handle_msi(pcie); diff --git a/target/linux/generic/pending-5.15/850-0014-PCI-aardvark-Fix-reading-PCI_EXP_RTSTA_PME-bit-on-em.patch b/target/linux/generic/pending-5.15/850-0014-PCI-aardvark-Fix-reading-PCI_EXP_RTSTA_PME-bit-on-em.patch new file mode 100644 index 0000000000..4a2cd77144 --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0014-PCI-aardvark-Fix-reading-PCI_EXP_RTSTA_PME-bit-on-em.patch @@ -0,0 +1,44 @@ +From 5f354992eeef9a51c67796dc9f7f578d3584baa2 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Wed, 8 Dec 2021 
05:57:54 +0100 +Subject: [PATCH] PCI: aardvark: Fix reading PCI_EXP_RTSTA_PME bit on emulated + bridge +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +The emulated bridge returns incorrect value for PCI_EXP_RTSTA register +during readout in advk_pci_bridge_emul_pcie_conf_read() function: the +correct bit is BIT(16), but we are setting BIT(23), because the code +does + *value = (isr0 & PCIE_MSG_PM_PME_MASK) << 16 +where + PCIE_MSG_PM_PME_MASK +is + BIT(7). + +The code should probably have been something like + *value = (!!(isr0 & PCIE_MSG_PM_PME_MASK)) << 16, +but we are better of using an if() and using the proper macro for this +bit. + +Fixes: 8a3ebd8de328 ("PCI: aardvark: Implement emulated root PCI bridge config space") +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -874,7 +874,9 @@ advk_pci_bridge_emul_pcie_conf_read(stru + case PCI_EXP_RTSTA: { + u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG); + u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG); +- *value = (isr0 & PCIE_MSG_PM_PME_MASK) << 16 | (msglog >> 16); ++ *value = msglog >> 16; ++ if (isr0 & PCIE_MSG_PM_PME_MASK) ++ *value |= PCI_EXP_RTSTA_PME; + return PCI_BRIDGE_EMUL_HANDLED; + } + diff --git a/target/linux/generic/pending-5.15/850-0015-PCI-aardvark-Optimize-writing-PCI_EXP_RTCTL_PMEIE-an.patch b/target/linux/generic/pending-5.15/850-0015-PCI-aardvark-Optimize-writing-PCI_EXP_RTCTL_PMEIE-an.patch new file mode 100644 index 0000000000..5ed809def2 --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0015-PCI-aardvark-Optimize-writing-PCI_EXP_RTCTL_PMEIE-an.patch @@ -0,0 +1,52 @@ +From 3fe0073d116d9902df08761c1cf0d733dd4c38fc Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Wed, 8 Dec 2021 06:03:50 +0100 +Subject: [PATCH] PCI: aardvark: Optimize writing PCI_EXP_RTCTL_PMEIE and + PCI_EXP_RTSTA_PME on emulated bridge +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +To optimize advk_pci_bridge_emul_pcie_conf_write() code, touch +PCIE_ISR0_REG and PCIE_ISR0_MASK_REG registers only when it is really +needed, when processing PCI_EXP_RTCTL_PMEIE and PCI_EXP_RTSTA_PME bits. 
+ +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 20 +++++++++++--------- + 1 file changed, 11 insertions(+), 9 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -932,19 +932,21 @@ advk_pci_bridge_emul_pcie_conf_write(str + advk_pcie_wait_for_retrain(pcie); + break; + +- case PCI_EXP_RTCTL: { ++ case PCI_EXP_RTCTL: + /* Only mask/unmask PME interrupt */ +- u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) & +- ~PCIE_MSG_PM_PME_MASK; +- if ((new & PCI_EXP_RTCTL_PMEIE) == 0) +- val |= PCIE_MSG_PM_PME_MASK; +- advk_writel(pcie, val, PCIE_ISR0_MASK_REG); ++ if (mask & PCI_EXP_RTCTL_PMEIE) { ++ u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG); ++ if (new & PCI_EXP_RTCTL_PMEIE) ++ val &= ~PCIE_MSG_PM_PME_MASK; ++ else ++ val |= PCIE_MSG_PM_PME_MASK; ++ advk_writel(pcie, val, PCIE_ISR0_MASK_REG); ++ } + break; +- } + + case PCI_EXP_RTSTA: +- new = (new & PCI_EXP_RTSTA_PME) >> 9; +- advk_writel(pcie, new, PCIE_ISR0_REG); ++ if (new & PCI_EXP_RTSTA_PME) ++ advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG); + break; + + case PCI_EXP_DEVCTL: diff --git a/target/linux/generic/pending-5.15/850-0016-PCI-aardvark-Add-support-for-PME-interrupts.patch b/target/linux/generic/pending-5.15/850-0016-PCI-aardvark-Add-support-for-PME-interrupts.patch new file mode 100644 index 0000000000..4272b6dd6e --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0016-PCI-aardvark-Add-support-for-PME-interrupts.patch @@ -0,0 +1,47 @@ +From 7acd8ef92e8789e10b5d736d73cea3b625087f26 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Wed, 8 Dec 2021 06:07:44 +0100 +Subject: [PATCH] PCI: aardvark: Add support for PME interrupts +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Currently enabling PCI_EXP_RTSTA_PME bit in PCI_EXP_RTCTL register does +nothing. This is because PCIe PME driver expects to receive PCIe interrupt +defined in PCI_EXP_FLAGS_IRQ register, but aardvark hardware does not +trigger PCIe INTx/MSI interrupt for PME event, rather it triggers custom +aardvark interrupt which this driver is not processing yet. + +Fix this issue by handling PME interrupt in advk_pcie_handle_int() and +chaining it to PCIe interrupt 0 with generic_handle_domain_irq() (since +aardvark sets PCI_EXP_FLAGS_IRQ to zero). With this change PCIe PME driver +finally starts receiving PME interrupt. + +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1491,6 +1491,19 @@ static void advk_pcie_handle_int(struct + isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); + isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK); + ++ /* Process PME interrupt */ ++ if (isr0_status & PCIE_MSG_PM_PME_MASK) { ++ /* ++ * Do not clear PME interrupt bit in ISR0, it is cleared by IRQ ++ * receiver by writing to the PCI_EXP_RTSTA register of emulated ++ * root bridge. Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, ++ * so use PCIe interrupt 0. 
++ */ ++ virq = irq_find_mapping(pcie->irq_domain, 0); ++ if (generic_handle_irq(virq) == -EINVAL) ++ dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n"); ++ } ++ + /* Process ERR interrupt */ + if (isr0_status & PCIE_ISR0_ERR_MASK) { + advk_writel(pcie, PCIE_ISR0_ERR_MASK, PCIE_ISR0_REG); diff --git a/target/linux/generic/pending-5.15/850-0017-PCI-aardvark-Fix-support-for-PME-requester-on-emulat.patch b/target/linux/generic/pending-5.15/850-0017-PCI-aardvark-Fix-support-for-PME-requester-on-emulat.patch new file mode 100644 index 0000000000..6aa7dcbd76 --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0017-PCI-aardvark-Fix-support-for-PME-requester-on-emulat.patch @@ -0,0 +1,173 @@ +From 68727b545332327b4c2f9c0f8d006be8970e7832 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Fri, 19 Feb 2021 14:22:22 +0100 +Subject: [PATCH] PCI: aardvark: Fix support for PME requester on emulated + bridge +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Enable aardvark PME interrupt unconditionally by unmasking it and read PME +requester ID to emulated bridge config space immediately after receiving +interrupt. + +PME requester ID is stored in the PCIE_MSG_LOG_REG register, which contains +the last inbound message. So when new inbound message is received by HW +(including non-PM), the content in PCIE_MSG_LOG_REG register is replaced by +a new value. + +PCIe specification mandates that subsequent PMEs are kept pending until the +PME Status Register bit is cleared by software by writing a 1b. + +Support for masking/unmasking PME interrupt on emulated bridge via +PCI_EXP_RTCTL_PMEIE bit is now implemented only in emulated bridge config +space, to ensure that we do not miss any aardvark PME interrupt. + +Reading of PCI_EXP_RTCAP and PCI_EXP_RTSTA registers is simplified as final +value is now always stored into emulated bridge config space by the +interrupt handler, so there is no need to implement support for these +registers in read_pcie callback. + +Clearing of W1C bit PCI_EXP_RTSTA_PME is now also simplified as it is done +by pci-bridge-emul.c code for emulated bridge config space. So there is no +need to implement support for clearing this bit in write_pcie callback. + +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 94 +++++++++++++++------------ + 1 file changed, 52 insertions(+), 42 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -597,6 +597,11 @@ static void advk_pcie_setup_hw(struct ad + reg &= ~PCIE_ISR0_MSI_INT_PENDING; + advk_writel(pcie, reg, PCIE_ISR0_MASK_REG); + ++ /* Unmask PME interrupt for processing of PME requester */ ++ reg = advk_readl(pcie, PCIE_ISR0_MASK_REG); ++ reg &= ~PCIE_MSG_PM_PME_MASK; ++ advk_writel(pcie, reg, PCIE_ISR0_MASK_REG); ++ + /* Enable summary interrupt for GIC SPI source */ + reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK); + advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG); +@@ -863,22 +868,11 @@ advk_pci_bridge_emul_pcie_conf_read(stru + *value = PCI_EXP_SLTSTA_PDS << 16; + return PCI_BRIDGE_EMUL_HANDLED; + +- case PCI_EXP_RTCTL: { +- u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG); +- *value = (val & PCIE_MSG_PM_PME_MASK) ? 
0 : PCI_EXP_RTCTL_PMEIE; +- *value |= le16_to_cpu(bridge->pcie_conf.rootctl) & PCI_EXP_RTCTL_CRSSVE; +- *value |= PCI_EXP_RTCAP_CRSVIS << 16; +- return PCI_BRIDGE_EMUL_HANDLED; +- } +- +- case PCI_EXP_RTSTA: { +- u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG); +- u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG); +- *value = msglog >> 16; +- if (isr0 & PCIE_MSG_PM_PME_MASK) +- *value |= PCI_EXP_RTSTA_PME; +- return PCI_BRIDGE_EMUL_HANDLED; +- } ++ /* ++ * PCI_EXP_RTCTL and PCI_EXP_RTSTA are also supported, but do not need ++ * to be handled here, because their values are stored in emulated ++ * config space buffer, and we read them from there when needed. ++ */ + + case PCI_EXP_LNKCAP: { + u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg); +@@ -932,22 +926,19 @@ advk_pci_bridge_emul_pcie_conf_write(str + advk_pcie_wait_for_retrain(pcie); + break; + +- case PCI_EXP_RTCTL: +- /* Only mask/unmask PME interrupt */ +- if (mask & PCI_EXP_RTCTL_PMEIE) { +- u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG); +- if (new & PCI_EXP_RTCTL_PMEIE) +- val &= ~PCIE_MSG_PM_PME_MASK; +- else +- val |= PCIE_MSG_PM_PME_MASK; +- advk_writel(pcie, val, PCIE_ISR0_MASK_REG); +- } ++ case PCI_EXP_RTCTL: { ++ u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl); ++ /* Only emulation of PMEIE and CRSSVE bits is provided */ ++ rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE; ++ bridge->pcie_conf.rootctl = cpu_to_le16(rootctl); + break; ++ } + +- case PCI_EXP_RTSTA: +- if (new & PCI_EXP_RTSTA_PME) +- advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG); +- break; ++ /* ++ * PCI_EXP_RTSTA is also supported, but does not need to be handled ++ * here, because its value is stored in emulated config space buffer, ++ * and we write it there when needed. ++ */ + + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVCTL2: +@@ -1452,6 +1443,34 @@ static void advk_pcie_remove_irq_domain( + irq_domain_remove(pcie->irq_domain); + } + ++static void advk_pcie_handle_pme(struct advk_pcie *pcie) ++{ ++ u32 requester = advk_readl(pcie, PCIE_MSG_LOG_REG) >> 16; ++ int virq; ++ ++ advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG); ++ ++ /* ++ * PCIE_MSG_LOG_REG contains the last inbound message, so store ++ * the requester ID only when PME was not asserted yet. ++ * Also do not trigger PME interrupt when PME is still asserted. ++ */ ++ if (!(le32_to_cpu(pcie->bridge.pcie_conf.rootsta) & PCI_EXP_RTSTA_PME)) { ++ pcie->bridge.pcie_conf.rootsta = cpu_to_le32(requester | PCI_EXP_RTSTA_PME); ++ ++ /* ++ * Trigger PME interrupt only if PMEIE bit in Root Control is set. ++ * Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, so use PCIe interrupt 0. ++ */ ++ if (!(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_PMEIE)) ++ return; ++ ++ virq = irq_find_mapping(pcie->irq_domain, 0); ++ if (generic_handle_irq(virq) == -EINVAL) ++ dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n"); ++ } ++} ++ + static void advk_pcie_handle_msi(struct advk_pcie *pcie) + { + u32 msi_val, msi_mask, msi_status, msi_idx; +@@ -1491,18 +1510,9 @@ static void advk_pcie_handle_int(struct + isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); + isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK); + +- /* Process PME interrupt */ +- if (isr0_status & PCIE_MSG_PM_PME_MASK) { +- /* +- * Do not clear PME interrupt bit in ISR0, it is cleared by IRQ +- * receiver by writing to the PCI_EXP_RTSTA register of emulated +- * root bridge. Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, +- * so use PCIe interrupt 0. 
+- */ +- virq = irq_find_mapping(pcie->irq_domain, 0); +- if (generic_handle_irq(virq) == -EINVAL) +- dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n"); +- } ++ /* Process PME interrupt as the first one to do not miss PME requester id */ ++ if (isr0_status & PCIE_MSG_PM_PME_MASK) ++ advk_pcie_handle_pme(pcie); + + /* Process ERR interrupt */ + if (isr0_status & PCIE_ISR0_ERR_MASK) { diff --git a/target/linux/generic/pending-5.15/850-0018-PCI-aardvark-Use-separate-INTA-interrupt-for-emulate.patch b/target/linux/generic/pending-5.15/850-0018-PCI-aardvark-Use-separate-INTA-interrupt-for-emulate.patch new file mode 100644 index 0000000000..e9f65aca23 --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0018-PCI-aardvark-Use-separate-INTA-interrupt-for-emulate.patch @@ -0,0 +1,161 @@ +From db305233136f5aa2444a8287a279384e8458c458 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Thu, 1 Apr 2021 20:12:48 +0200 +Subject: [PATCH] PCI: aardvark: Use separate INTA interrupt for emulated root + bridge +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Emulated root bridge currently provides only one Legacy INTA interrupt +which is used for reporting PCIe PME and ERR events and handled by kernel +PCIe PME and AER drivers. + +Aardvark HW reports these PME and ERR events separately, so there is no +need to mix real INTA interrupt and emulated INTA interrupt for PCIe PME +and AER drivers. + +Register a new advk-RP (as in Root Port) irq chip and a new irq domain +for emulated root bridge and use this new separate irq domain for +providing INTA interrupt from emulated root bridge for PME and ERR events. + +The real INTA interrupt from real devices is now separate. + +A custom map_irq callback function on PCI host bridge structure is used to +allocate IRQ mapping for emulated root bridge from new irq domain. Original +callback of_irq_parse_and_map_pci() is used for all other devices as before. 
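
The routing this patch describes can be condensed into the map_irq callback it adds (a sketch assembled from the hunks that follow; struct advk_pcie is trimmed to the one member used here): the emulated root bridge gets hwirq 0 of the driver's own single-entry rp_irq_domain, while every real device behind the bridge keeps the device-tree INTx mapping.

#include <linux/irqdomain.h>
#include <linux/of_pci.h>
#include <linux/pci.h>

struct advk_pcie {
	/* ...other members elided... */
	struct irq_domain *rp_irq_domain;	/* 1-entry domain added by this patch */
};

static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct advk_pcie *pcie = dev->bus->sysdata;

	/* pin is 1-based (1=INTA); the root-port domain uses hwirq 0 */
	if (pci_is_root_bus(dev->bus))
		return irq_create_mapping(pcie->rp_irq_domain, pin - 1);

	/* all other devices keep the original DT-based INTx mapping */
	return of_irq_parse_and_map_pci(dev, slot, pin);
}
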
+ +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 69 ++++++++++++++++++++++++++- + 1 file changed, 67 insertions(+), 2 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -280,6 +280,7 @@ struct advk_pcie { + } wins[OB_WIN_COUNT]; + u8 wins_count; + int irq; ++ struct irq_domain *rp_irq_domain; + struct irq_domain *irq_domain; + struct irq_chip irq_chip; + raw_spinlock_t irq_lock; +@@ -1443,6 +1444,44 @@ static void advk_pcie_remove_irq_domain( + irq_domain_remove(pcie->irq_domain); + } + ++static struct irq_chip advk_rp_irq_chip = { ++ .name = "advk-RP", ++}; ++ ++static int advk_pcie_rp_irq_map(struct irq_domain *h, ++ unsigned int virq, irq_hw_number_t hwirq) ++{ ++ struct advk_pcie *pcie = h->host_data; ++ ++ irq_set_chip_and_handler(virq, &advk_rp_irq_chip, handle_simple_irq); ++ irq_set_chip_data(virq, pcie); ++ ++ return 0; ++} ++ ++static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = { ++ .map = advk_pcie_rp_irq_map, ++ .xlate = irq_domain_xlate_onecell, ++}; ++ ++static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie) ++{ ++ pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1, ++ &advk_pcie_rp_irq_domain_ops, ++ pcie); ++ if (!pcie->rp_irq_domain) { ++ dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n"); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static void advk_pcie_remove_rp_irq_domain(struct advk_pcie *pcie) ++{ ++ irq_domain_remove(pcie->rp_irq_domain); ++} ++ + static void advk_pcie_handle_pme(struct advk_pcie *pcie) + { + u32 requester = advk_readl(pcie, PCIE_MSG_LOG_REG) >> 16; +@@ -1465,7 +1504,7 @@ static void advk_pcie_handle_pme(struct + if (!(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_PMEIE)) + return; + +- virq = irq_find_mapping(pcie->irq_domain, 0); ++ virq = irq_find_mapping(pcie->rp_irq_domain, 0); + if (generic_handle_irq(virq) == -EINVAL) + dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n"); + } +@@ -1522,7 +1561,7 @@ static void advk_pcie_handle_int(struct + * Aardvark HW returns zero for PCI_ERR_ROOT_AER_IRQ, so use + * PCIe interrupt 0 + */ +- virq = irq_find_mapping(pcie->irq_domain, 0); ++ virq = irq_find_mapping(pcie->rp_irq_domain, 0); + if (generic_handle_irq(virq) == -EINVAL) + dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n"); + } +@@ -1568,6 +1607,21 @@ static void advk_pcie_irq_handler(struct + chained_irq_exit(chip, desc); + } + ++static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) ++{ ++ struct advk_pcie *pcie = dev->bus->sysdata; ++ ++ /* ++ * Emulated root bridge has its own emulated irq chip and irq domain. ++ * Argument pin is the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) and ++ * hwirq for irq_create_mapping() is indexed from zero. 
++ */ ++ if (pci_is_root_bus(dev->bus)) ++ return irq_create_mapping(pcie->rp_irq_domain, pin - 1); ++ else ++ return of_irq_parse_and_map_pci(dev, slot, pin); ++} ++ + static void __maybe_unused advk_pcie_disable_phy(struct advk_pcie *pcie) + { + phy_power_off(pcie->phy); +@@ -1771,14 +1825,24 @@ static int advk_pcie_probe(struct platfo + return ret; + } + ++ ret = advk_pcie_init_rp_irq_domain(pcie); ++ if (ret) { ++ dev_err(dev, "Failed to initialize irq\n"); ++ advk_pcie_remove_msi_irq_domain(pcie); ++ advk_pcie_remove_irq_domain(pcie); ++ return ret; ++ } ++ + irq_set_chained_handler_and_data(pcie->irq, advk_pcie_irq_handler, pcie); + + bridge->sysdata = pcie; + bridge->ops = &advk_pcie_ops; ++ bridge->map_irq = advk_pcie_map_irq; + + ret = pci_host_probe(bridge); + if (ret < 0) { + irq_set_chained_handler_and_data(pcie->irq, NULL, NULL); ++ advk_pcie_remove_rp_irq_domain(pcie); + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); + return ret; +@@ -1830,6 +1894,7 @@ static int advk_pcie_remove(struct platf + irq_set_chained_handler_and_data(pcie->irq, NULL, NULL); + + /* Remove IRQ domains */ ++ advk_pcie_remove_rp_irq_domain(pcie); + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); + diff --git a/target/linux/generic/pending-5.15/850-0019-PCI-aardvark-Remove-irq_mask_ack-callback-for-INTx-i.patch b/target/linux/generic/pending-5.15/850-0019-PCI-aardvark-Remove-irq_mask_ack-callback-for-INTx-i.patch new file mode 100644 index 0000000000..75f31ba19e --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0019-PCI-aardvark-Remove-irq_mask_ack-callback-for-INTx-i.patch @@ -0,0 +1,29 @@ +From 8c9eef96e24f34ff8b62b230700416b822691a37 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Thu, 1 Apr 2021 14:24:12 +0200 +Subject: [PATCH] PCI: aardvark: Remove irq_mask_ack callback for INTx + interrupts +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Callback for irq_mask_ack is the same as for irq_mask. As there is no +special handling for irq_ack, there is no need to define irq_mask_ack too. + +Signed-off-by: Pali Rohár <pali@kernel.org> +Acked-by: Marc Zyngier <maz@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 1 - + 1 file changed, 1 deletion(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1422,7 +1422,6 @@ static int advk_pcie_init_irq_domain(str + } + + irq_chip->irq_mask = advk_pcie_irq_mask; +- irq_chip->irq_mask_ack = advk_pcie_irq_mask; + irq_chip->irq_unmask = advk_pcie_irq_unmask; + + pcie->irq_domain = diff --git a/target/linux/generic/pending-5.15/850-0020-PCI-aardvark-Don-t-mask-irq-when-mapping.patch b/target/linux/generic/pending-5.15/850-0020-PCI-aardvark-Don-t-mask-irq-when-mapping.patch new file mode 100644 index 0000000000..5583dc1b6f --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0020-PCI-aardvark-Don-t-mask-irq-when-mapping.patch @@ -0,0 +1,27 @@ +From dc01fca5a9d9c09ce9a3fb2bc2e7715c37ff3bd9 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Thu, 1 Apr 2021 14:30:06 +0200 +Subject: [PATCH] PCI: aardvark: Don't mask irq when mapping +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +By default, all Legacy INTx interrupts are masked, so there is no need to +mask this interrupt during irq_map callback. 
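
For reference, this is how the irq_map callback reads after the change, reassembled from the context lines of the hunk below; the only difference is that the advk_pcie_irq_mask() call is gone, relying on the default state described above where all INTx sources start out masked.

static int advk_pcie_irq_map(struct irq_domain *h,
			     unsigned int virq, irq_hw_number_t hwirq)
{
	struct advk_pcie *pcie = h->host_data;

	/* no explicit masking here: the INTx source is already masked by default */
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &pcie->irq_chip, handle_level_irq);
	irq_set_chip_data(virq, pcie);

	return 0;
}
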
+ +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 1 - + 1 file changed, 1 deletion(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1339,7 +1339,6 @@ static int advk_pcie_irq_map(struct irq_ + { + struct advk_pcie *pcie = h->host_data; + +- advk_pcie_irq_mask(irq_get_irq_data(virq)); + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &pcie->irq_chip, + handle_level_irq); diff --git a/target/linux/generic/pending-5.15/850-0021-PCI-aardvark-Drop-__maybe_unused-from-advk_pcie_disa.patch b/target/linux/generic/pending-5.15/850-0021-PCI-aardvark-Drop-__maybe_unused-from-advk_pcie_disa.patch new file mode 100644 index 0000000000..f359663ca4 --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0021-PCI-aardvark-Drop-__maybe_unused-from-advk_pcie_disa.patch @@ -0,0 +1,28 @@ +From a511c99262ce19ee06908d27212b39ec4c5aeb17 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Marek=20Beh=C3=BAn?= <kabel@kernel.org> +Date: Wed, 8 Dec 2021 04:40:29 +0100 +Subject: [PATCH] PCI: aardvark: Drop __maybe_unused from + advk_pcie_disable_phy() +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This function is now always used in driver remove method, drop the +__maybe_unused attribute. + +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1620,7 +1620,7 @@ static int advk_pcie_map_irq(const struc + return of_irq_parse_and_map_pci(dev, slot, pin); + } + +-static void __maybe_unused advk_pcie_disable_phy(struct advk_pcie *pcie) ++static void advk_pcie_disable_phy(struct advk_pcie *pcie) + { + phy_power_off(pcie->phy); + phy_exit(pcie->phy); diff --git a/target/linux/generic/pending-5.15/850-0022-PCI-aardvark-Update-comment-about-link-going-down-af.patch b/target/linux/generic/pending-5.15/850-0022-PCI-aardvark-Update-comment-about-link-going-down-af.patch new file mode 100644 index 0000000000..cc489ebc8a --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0022-PCI-aardvark-Update-comment-about-link-going-down-af.patch @@ -0,0 +1,35 @@ +From bafda858364003a70b9cda84282f9761587f8033 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Marek=20Beh=C3=BAn?= <kabel@kernel.org> +Date: Mon, 10 Jan 2022 00:47:38 +0100 +Subject: [PATCH] PCI: aardvark: Update comment about link going down after + link-up +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Update the comment about what happens when link goes down after we have +checked for link-up. If a PIO request is done while link-down, we have +a serious problem. + +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1005,8 +1005,12 @@ static bool advk_pcie_valid_device(struc + return false; + + /* +- * If the link goes down after we check for link-up, nothing bad +- * happens but the config access times out. 
++ * If the link goes down after we check for link-up, we have a problem: ++ * if a PIO request is executed while link-down, the whole controller ++ * gets stuck in a non-functional state, and even after link comes up ++ * again, PIO requests won't work anymore, and a reset of the whole PCIe ++ * controller is needed. Therefore we need to prevent sending PIO ++ * requests while the link is down. + */ + if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie)) + return false; diff --git a/target/linux/generic/pending-5.15/850-0023-PCI-aardvark-Make-main-irq_chip-structure-a-static-d.patch b/target/linux/generic/pending-5.15/850-0023-PCI-aardvark-Make-main-irq_chip-structure-a-static-d.patch new file mode 100644 index 0000000000..a5e2d8a3dd --- /dev/null +++ b/target/linux/generic/pending-5.15/850-0023-PCI-aardvark-Make-main-irq_chip-structure-a-static-d.patch @@ -0,0 +1,102 @@ +From 663b9f99bb35dbc0c7b685f71ee3668a60d31320 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Marek=20Beh=C3=BAn?= <kabel@kernel.org> +Date: Mon, 10 Jan 2022 02:02:00 +0100 +Subject: [PATCH] PCI: aardvark: Make main irq_chip structure a static driver + structure +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Marc Zyngier says [1] that we should use struct irq_chip as a global +static struct in the driver. Even though the structure currently +contains a dynamic member (parent_device), Marc says [2] that he plans +to kill it and make the structure completely static. + +We have already converted others irq_chip structures in this driver in +this way, but we omitted this one because the .name member is +dynamically created from device's name, and the name is displayed in +sysfs, so changing it would break sysfs ABI. + +The rationale for changing the name (to "advk-INT") in spite of sysfs +ABI, and thus allowing to convert to a static structure, is that after +the other changes we made in this series, the IRQ chip is basically +something different: it no logner generates ERR and PME interrupts (they +are generated by emulated bridge's rp_irq_chip). 
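
The end state this patch produces, taken from the hunks below, is a single file-scope chip whose fixed name "advk-INT" is what now appears in sysfs, instead of a per-device name allocated with devm_kasprintf() at probe time:

static struct irq_chip advk_irq_chip = {
	.name		= "advk-INT",
	.irq_mask	= advk_pcie_irq_mask,
	.irq_unmask	= advk_pcie_irq_unmask,
};

static int advk_pcie_irq_map(struct irq_domain *h,
			     unsigned int virq, irq_hw_number_t hwirq)
{
	struct advk_pcie *pcie = h->host_data;

	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &advk_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, pcie);

	return 0;
}
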
+ +[1] https://lore.kernel.org/linux-pci/877dbcvngf.wl-maz@kernel.org/ +[2] https://lore.kernel.org/linux-pci/874k6gvkhz.wl-maz@kernel.org/ + +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/pci/controller/pci-aardvark.c | 25 +++++++------------------ + 1 file changed, 7 insertions(+), 18 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -282,7 +282,6 @@ struct advk_pcie { + int irq; + struct irq_domain *rp_irq_domain; + struct irq_domain *irq_domain; +- struct irq_chip irq_chip; + raw_spinlock_t irq_lock; + struct irq_domain *msi_domain; + struct irq_domain *msi_inner_domain; +@@ -1338,14 +1337,19 @@ static void advk_pcie_irq_unmask(struct + raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); + } + ++static struct irq_chip advk_irq_chip = { ++ .name = "advk-INT", ++ .irq_mask = advk_pcie_irq_mask, ++ .irq_unmask = advk_pcie_irq_unmask, ++}; ++ + static int advk_pcie_irq_map(struct irq_domain *h, + unsigned int virq, irq_hw_number_t hwirq) + { + struct advk_pcie *pcie = h->host_data; + + irq_set_status_flags(virq, IRQ_LEVEL); +- irq_set_chip_and_handler(virq, &pcie->irq_chip, +- handle_level_irq); ++ irq_set_chip_and_handler(virq, &advk_irq_chip, handle_level_irq); + irq_set_chip_data(virq, pcie); + + return 0; +@@ -1404,7 +1408,6 @@ static int advk_pcie_init_irq_domain(str + struct device *dev = &pcie->pdev->dev; + struct device_node *node = dev->of_node; + struct device_node *pcie_intc_node; +- struct irq_chip *irq_chip; + int ret = 0; + + raw_spin_lock_init(&pcie->irq_lock); +@@ -1415,28 +1418,14 @@ static int advk_pcie_init_irq_domain(str + return -ENODEV; + } + +- irq_chip = &pcie->irq_chip; +- +- irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq", +- dev_name(dev)); +- if (!irq_chip->name) { +- ret = -ENOMEM; +- goto out_put_node; +- } +- +- irq_chip->irq_mask = advk_pcie_irq_mask; +- irq_chip->irq_unmask = advk_pcie_irq_unmask; +- + pcie->irq_domain = + irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, + &advk_pcie_irq_domain_ops, pcie); + if (!pcie->irq_domain) { + dev_err(dev, "Failed to get a INTx IRQ domain\n"); + ret = -ENOMEM; +- goto out_put_node; + } + +-out_put_node: + of_node_put(pcie_intc_node); + return ret; + } diff --git a/target/linux/generic/pending-5.15/851-0001-phy-marvell-phy-mvebu-a3700-comphy-Remove-port-from-.patch b/target/linux/generic/pending-5.15/851-0001-phy-marvell-phy-mvebu-a3700-comphy-Remove-port-from-.patch new file mode 100644 index 0000000000..4a963be952 --- /dev/null +++ b/target/linux/generic/pending-5.15/851-0001-phy-marvell-phy-mvebu-a3700-comphy-Remove-port-from-.patch @@ -0,0 +1,217 @@ +From a719f7ba7fcba05d85801c6f0267f389a21627c1 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Fri, 24 Sep 2021 13:03:02 +0200 +Subject: [PATCH] phy: marvell: phy-mvebu-a3700-comphy: Remove port from driver + configuration +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Port number is encoded into argument for SMC call. It is zero for SATA, +PCIe and also both USB 3.0 PHYs. It is non-zero only for Ethernet PHY +(incorrectly called SGMII) on lane 0. Ethernet PHY on lane 1 also uses zero +port number. + +So construct "port" bits for SMC call argument can be constructed directly +from PHY type and lane number. + +Change driver code to always pass zero port number for non-ethernet PHYs +and for ethernet PHYs determinate port number from lane number. This +simplifies the driver. 
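
The rule spelled out above can be summarized by a small helper. This helper is purely illustrative and does not exist in the driver; the hunk below open-codes it in the Ethernet branch as fw_port = (lane->id == 0) ? 1 : 0 and simply passes port 0 for all other modes.

/* illustrative only -- not part of the driver */
static unsigned int mvebu_a3700_comphy_fw_port(const struct mvebu_a3700_comphy_lane *lane)
{
	/* only the Ethernet PHY on lane 0 encodes a non-zero port in the SMC argument */
	if (lane->mode == PHY_MODE_ETHERNET && lane->id == 0)
		return 1;

	/* PCIe, SATA, USB3 and the lane-1 Ethernet PHY all use port 0 */
	return 0;
}
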
+ +As port number from DT PHY configuration is not used anymore, remove whole +driver code which parses it. This also simplifies the driver. + +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +Reviewed-by: Miquel Raynal <miquel.raynal@bootlin.com> +--- + drivers/phy/marvell/phy-mvebu-a3700-comphy.c | 62 +++++++++----------- + 1 file changed, 29 insertions(+), 33 deletions(-) + +--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c ++++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c +@@ -20,7 +20,6 @@ + #include <linux/platform_device.h> + + #define MVEBU_A3700_COMPHY_LANES 3 +-#define MVEBU_A3700_COMPHY_PORTS 2 + + /* COMPHY Fast SMC function identifiers */ + #define COMPHY_SIP_POWER_ON 0x82000001 +@@ -45,51 +44,47 @@ + #define COMPHY_FW_NET(mode, idx, speed) (COMPHY_FW_MODE(mode) | \ + ((idx) << 8) | \ + ((speed) << 2)) +-#define COMPHY_FW_PCIE(mode, idx, speed, width) (COMPHY_FW_NET(mode, idx, speed) | \ ++#define COMPHY_FW_PCIE(mode, speed, width) (COMPHY_FW_NET(mode, 0, speed) | \ + ((width) << 18)) + + struct mvebu_a3700_comphy_conf { + unsigned int lane; + enum phy_mode mode; + int submode; +- unsigned int port; + u32 fw_mode; + }; + +-#define MVEBU_A3700_COMPHY_CONF(_lane, _mode, _smode, _port, _fw) \ ++#define MVEBU_A3700_COMPHY_CONF(_lane, _mode, _smode, _fw) \ + { \ + .lane = _lane, \ + .mode = _mode, \ + .submode = _smode, \ +- .port = _port, \ + .fw_mode = _fw, \ + } + +-#define MVEBU_A3700_COMPHY_CONF_GEN(_lane, _mode, _port, _fw) \ +- MVEBU_A3700_COMPHY_CONF(_lane, _mode, PHY_INTERFACE_MODE_NA, _port, _fw) ++#define MVEBU_A3700_COMPHY_CONF_GEN(_lane, _mode, _fw) \ ++ MVEBU_A3700_COMPHY_CONF(_lane, _mode, PHY_INTERFACE_MODE_NA, _fw) + +-#define MVEBU_A3700_COMPHY_CONF_ETH(_lane, _smode, _port, _fw) \ +- MVEBU_A3700_COMPHY_CONF(_lane, PHY_MODE_ETHERNET, _smode, _port, _fw) ++#define MVEBU_A3700_COMPHY_CONF_ETH(_lane, _smode, _fw) \ ++ MVEBU_A3700_COMPHY_CONF(_lane, PHY_MODE_ETHERNET, _smode, _fw) + + static const struct mvebu_a3700_comphy_conf mvebu_a3700_comphy_modes[] = { + /* lane 0 */ +- MVEBU_A3700_COMPHY_CONF_GEN(0, PHY_MODE_USB_HOST_SS, 0, ++ MVEBU_A3700_COMPHY_CONF_GEN(0, PHY_MODE_USB_HOST_SS, + COMPHY_FW_MODE_USB3H), +- MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_SGMII, 1, ++ MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_SGMII, + COMPHY_FW_MODE_SGMII), +- MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_2500BASEX, 1, ++ MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_2500BASEX, + COMPHY_FW_MODE_2500BASEX), + /* lane 1 */ +- MVEBU_A3700_COMPHY_CONF_GEN(1, PHY_MODE_PCIE, 0, +- COMPHY_FW_MODE_PCIE), +- MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_SGMII, 0, ++ MVEBU_A3700_COMPHY_CONF_GEN(1, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE), ++ MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_SGMII, + COMPHY_FW_MODE_SGMII), +- MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_2500BASEX, 0, ++ MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_2500BASEX, + COMPHY_FW_MODE_2500BASEX), + /* lane 2 */ +- MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_SATA, 0, +- COMPHY_FW_MODE_SATA), +- MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_USB_HOST_SS, 0, ++ MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_SATA, COMPHY_FW_MODE_SATA), ++ MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_USB_HOST_SS, + COMPHY_FW_MODE_USB3H), + }; + +@@ -98,7 +93,6 @@ struct mvebu_a3700_comphy_lane { + unsigned int id; + enum phy_mode mode; + int submode; +- int port; + }; + + static int mvebu_a3700_comphy_smc(unsigned long function, unsigned long lane, +@@ -120,7 +114,7 @@ static int 
mvebu_a3700_comphy_smc(unsign + } + } + +-static int mvebu_a3700_comphy_get_fw_mode(int lane, int port, ++static int mvebu_a3700_comphy_get_fw_mode(int lane, + enum phy_mode mode, + int submode) + { +@@ -132,7 +126,6 @@ static int mvebu_a3700_comphy_get_fw_mod + + for (i = 0; i < n; i++) { + if (mvebu_a3700_comphy_modes[i].lane == lane && +- mvebu_a3700_comphy_modes[i].port == port && + mvebu_a3700_comphy_modes[i].mode == mode && + mvebu_a3700_comphy_modes[i].submode == submode) + break; +@@ -153,7 +146,7 @@ static int mvebu_a3700_comphy_set_mode(s + if (submode == PHY_INTERFACE_MODE_1000BASEX) + submode = PHY_INTERFACE_MODE_SGMII; + +- fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, lane->port, mode, ++ fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, mode, + submode); + if (fw_mode < 0) { + dev_err(lane->dev, "invalid COMPHY mode\n"); +@@ -172,9 +165,10 @@ static int mvebu_a3700_comphy_power_on(s + struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); + u32 fw_param; + int fw_mode; ++ int fw_port; + int ret; + +- fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, lane->port, ++ fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, + lane->mode, lane->submode); + if (fw_mode < 0) { + dev_err(lane->dev, "invalid COMPHY mode\n"); +@@ -191,17 +185,18 @@ static int mvebu_a3700_comphy_power_on(s + fw_param = COMPHY_FW_MODE(fw_mode); + break; + case PHY_MODE_ETHERNET: ++ fw_port = (lane->id == 0) ? 1 : 0; + switch (lane->submode) { + case PHY_INTERFACE_MODE_SGMII: + dev_dbg(lane->dev, "set lane %d to SGMII mode\n", + lane->id); +- fw_param = COMPHY_FW_NET(fw_mode, lane->port, ++ fw_param = COMPHY_FW_NET(fw_mode, fw_port, + COMPHY_FW_SPEED_1_25G); + break; + case PHY_INTERFACE_MODE_2500BASEX: + dev_dbg(lane->dev, "set lane %d to 2500BASEX mode\n", + lane->id); +- fw_param = COMPHY_FW_NET(fw_mode, lane->port, ++ fw_param = COMPHY_FW_NET(fw_mode, fw_port, + COMPHY_FW_SPEED_3_125G); + break; + default: +@@ -212,8 +207,7 @@ static int mvebu_a3700_comphy_power_on(s + break; + case PHY_MODE_PCIE: + dev_dbg(lane->dev, "set lane %d to PCIe mode\n", lane->id); +- fw_param = COMPHY_FW_PCIE(fw_mode, lane->port, +- COMPHY_FW_SPEED_5G, ++ fw_param = COMPHY_FW_PCIE(fw_mode, COMPHY_FW_SPEED_5G, + phy->attrs.bus_width); + break; + default: +@@ -247,17 +241,20 @@ static struct phy *mvebu_a3700_comphy_xl + struct of_phandle_args *args) + { + struct mvebu_a3700_comphy_lane *lane; ++ unsigned int port; + struct phy *phy; + +- if (WARN_ON(args->args[0] >= MVEBU_A3700_COMPHY_PORTS)) +- return ERR_PTR(-EINVAL); +- + phy = of_phy_simple_xlate(dev, args); + if (IS_ERR(phy)) + return phy; + + lane = phy_get_drvdata(phy); +- lane->port = args->args[0]; ++ ++ port = args->args[0]; ++ if (port != 0 && (port != 1 || lane->id != 0)) { ++ dev_err(lane->dev, "invalid port number %u\n", port); ++ return ERR_PTR(-EINVAL); ++ } + + return phy; + } +@@ -302,7 +299,6 @@ static int mvebu_a3700_comphy_probe(stru + lane->mode = PHY_MODE_INVALID; + lane->submode = PHY_INTERFACE_MODE_NA; + lane->id = lane_id; +- lane->port = -1; + phy_set_drvdata(phy, lane); + } + diff --git a/target/linux/generic/pending-5.15/851-0002-phy-marvell-phy-mvebu-a3700-comphy-Add-native-kernel.patch b/target/linux/generic/pending-5.15/851-0002-phy-marvell-phy-mvebu-a3700-comphy-Add-native-kernel.patch new file mode 100644 index 0000000000..73ead1e16c --- /dev/null +++ b/target/linux/generic/pending-5.15/851-0002-phy-marvell-phy-mvebu-a3700-comphy-Add-native-kernel.patch @@ -0,0 +1,1564 @@ +From 9d276da259cce20b2ed7a868b6e6a6a205f7bb04 Mon Sep 17 
00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Thu, 23 Sep 2021 19:20:13 +0200 +Subject: [PATCH] phy: marvell: phy-mvebu-a3700-comphy: Add native kernel + implementation +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Remove old RPC implementation and add a new native kernel implementation. + +The old implementation uses ARM SMC API to issue RPC calls to ARM Trusted +Firmware which provides real implementation of PHY configuration. + +But older versions of ARM Trusted Firmware do not provide this PHY +configuration functionality, simply returning: operation not supported; or +worse, some versions provide the configuration functionality incorrectly. + +For example the firmware shipped in ESPRESSObin board has this older +version of ARM Trusted Firmware and therefore SATA, USB 3.0 and PCIe +functionality do not work with newer versions of Linux kernel. + +Due to the above reasons, the following commits were introduced into Linux, +to workaround these issues by ignoring -EOPNOTSUPP error code from +phy-mvebu-a3700-comphy driver function phy_power_on(): + +commit 45aefe3d2251 ("ata: ahci: mvebu: Make SATA PHY optional for Armada +3720") +commit 3241929b67d2 ("usb: host: xhci: mvebu: make USB 3.0 PHY optional for +Armada 3720") +commit b0c6ae0f8948 ("PCI: aardvark: Fix initialization with old Marvell's +Arm Trusted Firmware") + +Replace this RPC implementation with proper native kernel implementation, +which is independent on the firmware. Never return -EOPNOTSUPP for proper +arguments. + +This should solve multiple issues with real-world boards, where it is not +possible or really inconvenient to change the firmware. Let's eliminate +these issues. + +This implementation is ported directly from Armada 3720 comphy driver found +in newest version of ARM Trusted Firmware source code, but with various +fixes of register names, some added comments, some refactoring due to the +original code not conforming to kernel standards. Also PCIe mode poweroff +support was added here, and PHY reset support. These changes are also going +to be sent to ARM Trusted Firmware. + +Signed-off-by: Pali Rohár <pali@kernel.org> +Acked-by: Miquel Raynal <miquel.raynal@bootlin.com> +[ Pali did the porting from ATF. + I (Marek) then fixed some register names, some various other things, + added some comments and refactored the code to kernel standards. Also + fixed PHY poweroff and added PHY reset. ] +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + drivers/phy/marvell/phy-mvebu-a3700-comphy.c | 1351 ++++++++++++++++-- + 1 file changed, 1234 insertions(+), 117 deletions(-) + +--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c ++++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c +@@ -5,12 +5,16 @@ + * Authors: + * Evan Wang <xswang@marvell.com> + * Miquèl Raynal <miquel.raynal@bootlin.com> ++ * Pali Rohár <pali@kernel.org> ++ * Marek Behún <kabel@kernel.org> + * + * Structure inspired from phy-mvebu-cp110-comphy.c written by Antoine Tenart. +- * SMC call initial support done by Grzegorz Jaszczyk. ++ * Comphy code from ARM Trusted Firmware ported by Pali Rohár <pali@kernel.org> ++ * and Marek Behún <kabel@kernel.org>. 
+ */ + +-#include <linux/arm-smccc.h> ++#include <linux/bitfield.h> ++#include <linux/clk.h> + #include <linux/io.h> + #include <linux/iopoll.h> + #include <linux/mfd/syscon.h> +@@ -18,103 +22,1147 @@ + #include <linux/phy.h> + #include <linux/phy/phy.h> + #include <linux/platform_device.h> ++#include <linux/spinlock.h> + +-#define MVEBU_A3700_COMPHY_LANES 3 ++#define PLL_SET_DELAY_US 600 ++#define COMPHY_PLL_SLEEP 1000 ++#define COMPHY_PLL_TIMEOUT 150000 ++ ++/* Comphy lane2 indirect access register offset */ ++#define COMPHY_LANE2_INDIR_ADDR 0x0 ++#define COMPHY_LANE2_INDIR_DATA 0x4 ++ ++/* SATA and USB3 PHY offset compared to SATA PHY */ ++#define COMPHY_LANE2_REGS_BASE 0x200 ++ ++/* ++ * When accessing common PHY lane registers directly, we need to shift by 1, ++ * since the registers are 16-bit. ++ */ ++#define COMPHY_LANE_REG_DIRECT(reg) (((reg) & 0x7FF) << 1) ++ ++/* COMPHY registers */ ++#define COMPHY_POWER_PLL_CTRL 0x01 ++#define PU_IVREF_BIT BIT(15) ++#define PU_PLL_BIT BIT(14) ++#define PU_RX_BIT BIT(13) ++#define PU_TX_BIT BIT(12) ++#define PU_TX_INTP_BIT BIT(11) ++#define PU_DFE_BIT BIT(10) ++#define RESET_DTL_RX_BIT BIT(9) ++#define PLL_LOCK_BIT BIT(8) ++#define REF_FREF_SEL_MASK GENMASK(4, 0) ++#define REF_FREF_SEL_SERDES_25MHZ FIELD_PREP(REF_FREF_SEL_MASK, 0x1) ++#define REF_FREF_SEL_SERDES_40MHZ FIELD_PREP(REF_FREF_SEL_MASK, 0x3) ++#define REF_FREF_SEL_SERDES_50MHZ FIELD_PREP(REF_FREF_SEL_MASK, 0x4) ++#define REF_FREF_SEL_PCIE_USB3_25MHZ FIELD_PREP(REF_FREF_SEL_MASK, 0x2) ++#define REF_FREF_SEL_PCIE_USB3_40MHZ FIELD_PREP(REF_FREF_SEL_MASK, 0x3) ++#define COMPHY_MODE_MASK GENMASK(7, 5) ++#define COMPHY_MODE_SATA FIELD_PREP(COMPHY_MODE_MASK, 0x0) ++#define COMPHY_MODE_PCIE FIELD_PREP(COMPHY_MODE_MASK, 0x3) ++#define COMPHY_MODE_SERDES FIELD_PREP(COMPHY_MODE_MASK, 0x4) ++#define COMPHY_MODE_USB3 FIELD_PREP(COMPHY_MODE_MASK, 0x5) ++ ++#define COMPHY_KVCO_CAL_CTRL 0x02 ++#define USE_MAX_PLL_RATE_BIT BIT(12) ++#define SPEED_PLL_MASK GENMASK(7, 2) ++#define SPEED_PLL_VALUE_16 FIELD_PREP(SPEED_PLL_MASK, 0x10) ++ ++#define COMPHY_DIG_LOOPBACK_EN 0x23 ++#define SEL_DATA_WIDTH_MASK GENMASK(11, 10) ++#define DATA_WIDTH_10BIT FIELD_PREP(SEL_DATA_WIDTH_MASK, 0x0) ++#define DATA_WIDTH_20BIT FIELD_PREP(SEL_DATA_WIDTH_MASK, 0x1) ++#define DATA_WIDTH_40BIT FIELD_PREP(SEL_DATA_WIDTH_MASK, 0x2) ++#define PLL_READY_TX_BIT BIT(4) ++ ++#define COMPHY_SYNC_PATTERN 0x24 ++#define TXD_INVERT_BIT BIT(10) ++#define RXD_INVERT_BIT BIT(11) ++ ++#define COMPHY_SYNC_MASK_GEN 0x25 ++#define PHY_GEN_MAX_MASK GENMASK(11, 10) ++#define PHY_GEN_MAX_USB3_5G FIELD_PREP(PHY_GEN_MAX_MASK, 0x1) ++ ++#define COMPHY_ISOLATION_CTRL 0x26 ++#define PHY_ISOLATE_MODE BIT(15) ++ ++#define COMPHY_GEN2_SET2 0x3e ++#define GS2_TX_SSC_AMP_MASK GENMASK(15, 9) ++#define GS2_TX_SSC_AMP_4128 FIELD_PREP(GS2_TX_SSC_AMP_MASK, 0x20) ++#define GS2_VREG_RXTX_MAS_ISET_MASK GENMASK(8, 7) ++#define GS2_VREG_RXTX_MAS_ISET_60U FIELD_PREP(GS2_VREG_RXTX_MAS_ISET_MASK,\ ++ 0x0) ++#define GS2_VREG_RXTX_MAS_ISET_80U FIELD_PREP(GS2_VREG_RXTX_MAS_ISET_MASK,\ ++ 0x1) ++#define GS2_VREG_RXTX_MAS_ISET_100U FIELD_PREP(GS2_VREG_RXTX_MAS_ISET_MASK,\ ++ 0x2) ++#define GS2_VREG_RXTX_MAS_ISET_120U FIELD_PREP(GS2_VREG_RXTX_MAS_ISET_MASK,\ ++ 0x3) ++#define GS2_RSVD_6_0_MASK GENMASK(6, 0) ++ ++#define COMPHY_GEN3_SET2 0x3f ++ ++#define COMPHY_IDLE_SYNC_EN 0x48 ++#define IDLE_SYNC_EN BIT(12) ++ ++#define COMPHY_MISC_CTRL0 0x4F ++#define CLK100M_125M_EN BIT(4) ++#define TXDCLK_2X_SEL BIT(6) ++#define CLK500M_EN BIT(7) ++#define PHY_REF_CLK_SEL BIT(10) ++ 
++#define COMPHY_SFT_RESET 0x52 ++#define SFT_RST BIT(9) ++#define SFT_RST_NO_REG BIT(10) ++ ++#define COMPHY_MISC_CTRL1 0x73 ++#define SEL_BITS_PCIE_FORCE BIT(15) ++ ++#define COMPHY_GEN2_SET3 0x112 ++#define GS3_FFE_CAP_SEL_MASK GENMASK(3, 0) ++#define GS3_FFE_CAP_SEL_VALUE FIELD_PREP(GS3_FFE_CAP_SEL_MASK, 0xF) ++ ++/* PIPE registers */ ++#define COMPHY_PIPE_LANE_CFG0 0x180 ++#define PRD_TXDEEMPH0_MASK BIT(0) ++#define PRD_TXMARGIN_MASK GENMASK(3, 1) ++#define PRD_TXSWING_MASK BIT(4) ++#define CFG_TX_ALIGN_POS_MASK GENMASK(8, 5) ++ ++#define COMPHY_PIPE_LANE_CFG1 0x181 ++#define PRD_TXDEEMPH1_MASK BIT(15) ++#define USE_MAX_PLL_RATE_EN BIT(9) ++#define TX_DET_RX_MODE BIT(6) ++#define GEN2_TX_DATA_DLY_MASK GENMASK(4, 3) ++#define GEN2_TX_DATA_DLY_DEFT FIELD_PREP(GEN2_TX_DATA_DLY_MASK, 2) ++#define TX_ELEC_IDLE_MODE_EN BIT(0) ++ ++#define COMPHY_PIPE_LANE_STAT1 0x183 ++#define TXDCLK_PCLK_EN BIT(0) ++ ++#define COMPHY_PIPE_LANE_CFG4 0x188 ++#define SPREAD_SPECTRUM_CLK_EN BIT(7) ++ ++#define COMPHY_PIPE_RST_CLK_CTRL 0x1C1 ++#define PIPE_SOFT_RESET BIT(0) ++#define PIPE_REG_RESET BIT(1) ++#define MODE_CORE_CLK_FREQ_SEL BIT(9) ++#define MODE_PIPE_WIDTH_32 BIT(3) ++#define MODE_REFDIV_MASK GENMASK(5, 4) ++#define MODE_REFDIV_BY_4 FIELD_PREP(MODE_REFDIV_MASK, 0x2) ++ ++#define COMPHY_PIPE_TEST_MODE_CTRL 0x1C2 ++#define MODE_MARGIN_OVERRIDE BIT(2) ++ ++#define COMPHY_PIPE_CLK_SRC_LO 0x1C3 ++#define MODE_CLK_SRC BIT(0) ++#define BUNDLE_PERIOD_SEL BIT(1) ++#define BUNDLE_PERIOD_SCALE_MASK GENMASK(3, 2) ++#define BUNDLE_SAMPLE_CTRL BIT(4) ++#define PLL_READY_DLY_MASK GENMASK(7, 5) ++#define CFG_SEL_20B BIT(15) ++ ++#define COMPHY_PIPE_PWR_MGM_TIM1 0x1D0 ++#define CFG_PM_OSCCLK_WAIT_MASK GENMASK(15, 12) ++#define CFG_PM_RXDEN_WAIT_MASK GENMASK(11, 8) ++#define CFG_PM_RXDEN_WAIT_1_UNIT FIELD_PREP(CFG_PM_RXDEN_WAIT_MASK, 0x1) ++#define CFG_PM_RXDLOZ_WAIT_MASK GENMASK(7, 0) ++#define CFG_PM_RXDLOZ_WAIT_7_UNIT FIELD_PREP(CFG_PM_RXDLOZ_WAIT_MASK, 0x7) ++#define CFG_PM_RXDLOZ_WAIT_12_UNIT FIELD_PREP(CFG_PM_RXDLOZ_WAIT_MASK, 0xC) ++ ++/* ++ * This register is not from PHY lane register space. It only exists in the ++ * indirect register space, before the actual PHY lane 2 registers. So the ++ * offset is absolute, not relative to COMPHY_LANE2_REGS_BASE. ++ * It is used only for SATA PHY initialization. 
++ */ ++#define COMPHY_RESERVED_REG 0x0E ++#define PHYCTRL_FRM_PIN_BIT BIT(13) + +-/* COMPHY Fast SMC function identifiers */ +-#define COMPHY_SIP_POWER_ON 0x82000001 +-#define COMPHY_SIP_POWER_OFF 0x82000002 +-#define COMPHY_SIP_PLL_LOCK 0x82000003 +- +-#define COMPHY_FW_MODE_SATA 0x1 +-#define COMPHY_FW_MODE_SGMII 0x2 +-#define COMPHY_FW_MODE_2500BASEX 0x3 +-#define COMPHY_FW_MODE_USB3H 0x4 +-#define COMPHY_FW_MODE_USB3D 0x5 +-#define COMPHY_FW_MODE_PCIE 0x6 +-#define COMPHY_FW_MODE_USB3 0xa +- +-#define COMPHY_FW_SPEED_1_25G 0 /* SGMII 1G */ +-#define COMPHY_FW_SPEED_2_5G 1 +-#define COMPHY_FW_SPEED_3_125G 2 /* 2500BASE-X */ +-#define COMPHY_FW_SPEED_5G 3 +-#define COMPHY_FW_SPEED_MAX 0x3F +- +-#define COMPHY_FW_MODE(mode) ((mode) << 12) +-#define COMPHY_FW_NET(mode, idx, speed) (COMPHY_FW_MODE(mode) | \ +- ((idx) << 8) | \ +- ((speed) << 2)) +-#define COMPHY_FW_PCIE(mode, speed, width) (COMPHY_FW_NET(mode, 0, speed) | \ +- ((width) << 18)) ++/* South Bridge PHY Configuration Registers */ ++#define COMPHY_PHY_REG(lane, reg) (((1 - (lane)) * 0x28) + ((reg) & 0x3f)) ++ ++/* ++ * lane0: USB3/GbE1 PHY Configuration 1 ++ * lane1: PCIe/GbE0 PHY Configuration 1 ++ * (used only by SGMII code) ++ */ ++#define COMPHY_PHY_CFG1 0x0 ++#define PIN_PU_IVREF_BIT BIT(1) ++#define PIN_RESET_CORE_BIT BIT(11) ++#define PIN_RESET_COMPHY_BIT BIT(12) ++#define PIN_PU_PLL_BIT BIT(16) ++#define PIN_PU_RX_BIT BIT(17) ++#define PIN_PU_TX_BIT BIT(18) ++#define PIN_TX_IDLE_BIT BIT(19) ++#define GEN_RX_SEL_MASK GENMASK(25, 22) ++#define GEN_RX_SEL_VALUE(val) FIELD_PREP(GEN_RX_SEL_MASK, (val)) ++#define GEN_TX_SEL_MASK GENMASK(29, 26) ++#define GEN_TX_SEL_VALUE(val) FIELD_PREP(GEN_TX_SEL_MASK, (val)) ++#define SERDES_SPEED_1_25_G 0x6 ++#define SERDES_SPEED_3_125_G 0x8 ++#define PHY_RX_INIT_BIT BIT(30) ++ ++/* ++ * lane0: USB3/GbE1 PHY Status 1 ++ * lane1: PCIe/GbE0 PHY Status 1 ++ * (used only by SGMII code) ++ */ ++#define COMPHY_PHY_STAT1 0x18 ++#define PHY_RX_INIT_DONE_BIT BIT(0) ++#define PHY_PLL_READY_RX_BIT BIT(2) ++#define PHY_PLL_READY_TX_BIT BIT(3) ++ ++/* PHY Selector */ ++#define COMPHY_SELECTOR_PHY_REG 0xFC ++/* bit0: 0: Lane1 is GbE0; 1: Lane1 is PCIe */ ++#define COMPHY_SELECTOR_PCIE_GBE0_SEL_BIT BIT(0) ++/* bit4: 0: Lane0 is GbE1; 1: Lane0 is USB3 */ ++#define COMPHY_SELECTOR_USB3_GBE1_SEL_BIT BIT(4) ++/* bit8: 0: Lane0 is USB3 instead of GbE1, Lane2 is SATA; 1: Lane2 is USB3 */ ++#define COMPHY_SELECTOR_USB3_PHY_SEL_BIT BIT(8) + + struct mvebu_a3700_comphy_conf { + unsigned int lane; + enum phy_mode mode; + int submode; +- u32 fw_mode; + }; + +-#define MVEBU_A3700_COMPHY_CONF(_lane, _mode, _smode, _fw) \ ++#define MVEBU_A3700_COMPHY_CONF(_lane, _mode, _smode) \ + { \ + .lane = _lane, \ + .mode = _mode, \ + .submode = _smode, \ +- .fw_mode = _fw, \ + } + +-#define MVEBU_A3700_COMPHY_CONF_GEN(_lane, _mode, _fw) \ +- MVEBU_A3700_COMPHY_CONF(_lane, _mode, PHY_INTERFACE_MODE_NA, _fw) ++#define MVEBU_A3700_COMPHY_CONF_GEN(_lane, _mode) \ ++ MVEBU_A3700_COMPHY_CONF(_lane, _mode, PHY_INTERFACE_MODE_NA) + +-#define MVEBU_A3700_COMPHY_CONF_ETH(_lane, _smode, _fw) \ +- MVEBU_A3700_COMPHY_CONF(_lane, PHY_MODE_ETHERNET, _smode, _fw) ++#define MVEBU_A3700_COMPHY_CONF_ETH(_lane, _smode) \ ++ MVEBU_A3700_COMPHY_CONF(_lane, PHY_MODE_ETHERNET, _smode) + + static const struct mvebu_a3700_comphy_conf mvebu_a3700_comphy_modes[] = { + /* lane 0 */ +- MVEBU_A3700_COMPHY_CONF_GEN(0, PHY_MODE_USB_HOST_SS, +- COMPHY_FW_MODE_USB3H), +- MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_SGMII, +- COMPHY_FW_MODE_SGMII), +- 
MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_2500BASEX, +- COMPHY_FW_MODE_2500BASEX), ++ MVEBU_A3700_COMPHY_CONF_GEN(0, PHY_MODE_USB_HOST_SS), ++ MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_SGMII), ++ MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_1000BASEX), ++ MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_2500BASEX), + /* lane 1 */ +- MVEBU_A3700_COMPHY_CONF_GEN(1, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE), +- MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_SGMII, +- COMPHY_FW_MODE_SGMII), +- MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_2500BASEX, +- COMPHY_FW_MODE_2500BASEX), ++ MVEBU_A3700_COMPHY_CONF_GEN(1, PHY_MODE_PCIE), ++ MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_SGMII), ++ MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_1000BASEX), ++ MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_2500BASEX), + /* lane 2 */ +- MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_SATA, COMPHY_FW_MODE_SATA), +- MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_USB_HOST_SS, +- COMPHY_FW_MODE_USB3H), ++ MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_SATA), ++ MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_USB_HOST_SS), ++}; ++ ++struct mvebu_a3700_comphy_priv { ++ void __iomem *comphy_regs; ++ void __iomem *lane0_phy_regs; /* USB3 and GbE1 */ ++ void __iomem *lane1_phy_regs; /* PCIe and GbE0 */ ++ void __iomem *lane2_phy_indirect; /* SATA and USB3 */ ++ spinlock_t lock; /* for PHY selector access */ ++ bool xtal_is_40m; + }; + + struct mvebu_a3700_comphy_lane { ++ struct mvebu_a3700_comphy_priv *priv; + struct device *dev; + unsigned int id; + enum phy_mode mode; + int submode; ++ bool invert_tx; ++ bool invert_rx; ++ bool needs_reset; ++}; ++ ++struct gbe_phy_init_data_fix { ++ u16 addr; ++ u16 value; ++}; ++ ++/* Changes to 40M1G25 mode data required for running 40M3G125 init mode */ ++static struct gbe_phy_init_data_fix gbe_phy_init_fix[] = { ++ { 0x005, 0x07CC }, { 0x015, 0x0000 }, { 0x01B, 0x0000 }, ++ { 0x01D, 0x0000 }, { 0x01E, 0x0000 }, { 0x01F, 0x0000 }, ++ { 0x020, 0x0000 }, { 0x021, 0x0030 }, { 0x026, 0x0888 }, ++ { 0x04D, 0x0152 }, { 0x04F, 0xA020 }, { 0x050, 0x07CC }, ++ { 0x053, 0xE9CA }, { 0x055, 0xBD97 }, { 0x071, 0x3015 }, ++ { 0x076, 0x03AA }, { 0x07C, 0x0FDF }, { 0x0C2, 0x3030 }, ++ { 0x0C3, 0x8000 }, { 0x0E2, 0x5550 }, { 0x0E3, 0x12A4 }, ++ { 0x0E4, 0x7D00 }, { 0x0E6, 0x0C83 }, { 0x101, 0xFCC0 }, ++ { 0x104, 0x0C10 } + }; + +-static int mvebu_a3700_comphy_smc(unsigned long function, unsigned long lane, +- unsigned long mode) ++/* 40M1G25 mode init data */ ++static u16 gbe_phy_init[512] = { ++ /* 0 1 2 3 4 5 6 7 */ ++ /*-----------------------------------------------------------*/ ++ /* 8 9 A B C D E F */ ++ 0x3110, 0xFD83, 0x6430, 0x412F, 0x82C0, 0x06FA, 0x4500, 0x6D26, /* 00 */ ++ 0xAFC0, 0x8000, 0xC000, 0x0000, 0x2000, 0x49CC, 0x0BC9, 0x2A52, /* 08 */ ++ 0x0BD2, 0x0CDE, 0x13D2, 0x0CE8, 0x1149, 0x10E0, 0x0000, 0x0000, /* 10 */ ++ 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x4134, 0x0D2D, 0xFFFF, /* 18 */ ++ 0xFFE0, 0x4030, 0x1016, 0x0030, 0x0000, 0x0800, 0x0866, 0x0000, /* 20 */ ++ 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, /* 28 */ ++ 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 30 */ ++ 0x0000, 0x0000, 0x000F, 0x6A62, 0x1988, 0x3100, 0x3100, 0x3100, /* 38 */ ++ 0x3100, 0xA708, 0x2430, 0x0830, 0x1030, 0x4610, 0xFF00, 0xFF00, /* 40 */ ++ 0x0060, 0x1000, 0x0400, 0x0040, 0x00F0, 0x0155, 0x1100, 0xA02A, /* 48 */ ++ 0x06FA, 0x0080, 0xB008, 0xE3ED, 0x5002, 0xB592, 0x7A80, 0x0001, /* 50 */ ++ 0x020A, 0x8820, 0x6014, 0x8054, 0xACAA, 0xFC88, 0x2A02, 
0x45CF, /* 58 */ ++ 0x000F, 0x1817, 0x2860, 0x064F, 0x0000, 0x0204, 0x1800, 0x6000, /* 60 */ ++ 0x810F, 0x4F23, 0x4000, 0x4498, 0x0850, 0x0000, 0x000E, 0x1002, /* 68 */ ++ 0x9D3A, 0x3009, 0xD066, 0x0491, 0x0001, 0x6AB0, 0x0399, 0x3780, /* 70 */ ++ 0x0040, 0x5AC0, 0x4A80, 0x0000, 0x01DF, 0x0000, 0x0007, 0x0000, /* 78 */ ++ 0x2D54, 0x00A1, 0x4000, 0x0100, 0xA20A, 0x0000, 0x0000, 0x0000, /* 80 */ ++ 0x0000, 0x0000, 0x0000, 0x7400, 0x0E81, 0x1000, 0x1242, 0x0210, /* 88 */ ++ 0x80DF, 0x0F1F, 0x2F3F, 0x4F5F, 0x6F7F, 0x0F1F, 0x2F3F, 0x4F5F, /* 90 */ ++ 0x6F7F, 0x4BAD, 0x0000, 0x0000, 0x0800, 0x0000, 0x2400, 0xB651, /* 98 */ ++ 0xC9E0, 0x4247, 0x0A24, 0x0000, 0xAF19, 0x1004, 0x0000, 0x0000, /* A0 */ ++ 0x0000, 0x0013, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* A8 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* B0 */ ++ 0x0000, 0x0000, 0x0000, 0x0060, 0x0000, 0x0000, 0x0000, 0x0000, /* B8 */ ++ 0x0000, 0x0000, 0x3010, 0xFA00, 0x0000, 0x0000, 0x0000, 0x0003, /* C0 */ ++ 0x1618, 0x8200, 0x8000, 0x0400, 0x050F, 0x0000, 0x0000, 0x0000, /* C8 */ ++ 0x4C93, 0x0000, 0x1000, 0x1120, 0x0010, 0x1242, 0x1242, 0x1E00, /* D0 */ ++ 0x0000, 0x0000, 0x0000, 0x00F8, 0x0000, 0x0041, 0x0800, 0x0000, /* D8 */ ++ 0x82A0, 0x572E, 0x2490, 0x14A9, 0x4E00, 0x0000, 0x0803, 0x0541, /* E0 */ ++ 0x0C15, 0x0000, 0x0000, 0x0400, 0x2626, 0x0000, 0x0000, 0x4200, /* E8 */ ++ 0x0000, 0xAA55, 0x1020, 0x0000, 0x0000, 0x5010, 0x0000, 0x0000, /* F0 */ ++ 0x0000, 0x0000, 0x5000, 0x0000, 0x0000, 0x0000, 0x02F2, 0x0000, /* F8 */ ++ 0x101F, 0xFDC0, 0x4000, 0x8010, 0x0110, 0x0006, 0x0000, 0x0000, /*100 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*108 */ ++ 0x04CF, 0x0000, 0x04CF, 0x0000, 0x04CF, 0x0000, 0x04C6, 0x0000, /*110 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*118 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*120 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*128 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*130 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*138 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*140 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*148 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*150 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*158 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*160 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*168 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*170 */ ++ 0x0000, 0x0000, 0x0000, 0x00F0, 0x08A2, 0x3112, 0x0A14, 0x0000, /*178 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*180 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*188 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*190 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*198 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1A0 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1A8 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1B0 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1B8 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1C0 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1C8 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, /*1D0 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1D8 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1E0 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1E8 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1F0 */ ++ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 /*1F8 */ ++}; ++ ++static inline void comphy_reg_set(void __iomem *addr, u32 data, u32 mask) + { +- struct arm_smccc_res res; +- s32 ret; ++ u32 val; + +- arm_smccc_smc(function, lane, mode, 0, 0, 0, 0, 0, &res); +- ret = res.a0; ++ val = readl(addr); ++ val = (val & ~mask) | (data & mask); ++ writel(val, addr); ++} + +- switch (ret) { +- case SMCCC_RET_SUCCESS: +- return 0; +- case SMCCC_RET_NOT_SUPPORTED: +- return -EOPNOTSUPP; ++static inline void comphy_reg_set16(void __iomem *addr, u16 data, u16 mask) ++{ ++ u16 val; ++ ++ val = readw(addr); ++ val = (val & ~mask) | (data & mask); ++ writew(val, addr); ++} ++ ++/* Used for accessing lane 2 registers (SATA/USB3 PHY) */ ++static void comphy_set_indirect(struct mvebu_a3700_comphy_priv *priv, ++ u32 offset, u16 data, u16 mask) ++{ ++ writel(offset, ++ priv->lane2_phy_indirect + COMPHY_LANE2_INDIR_ADDR); ++ comphy_reg_set(priv->lane2_phy_indirect + COMPHY_LANE2_INDIR_DATA, ++ data, mask); ++} ++ ++static void comphy_lane_reg_set(struct mvebu_a3700_comphy_lane *lane, ++ u16 reg, u16 data, u16 mask) ++{ ++ if (lane->id == 2) { ++ /* lane 2 PHY registers are accessed indirectly */ ++ comphy_set_indirect(lane->priv, ++ reg + COMPHY_LANE2_REGS_BASE, ++ data, mask); ++ } else { ++ void __iomem *base = lane->id == 1 ? ++ lane->priv->lane1_phy_regs : ++ lane->priv->lane0_phy_regs; ++ ++ comphy_reg_set16(base + COMPHY_LANE_REG_DIRECT(reg), ++ data, mask); ++ } ++} ++ ++static int comphy_lane_reg_poll(struct mvebu_a3700_comphy_lane *lane, ++ u16 reg, u16 bits, ++ ulong sleep_us, ulong timeout_us) ++{ ++ int ret; ++ ++ if (lane->id == 2) { ++ u32 data; ++ ++ /* lane 2 PHY registers are accessed indirectly */ ++ writel(reg + COMPHY_LANE2_REGS_BASE, ++ lane->priv->lane2_phy_indirect + ++ COMPHY_LANE2_INDIR_ADDR); ++ ++ ret = readl_poll_timeout(lane->priv->lane2_phy_indirect + ++ COMPHY_LANE2_INDIR_DATA, ++ data, (data & bits) == bits, ++ sleep_us, timeout_us); ++ } else { ++ void __iomem *base = lane->id == 1 ? 
++ lane->priv->lane1_phy_regs : ++ lane->priv->lane0_phy_regs; ++ u16 data; ++ ++ ret = readw_poll_timeout(base + COMPHY_LANE_REG_DIRECT(reg), ++ data, (data & bits) == bits, ++ sleep_us, timeout_us); ++ } ++ ++ return ret; ++} ++ ++static void comphy_periph_reg_set(struct mvebu_a3700_comphy_lane *lane, ++ u8 reg, u32 data, u32 mask) ++{ ++ comphy_reg_set(lane->priv->comphy_regs + COMPHY_PHY_REG(lane->id, reg), ++ data, mask); ++} ++ ++static int comphy_periph_reg_poll(struct mvebu_a3700_comphy_lane *lane, ++ u8 reg, u32 bits, ++ ulong sleep_us, ulong timeout_us) ++{ ++ u32 data; ++ ++ return readl_poll_timeout(lane->priv->comphy_regs + ++ COMPHY_PHY_REG(lane->id, reg), ++ data, (data & bits) == bits, ++ sleep_us, timeout_us); ++} ++ ++/* PHY selector configures with corresponding modes */ ++static int ++mvebu_a3700_comphy_set_phy_selector(struct mvebu_a3700_comphy_lane *lane) ++{ ++ u32 old, new, clr = 0, set = 0; ++ unsigned long flags; ++ ++ switch (lane->mode) { ++ case PHY_MODE_SATA: ++ /* SATA must be in Lane2 */ ++ if (lane->id == 2) ++ clr = COMPHY_SELECTOR_USB3_PHY_SEL_BIT; ++ else ++ goto error; ++ break; ++ ++ case PHY_MODE_ETHERNET: ++ if (lane->id == 0) ++ clr = COMPHY_SELECTOR_USB3_GBE1_SEL_BIT; ++ else if (lane->id == 1) ++ clr = COMPHY_SELECTOR_PCIE_GBE0_SEL_BIT; ++ else ++ goto error; ++ break; ++ ++ case PHY_MODE_USB_HOST_SS: ++ if (lane->id == 2) ++ set = COMPHY_SELECTOR_USB3_PHY_SEL_BIT; ++ else if (lane->id == 0) ++ set = COMPHY_SELECTOR_USB3_GBE1_SEL_BIT; ++ else ++ goto error; ++ break; ++ ++ case PHY_MODE_PCIE: ++ /* PCIE must be in Lane1 */ ++ if (lane->id == 1) ++ set = COMPHY_SELECTOR_PCIE_GBE0_SEL_BIT; ++ else ++ goto error; ++ break; ++ ++ default: ++ goto error; ++ } ++ ++ spin_lock_irqsave(&lane->priv->lock, flags); ++ ++ old = readl(lane->priv->comphy_regs + COMPHY_SELECTOR_PHY_REG); ++ new = (old & ~clr) | set; ++ writel(new, lane->priv->comphy_regs + COMPHY_SELECTOR_PHY_REG); ++ ++ spin_unlock_irqrestore(&lane->priv->lock, flags); ++ ++ dev_dbg(lane->dev, ++ "COMPHY[%d] mode[%d] changed PHY selector 0x%08x -> 0x%08x\n", ++ lane->id, lane->mode, old, new); ++ ++ return 0; ++error: ++ dev_err(lane->dev, "COMPHY[%d] mode[%d] is invalid\n", lane->id, ++ lane->mode); ++ return -EINVAL; ++} ++ ++static int ++mvebu_a3700_comphy_sata_power_on(struct mvebu_a3700_comphy_lane *lane) ++{ ++ u32 mask, data, ref_clk; ++ int ret; ++ ++ /* Configure phy selector for SATA */ ++ ret = mvebu_a3700_comphy_set_phy_selector(lane); ++ if (ret) ++ return ret; ++ ++ /* Clear phy isolation mode to make it work in normal mode */ ++ comphy_lane_reg_set(lane, COMPHY_ISOLATION_CTRL, ++ 0x0, PHY_ISOLATE_MODE); ++ ++ /* 0. Check the Polarity invert bits */ ++ data = 0x0; ++ if (lane->invert_tx) ++ data |= TXD_INVERT_BIT; ++ if (lane->invert_rx) ++ data |= RXD_INVERT_BIT; ++ mask = TXD_INVERT_BIT | RXD_INVERT_BIT; ++ comphy_lane_reg_set(lane, COMPHY_SYNC_PATTERN, data, mask); ++ ++ /* 1. Select 40-bit data width */ ++ comphy_lane_reg_set(lane, COMPHY_DIG_LOOPBACK_EN, ++ DATA_WIDTH_40BIT, SEL_DATA_WIDTH_MASK); ++ ++ /* 2. Select reference clock(25M) and PHY mode (SATA) */ ++ if (lane->priv->xtal_is_40m) ++ ref_clk = REF_FREF_SEL_SERDES_40MHZ; ++ else ++ ref_clk = REF_FREF_SEL_SERDES_25MHZ; ++ ++ data = ref_clk | COMPHY_MODE_SATA; ++ mask = REF_FREF_SEL_MASK | COMPHY_MODE_MASK; ++ comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, data, mask); ++ ++ /* 3. 
Use maximum PLL rate (no power save) */ ++ comphy_lane_reg_set(lane, COMPHY_KVCO_CAL_CTRL, ++ USE_MAX_PLL_RATE_BIT, USE_MAX_PLL_RATE_BIT); ++ ++ /* 4. Reset reserved bit */ ++ comphy_set_indirect(lane->priv, COMPHY_RESERVED_REG, ++ 0x0, PHYCTRL_FRM_PIN_BIT); ++ ++ /* 5. Set vendor-specific configuration (It is done in sata driver) */ ++ /* XXX: in U-Boot below sequence was executed in this place, in Linux ++ * not. Now it is done only in U-Boot before this comphy ++ * initialization - tests shows that it works ok, but in case of any ++ * future problem it is left for reference. ++ * reg_set(MVEBU_REGS_BASE + 0xe00a0, 0, 0xffffffff); ++ * reg_set(MVEBU_REGS_BASE + 0xe00a4, BIT(6), BIT(6)); ++ */ ++ ++ /* Wait for > 55 us to allow PLL be enabled */ ++ udelay(PLL_SET_DELAY_US); ++ ++ /* Polling status */ ++ ret = comphy_lane_reg_poll(lane, COMPHY_DIG_LOOPBACK_EN, ++ PLL_READY_TX_BIT, COMPHY_PLL_SLEEP, ++ COMPHY_PLL_TIMEOUT); ++ if (ret) { ++ dev_err(lane->dev, "Failed to lock SATA PLL\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane, ++ bool is_1gbps) ++{ ++ int addr, fix_idx; ++ u16 val; ++ ++ fix_idx = 0; ++ for (addr = 0; addr < 512; addr++) { ++ /* ++ * All PHY register values are defined in full for 3.125Gbps ++ * SERDES speed. The values required for 1.25 Gbps are almost ++ * the same and only few registers should be "fixed" in ++ * comparison to 3.125 Gbps values. These register values are ++ * stored in "gbe_phy_init_fix" array. ++ */ ++ if (!is_1gbps && gbe_phy_init_fix[fix_idx].addr == addr) { ++ /* Use new value */ ++ val = gbe_phy_init_fix[fix_idx].value; ++ if (fix_idx < ARRAY_SIZE(gbe_phy_init_fix)) ++ fix_idx++; ++ } else { ++ val = gbe_phy_init[addr]; ++ } ++ ++ comphy_lane_reg_set(lane, addr, val, 0xFFFF); ++ } ++} ++ ++static int ++mvebu_a3700_comphy_ethernet_power_on(struct mvebu_a3700_comphy_lane *lane) ++{ ++ u32 mask, data, speed_sel; ++ int ret; ++ ++ /* Set selector */ ++ ret = mvebu_a3700_comphy_set_phy_selector(lane); ++ if (ret) ++ return ret; ++ ++ /* ++ * 1. Reset PHY by setting PHY input port PIN_RESET=1. ++ * 2. Set PHY input port PIN_TX_IDLE=1, PIN_PU_IVREF=1 to keep ++ * PHY TXP/TXN output to idle state during PHY initialization ++ * 3. Set PHY input port PIN_PU_PLL=0, PIN_PU_RX=0, PIN_PU_TX=0. ++ */ ++ data = PIN_PU_IVREF_BIT | PIN_TX_IDLE_BIT | PIN_RESET_COMPHY_BIT; ++ mask = data | PIN_RESET_CORE_BIT | PIN_PU_PLL_BIT | PIN_PU_RX_BIT | ++ PIN_PU_TX_BIT | PHY_RX_INIT_BIT; ++ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); ++ ++ /* 4. Release reset to the PHY by setting PIN_RESET=0. */ ++ data = 0x0; ++ mask = PIN_RESET_COMPHY_BIT; ++ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); ++ ++ /* ++ * 5. Set PIN_PHY_GEN_TX[3:0] and PIN_PHY_GEN_RX[3:0] to decide COMPHY ++ * bit rate ++ */ ++ switch (lane->submode) { ++ case PHY_INTERFACE_MODE_SGMII: ++ case PHY_INTERFACE_MODE_1000BASEX: ++ /* SGMII 1G, SerDes speed 1.25G */ ++ speed_sel = SERDES_SPEED_1_25_G; ++ break; ++ case PHY_INTERFACE_MODE_2500BASEX: ++ /* 2500Base-X, SerDes speed 3.125G */ ++ speed_sel = SERDES_SPEED_3_125_G; ++ break; + default: ++ /* Other rates are not supported */ ++ dev_err(lane->dev, ++ "unsupported phy speed %d on comphy lane%d\n", ++ lane->submode, lane->id); + return -EINVAL; + } ++ data = GEN_RX_SEL_VALUE(speed_sel) | GEN_TX_SEL_VALUE(speed_sel); ++ mask = GEN_RX_SEL_MASK | GEN_TX_SEL_MASK; ++ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); ++ ++ /* ++ * 6. 
Wait 10mS for bandgap and reference clocks to stabilize; then ++ * start SW programming. ++ */ ++ mdelay(10); ++ ++ /* 7. Program COMPHY register PHY_MODE */ ++ data = COMPHY_MODE_SERDES; ++ mask = COMPHY_MODE_MASK; ++ comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, data, mask); ++ ++ /* ++ * 8. Set COMPHY register REFCLK_SEL to select the correct REFCLK ++ * source ++ */ ++ data = 0x0; ++ mask = PHY_REF_CLK_SEL; ++ comphy_lane_reg_set(lane, COMPHY_MISC_CTRL0, data, mask); ++ ++ /* ++ * 9. Set correct reference clock frequency in COMPHY register ++ * REF_FREF_SEL. ++ */ ++ if (lane->priv->xtal_is_40m) ++ data = REF_FREF_SEL_SERDES_50MHZ; ++ else ++ data = REF_FREF_SEL_SERDES_25MHZ; ++ ++ mask = REF_FREF_SEL_MASK; ++ comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, data, mask); ++ ++ /* 10. Program COMPHY register PHY_GEN_MAX[1:0] ++ * This step is mentioned in the flow received from verification team. ++ * However the PHY_GEN_MAX value is only meaningful for other interfaces ++ * (not SERDES). For instance, it selects SATA speed 1.5/3/6 Gbps or ++ * PCIe speed 2.5/5 Gbps ++ */ ++ ++ /* ++ * 11. Program COMPHY register SEL_BITS to set correct parallel data ++ * bus width ++ */ ++ data = DATA_WIDTH_10BIT; ++ mask = SEL_DATA_WIDTH_MASK; ++ comphy_lane_reg_set(lane, COMPHY_DIG_LOOPBACK_EN, data, mask); ++ ++ /* ++ * 12. As long as DFE function needs to be enabled in any mode, ++ * COMPHY register DFE_UPDATE_EN[5:0] shall be programmed to 0x3F ++ * for real chip during COMPHY power on. ++ * The step 14 exists (and empty) in the original initialization flow ++ * obtained from the verification team. According to the functional ++ * specification DFE_UPDATE_EN already has the default value 0x3F ++ */ ++ ++ /* ++ * 13. Program COMPHY GEN registers. ++ * These registers should be programmed based on the lab testing result ++ * to achieve optimal performance. Please contact the CEA group to get ++ * the related GEN table during real chip bring-up. We only required to ++ * run though the entire registers programming flow defined by ++ * "comphy_gbe_phy_init" when the REF clock is 40 MHz. For REF clock ++ * 25 MHz the default values stored in PHY registers are OK. ++ */ ++ dev_dbg(lane->dev, "Running C-DPI phy init %s mode\n", ++ lane->submode == PHY_INTERFACE_MODE_2500BASEX ? "2G5" : "1G"); ++ if (lane->priv->xtal_is_40m) ++ comphy_gbe_phy_init(lane, ++ lane->submode != PHY_INTERFACE_MODE_2500BASEX); ++ ++ /* ++ * 14. [Simulation Only] should not be used for real chip. ++ * By pass power up calibration by programming EXT_FORCE_CAL_DONE ++ * (R02h[9]) to 1 to shorten COMPHY simulation time. ++ */ ++ ++ /* ++ * 15. [Simulation Only: should not be used for real chip] ++ * Program COMPHY register FAST_DFE_TIMER_EN=1 to shorten RX training ++ * simulation time. ++ */ ++ ++ /* ++ * 16. Check the PHY Polarity invert bit ++ */ ++ data = 0x0; ++ if (lane->invert_tx) ++ data |= TXD_INVERT_BIT; ++ if (lane->invert_rx) ++ data |= RXD_INVERT_BIT; ++ mask = TXD_INVERT_BIT | RXD_INVERT_BIT; ++ comphy_lane_reg_set(lane, COMPHY_SYNC_PATTERN, data, mask); ++ ++ /* ++ * 17. Set PHY input ports PIN_PU_PLL, PIN_PU_TX and PIN_PU_RX to 1 to ++ * start PHY power up sequence. All the PHY register programming should ++ * be done before PIN_PU_PLL=1. There should be no register programming ++ * for normal PHY operation from this point. ++ */ ++ data = PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT; ++ mask = data; ++ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); ++ ++ /* ++ * 18. 
Wait for PHY power up sequence to finish by checking output ports ++ * PIN_PLL_READY_TX=1 and PIN_PLL_READY_RX=1. ++ */ ++ ret = comphy_periph_reg_poll(lane, COMPHY_PHY_STAT1, ++ PHY_PLL_READY_TX_BIT | ++ PHY_PLL_READY_RX_BIT, ++ COMPHY_PLL_SLEEP, COMPHY_PLL_TIMEOUT); ++ if (ret) { ++ dev_err(lane->dev, "Failed to lock PLL for SERDES PHY %d\n", ++ lane->id); ++ return ret; ++ } ++ ++ /* ++ * 19. Set COMPHY input port PIN_TX_IDLE=0 ++ */ ++ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, 0x0, PIN_TX_IDLE_BIT); ++ ++ /* ++ * 20. After valid data appear on PIN_RXDATA bus, set PIN_RX_INIT=1. To ++ * start RX initialization. PIN_RX_INIT_DONE will be cleared to 0 by the ++ * PHY After RX initialization is done, PIN_RX_INIT_DONE will be set to ++ * 1 by COMPHY Set PIN_RX_INIT=0 after PIN_RX_INIT_DONE= 1. Please ++ * refer to RX initialization part for details. ++ */ ++ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, ++ PHY_RX_INIT_BIT, PHY_RX_INIT_BIT); ++ ++ ret = comphy_periph_reg_poll(lane, COMPHY_PHY_STAT1, ++ PHY_PLL_READY_TX_BIT | ++ PHY_PLL_READY_RX_BIT, ++ COMPHY_PLL_SLEEP, COMPHY_PLL_TIMEOUT); ++ if (ret) { ++ dev_err(lane->dev, "Failed to lock PLL for SERDES PHY %d\n", ++ lane->id); ++ return ret; ++ } ++ ++ ret = comphy_periph_reg_poll(lane, COMPHY_PHY_STAT1, ++ PHY_RX_INIT_DONE_BIT, ++ COMPHY_PLL_SLEEP, COMPHY_PLL_TIMEOUT); ++ if (ret) { ++ dev_err(lane->dev, "Failed to init RX of SERDES PHY %d\n", ++ lane->id); ++ return ret; ++ } ++ ++ return 0; + } + +-static int mvebu_a3700_comphy_get_fw_mode(int lane, ++static int ++mvebu_a3700_comphy_usb3_power_on(struct mvebu_a3700_comphy_lane *lane) ++{ ++ u32 mask, data, cfg, ref_clk; ++ int ret; ++ ++ /* Set phy seclector */ ++ ret = mvebu_a3700_comphy_set_phy_selector(lane); ++ if (ret) ++ return ret; ++ ++ /* ++ * 0. Set PHY OTG Control(0x5d034), bit 4, Power up OTG module The ++ * register belong to UTMI module, so it is set in UTMI phy driver. ++ */ ++ ++ /* ++ * 1. Set PRD_TXDEEMPH (3.5db de-emph) ++ */ ++ data = PRD_TXDEEMPH0_MASK; ++ mask = PRD_TXDEEMPH0_MASK | PRD_TXMARGIN_MASK | PRD_TXSWING_MASK | ++ CFG_TX_ALIGN_POS_MASK; ++ comphy_lane_reg_set(lane, COMPHY_PIPE_LANE_CFG0, data, mask); ++ ++ /* ++ * 2. Set BIT0: enable transmitter in high impedance mode ++ * Set BIT[3:4]: delay 2 clock cycles for HiZ off latency ++ * Set BIT6: Tx detect Rx at HiZ mode ++ * Unset BIT15: set to 0 to set USB3 De-emphasize level to -3.5db ++ * together with bit 0 of COMPHY_PIPE_LANE_CFG0 register ++ */ ++ data = TX_DET_RX_MODE | GEN2_TX_DATA_DLY_DEFT | TX_ELEC_IDLE_MODE_EN; ++ mask = PRD_TXDEEMPH1_MASK | TX_DET_RX_MODE | GEN2_TX_DATA_DLY_MASK | ++ TX_ELEC_IDLE_MODE_EN; ++ comphy_lane_reg_set(lane, COMPHY_PIPE_LANE_CFG1, data, mask); ++ ++ /* ++ * 3. Set Spread Spectrum Clock Enabled ++ */ ++ comphy_lane_reg_set(lane, COMPHY_PIPE_LANE_CFG4, ++ SPREAD_SPECTRUM_CLK_EN, SPREAD_SPECTRUM_CLK_EN); ++ ++ /* ++ * 4. Set Override Margining Controls From the MAC: ++ * Use margining signals from lane configuration ++ */ ++ comphy_lane_reg_set(lane, COMPHY_PIPE_TEST_MODE_CTRL, ++ MODE_MARGIN_OVERRIDE, 0xFFFF); ++ ++ /* ++ * 5. Set Lane-to-Lane Bundle Clock Sampling Period = per PCLK cycles ++ * set Mode Clock Source = PCLK is generated from REFCLK ++ */ ++ data = 0x0; ++ mask = MODE_CLK_SRC | BUNDLE_PERIOD_SEL | BUNDLE_PERIOD_SCALE_MASK | ++ BUNDLE_SAMPLE_CTRL | PLL_READY_DLY_MASK; ++ comphy_lane_reg_set(lane, COMPHY_PIPE_CLK_SRC_LO, data, mask); ++ ++ /* ++ * 6. 
Set G2 Spread Spectrum Clock Amplitude at 4K ++ */ ++ comphy_lane_reg_set(lane, COMPHY_GEN2_SET2, ++ GS2_TX_SSC_AMP_4128, GS2_TX_SSC_AMP_MASK); ++ ++ /* ++ * 7. Unset G3 Spread Spectrum Clock Amplitude ++ * set G3 TX and RX Register Master Current Select ++ */ ++ data = GS2_VREG_RXTX_MAS_ISET_60U; ++ mask = GS2_TX_SSC_AMP_MASK | GS2_VREG_RXTX_MAS_ISET_MASK | ++ GS2_RSVD_6_0_MASK; ++ comphy_lane_reg_set(lane, COMPHY_GEN3_SET2, data, mask); ++ ++ /* ++ * 8. Check crystal jumper setting and program the Power and PLL Control ++ * accordingly Change RX wait ++ */ ++ if (lane->priv->xtal_is_40m) { ++ ref_clk = REF_FREF_SEL_PCIE_USB3_40MHZ; ++ cfg = CFG_PM_RXDLOZ_WAIT_12_UNIT; ++ } else { ++ ref_clk = REF_FREF_SEL_PCIE_USB3_25MHZ; ++ cfg = CFG_PM_RXDLOZ_WAIT_7_UNIT; ++ } ++ ++ data = PU_IVREF_BIT | PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT | ++ PU_TX_INTP_BIT | PU_DFE_BIT | COMPHY_MODE_USB3 | ref_clk; ++ mask = PU_IVREF_BIT | PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT | ++ PU_TX_INTP_BIT | PU_DFE_BIT | PLL_LOCK_BIT | COMPHY_MODE_MASK | ++ REF_FREF_SEL_MASK; ++ comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, data, mask); ++ ++ data = CFG_PM_RXDEN_WAIT_1_UNIT | cfg; ++ mask = CFG_PM_OSCCLK_WAIT_MASK | CFG_PM_RXDEN_WAIT_MASK | ++ CFG_PM_RXDLOZ_WAIT_MASK; ++ comphy_lane_reg_set(lane, COMPHY_PIPE_PWR_MGM_TIM1, data, mask); ++ ++ /* ++ * 9. Enable idle sync ++ */ ++ comphy_lane_reg_set(lane, COMPHY_IDLE_SYNC_EN, ++ IDLE_SYNC_EN, IDLE_SYNC_EN); ++ ++ /* ++ * 10. Enable the output of 500M clock ++ */ ++ comphy_lane_reg_set(lane, COMPHY_MISC_CTRL0, CLK500M_EN, CLK500M_EN); ++ ++ /* ++ * 11. Set 20-bit data width ++ */ ++ comphy_lane_reg_set(lane, COMPHY_DIG_LOOPBACK_EN, ++ DATA_WIDTH_20BIT, 0xFFFF); ++ ++ /* ++ * 12. Override Speed_PLL value and use MAC PLL ++ */ ++ data = SPEED_PLL_VALUE_16 | USE_MAX_PLL_RATE_BIT; ++ mask = 0xFFFF; ++ comphy_lane_reg_set(lane, COMPHY_KVCO_CAL_CTRL, data, mask); ++ ++ /* ++ * 13. Check the Polarity invert bit ++ */ ++ data = 0x0; ++ if (lane->invert_tx) ++ data |= TXD_INVERT_BIT; ++ if (lane->invert_rx) ++ data |= RXD_INVERT_BIT; ++ mask = TXD_INVERT_BIT | RXD_INVERT_BIT; ++ comphy_lane_reg_set(lane, COMPHY_SYNC_PATTERN, data, mask); ++ ++ /* ++ * 14. Set max speed generation to USB3.0 5Gbps ++ */ ++ comphy_lane_reg_set(lane, COMPHY_SYNC_MASK_GEN, ++ PHY_GEN_MAX_USB3_5G, PHY_GEN_MAX_MASK); ++ ++ /* ++ * 15. Set capacitor value for FFE gain peaking to 0xF ++ */ ++ comphy_lane_reg_set(lane, COMPHY_GEN2_SET3, ++ GS3_FFE_CAP_SEL_VALUE, GS3_FFE_CAP_SEL_MASK); ++ ++ /* ++ * 16. Release SW reset ++ */ ++ data = MODE_CORE_CLK_FREQ_SEL | MODE_PIPE_WIDTH_32 | MODE_REFDIV_BY_4; ++ mask = 0xFFFF; ++ comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, data, mask); ++ ++ /* Wait for > 55 us to allow PCLK be enabled */ ++ udelay(PLL_SET_DELAY_US); ++ ++ ret = comphy_lane_reg_poll(lane, COMPHY_PIPE_LANE_STAT1, TXDCLK_PCLK_EN, ++ COMPHY_PLL_SLEEP, COMPHY_PLL_TIMEOUT); ++ if (ret) { ++ dev_err(lane->dev, "Failed to lock USB3 PLL\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int ++mvebu_a3700_comphy_pcie_power_on(struct mvebu_a3700_comphy_lane *lane) ++{ ++ u32 mask, data, ref_clk; ++ int ret; ++ ++ /* Configure phy selector for PCIe */ ++ ret = mvebu_a3700_comphy_set_phy_selector(lane); ++ if (ret) ++ return ret; ++ ++ /* 1. Enable max PLL. */ ++ comphy_lane_reg_set(lane, COMPHY_PIPE_LANE_CFG1, ++ USE_MAX_PLL_RATE_EN, USE_MAX_PLL_RATE_EN); ++ ++ /* 2. Select 20 bit SERDES interface. */ ++ comphy_lane_reg_set(lane, COMPHY_PIPE_CLK_SRC_LO, ++ CFG_SEL_20B, CFG_SEL_20B); ++ ++ /* 3. 
Force to use reg setting for PCIe mode */ ++ comphy_lane_reg_set(lane, COMPHY_MISC_CTRL1, ++ SEL_BITS_PCIE_FORCE, SEL_BITS_PCIE_FORCE); ++ ++ /* 4. Change RX wait */ ++ data = CFG_PM_RXDEN_WAIT_1_UNIT | CFG_PM_RXDLOZ_WAIT_12_UNIT; ++ mask = CFG_PM_OSCCLK_WAIT_MASK | CFG_PM_RXDEN_WAIT_MASK | ++ CFG_PM_RXDLOZ_WAIT_MASK; ++ comphy_lane_reg_set(lane, COMPHY_PIPE_PWR_MGM_TIM1, data, mask); ++ ++ /* 5. Enable idle sync */ ++ comphy_lane_reg_set(lane, COMPHY_IDLE_SYNC_EN, ++ IDLE_SYNC_EN, IDLE_SYNC_EN); ++ ++ /* 6. Enable the output of 100M/125M/500M clock */ ++ data = CLK500M_EN | TXDCLK_2X_SEL | CLK100M_125M_EN; ++ mask = data; ++ comphy_lane_reg_set(lane, COMPHY_MISC_CTRL0, data, mask); ++ ++ /* ++ * 7. Enable TX, PCIE global register, 0xd0074814, it is done in ++ * PCI-E driver ++ */ ++ ++ /* ++ * 8. Check crystal jumper setting and program the Power and PLL ++ * Control accordingly ++ */ ++ ++ if (lane->priv->xtal_is_40m) ++ ref_clk = REF_FREF_SEL_PCIE_USB3_40MHZ; ++ else ++ ref_clk = REF_FREF_SEL_PCIE_USB3_25MHZ; ++ ++ data = PU_IVREF_BIT | PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT | ++ PU_TX_INTP_BIT | PU_DFE_BIT | COMPHY_MODE_PCIE | ref_clk; ++ mask = 0xFFFF; ++ comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, data, mask); ++ ++ /* 9. Override Speed_PLL value and use MAC PLL */ ++ comphy_lane_reg_set(lane, COMPHY_KVCO_CAL_CTRL, ++ SPEED_PLL_VALUE_16 | USE_MAX_PLL_RATE_BIT, ++ 0xFFFF); ++ ++ /* 10. Check the Polarity invert bit */ ++ data = 0x0; ++ if (lane->invert_tx) ++ data |= TXD_INVERT_BIT; ++ if (lane->invert_rx) ++ data |= RXD_INVERT_BIT; ++ mask = TXD_INVERT_BIT | RXD_INVERT_BIT; ++ comphy_lane_reg_set(lane, COMPHY_SYNC_PATTERN, data, mask); ++ ++ /* 11. Release SW reset */ ++ data = MODE_CORE_CLK_FREQ_SEL | MODE_PIPE_WIDTH_32; ++ mask = data | PIPE_SOFT_RESET | MODE_REFDIV_MASK; ++ comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, data, mask); ++ ++ /* Wait for > 55 us to allow PCLK be enabled */ ++ udelay(PLL_SET_DELAY_US); ++ ++ ret = comphy_lane_reg_poll(lane, COMPHY_PIPE_LANE_STAT1, TXDCLK_PCLK_EN, ++ COMPHY_PLL_SLEEP, COMPHY_PLL_TIMEOUT); ++ if (ret) { ++ dev_err(lane->dev, "Failed to lock PCIE PLL\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void ++mvebu_a3700_comphy_usb3_power_off(struct mvebu_a3700_comphy_lane *lane) ++{ ++ /* ++ * Currently the USB3 MAC sets the USB3 PHY to low state, so we do not ++ * need to power off USB3 PHY again. 
++ */ ++} ++ ++static void ++mvebu_a3700_comphy_sata_power_off(struct mvebu_a3700_comphy_lane *lane) ++{ ++ /* Set phy isolation mode */ ++ comphy_lane_reg_set(lane, COMPHY_ISOLATION_CTRL, ++ PHY_ISOLATE_MODE, PHY_ISOLATE_MODE); ++ ++ /* Power off PLL, Tx, Rx */ ++ comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, ++ 0x0, PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT); ++} ++ ++static void ++mvebu_a3700_comphy_ethernet_power_off(struct mvebu_a3700_comphy_lane *lane) ++{ ++ u32 mask, data; ++ ++ data = PIN_RESET_CORE_BIT | PIN_RESET_COMPHY_BIT | PIN_PU_IVREF_BIT | ++ PHY_RX_INIT_BIT; ++ mask = data; ++ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); ++} ++ ++static void ++mvebu_a3700_comphy_pcie_power_off(struct mvebu_a3700_comphy_lane *lane) ++{ ++ /* Power off PLL, Tx, Rx */ ++ comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, ++ 0x0, PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT); ++} ++ ++static int mvebu_a3700_comphy_reset(struct phy *phy) ++{ ++ struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); ++ u16 mask, data; ++ ++ dev_dbg(lane->dev, "resetting lane %d\n", lane->id); ++ ++ /* COMPHY reset for internal logic */ ++ comphy_lane_reg_set(lane, COMPHY_SFT_RESET, ++ SFT_RST_NO_REG, SFT_RST_NO_REG); ++ ++ /* COMPHY register reset (cleared automatically) */ ++ comphy_lane_reg_set(lane, COMPHY_SFT_RESET, SFT_RST, SFT_RST); ++ ++ /* PIPE soft and register reset */ ++ data = PIPE_SOFT_RESET | PIPE_REG_RESET; ++ mask = data; ++ comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, data, mask); ++ ++ /* Release PIPE register reset */ ++ comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, ++ 0x0, PIPE_REG_RESET); ++ ++ /* Reset SB configuration register (only for lanes 0 and 1) */ ++ if (lane->id == 0 || lane->id == 1) { ++ u32 mask, data; ++ ++ data = PIN_RESET_CORE_BIT | PIN_RESET_COMPHY_BIT | ++ PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT; ++ mask = data | PIN_PU_IVREF_BIT | PIN_TX_IDLE_BIT; ++ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); ++ } ++ ++ return 0; ++} ++ ++static bool mvebu_a3700_comphy_check_mode(int lane, + enum phy_mode mode, + int submode) + { +@@ -122,7 +1170,7 @@ static int mvebu_a3700_comphy_get_fw_mod + + /* Unused PHY mux value is 0x0 */ + if (mode == PHY_MODE_INVALID) +- return -EINVAL; ++ return false; + + for (i = 0; i < n; i++) { + if (mvebu_a3700_comphy_modes[i].lane == lane && +@@ -132,27 +1180,30 @@ static int mvebu_a3700_comphy_get_fw_mod + } + + if (i == n) +- return -EINVAL; ++ return false; + +- return mvebu_a3700_comphy_modes[i].fw_mode; ++ return true; + } + + static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode, + int submode) + { + struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); +- int fw_mode; +- +- if (submode == PHY_INTERFACE_MODE_1000BASEX) +- submode = PHY_INTERFACE_MODE_SGMII; + +- fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, mode, +- submode); +- if (fw_mode < 0) { ++ if (!mvebu_a3700_comphy_check_mode(lane->id, mode, submode)) { + dev_err(lane->dev, "invalid COMPHY mode\n"); +- return fw_mode; ++ return -EINVAL; + } + ++ /* Mode cannot be changed while the PHY is powered on */ ++ if (phy->power_count && ++ (lane->mode != mode || lane->submode != submode)) ++ return -EBUSY; ++ ++ /* If changing mode, ensure reset is called */ ++ if (lane->mode != PHY_MODE_INVALID && lane->mode != mode) ++ lane->needs_reset = true; ++ + /* Just remember the mode, ->power_on() will do the real setup */ + lane->mode = mode; + lane->submode = submode; +@@ -163,76 +1214,68 @@ static int mvebu_a3700_comphy_set_mode(s + 
static int mvebu_a3700_comphy_power_on(struct phy *phy) + { + struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); +- u32 fw_param; +- int fw_mode; +- int fw_port; + int ret; + +- fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, +- lane->mode, lane->submode); +- if (fw_mode < 0) { ++ if (!mvebu_a3700_comphy_check_mode(lane->id, lane->mode, ++ lane->submode)) { + dev_err(lane->dev, "invalid COMPHY mode\n"); +- return fw_mode; ++ return -EINVAL; ++ } ++ ++ if (lane->needs_reset) { ++ ret = mvebu_a3700_comphy_reset(phy); ++ if (ret) ++ return ret; ++ ++ lane->needs_reset = false; + } + + switch (lane->mode) { + case PHY_MODE_USB_HOST_SS: + dev_dbg(lane->dev, "set lane %d to USB3 host mode\n", lane->id); +- fw_param = COMPHY_FW_MODE(fw_mode); +- break; ++ return mvebu_a3700_comphy_usb3_power_on(lane); + case PHY_MODE_SATA: + dev_dbg(lane->dev, "set lane %d to SATA mode\n", lane->id); +- fw_param = COMPHY_FW_MODE(fw_mode); +- break; ++ return mvebu_a3700_comphy_sata_power_on(lane); + case PHY_MODE_ETHERNET: +- fw_port = (lane->id == 0) ? 1 : 0; +- switch (lane->submode) { +- case PHY_INTERFACE_MODE_SGMII: +- dev_dbg(lane->dev, "set lane %d to SGMII mode\n", +- lane->id); +- fw_param = COMPHY_FW_NET(fw_mode, fw_port, +- COMPHY_FW_SPEED_1_25G); +- break; +- case PHY_INTERFACE_MODE_2500BASEX: +- dev_dbg(lane->dev, "set lane %d to 2500BASEX mode\n", +- lane->id); +- fw_param = COMPHY_FW_NET(fw_mode, fw_port, +- COMPHY_FW_SPEED_3_125G); +- break; +- default: +- dev_err(lane->dev, "unsupported PHY submode (%d)\n", +- lane->submode); +- return -ENOTSUPP; +- } +- break; ++ dev_dbg(lane->dev, "set lane %d to Ethernet mode\n", lane->id); ++ return mvebu_a3700_comphy_ethernet_power_on(lane); + case PHY_MODE_PCIE: + dev_dbg(lane->dev, "set lane %d to PCIe mode\n", lane->id); +- fw_param = COMPHY_FW_PCIE(fw_mode, COMPHY_FW_SPEED_5G, +- phy->attrs.bus_width); +- break; ++ return mvebu_a3700_comphy_pcie_power_on(lane); + default: + dev_err(lane->dev, "unsupported PHY mode (%d)\n", lane->mode); +- return -ENOTSUPP; ++ return -EOPNOTSUPP; + } +- +- ret = mvebu_a3700_comphy_smc(COMPHY_SIP_POWER_ON, lane->id, fw_param); +- if (ret == -EOPNOTSUPP) +- dev_err(lane->dev, +- "unsupported SMC call, try updating your firmware\n"); +- +- return ret; + } + + static int mvebu_a3700_comphy_power_off(struct phy *phy) + { + struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); + +- return mvebu_a3700_comphy_smc(COMPHY_SIP_POWER_OFF, lane->id, 0); ++ switch (lane->mode) { ++ case PHY_MODE_USB_HOST_SS: ++ mvebu_a3700_comphy_usb3_power_off(lane); ++ return 0; ++ case PHY_MODE_SATA: ++ mvebu_a3700_comphy_sata_power_off(lane); ++ return 0; ++ case PHY_MODE_ETHERNET: ++ mvebu_a3700_comphy_ethernet_power_off(lane); ++ return 0; ++ case PHY_MODE_PCIE: ++ mvebu_a3700_comphy_pcie_power_off(lane); ++ return 0; ++ default: ++ dev_err(lane->dev, "invalid COMPHY mode\n"); ++ return -EINVAL; ++ } + } + + static const struct phy_ops mvebu_a3700_comphy_ops = { + .power_on = mvebu_a3700_comphy_power_on, + .power_off = mvebu_a3700_comphy_power_off, ++ .reset = mvebu_a3700_comphy_reset, + .set_mode = mvebu_a3700_comphy_set_mode, + .owner = THIS_MODULE, + }; +@@ -256,13 +1299,75 @@ static struct phy *mvebu_a3700_comphy_xl + return ERR_PTR(-EINVAL); + } + ++ lane->invert_tx = args->args[1] & BIT(0); ++ lane->invert_rx = args->args[1] & BIT(1); ++ + return phy; + } + + static int mvebu_a3700_comphy_probe(struct platform_device *pdev) + { ++ struct mvebu_a3700_comphy_priv *priv; + struct phy_provider *provider; + struct device_node 
*child; ++ struct resource *res; ++ struct clk *clk; ++ int ret; ++ ++ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return -ENOMEM; ++ ++ spin_lock_init(&priv->lock); ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "comphy"); ++ priv->comphy_regs = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(priv->comphy_regs)) ++ return PTR_ERR(priv->comphy_regs); ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, ++ "lane1_pcie_gbe"); ++ priv->lane1_phy_regs = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(priv->lane1_phy_regs)) ++ return PTR_ERR(priv->lane1_phy_regs); ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, ++ "lane0_usb3_gbe"); ++ priv->lane0_phy_regs = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(priv->lane0_phy_regs)) ++ return PTR_ERR(priv->lane0_phy_regs); ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, ++ "lane2_sata_usb3"); ++ priv->lane2_phy_indirect = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(priv->lane2_phy_indirect)) ++ return PTR_ERR(priv->lane2_phy_indirect); ++ ++ /* ++ * Driver needs to know if reference xtal clock is 40MHz or 25MHz. ++ * Old DT bindings do not have xtal clk present. So do not fail here ++ * and expects that default 25MHz reference clock is used. ++ */ ++ clk = clk_get(&pdev->dev, "xtal"); ++ if (IS_ERR(clk)) { ++ if (PTR_ERR(clk) == -EPROBE_DEFER) ++ return -EPROBE_DEFER; ++ dev_warn(&pdev->dev, "missing 'xtal' clk (%ld)\n", ++ PTR_ERR(clk)); ++ } else { ++ ret = clk_prepare_enable(clk); ++ if (ret) { ++ dev_warn(&pdev->dev, "enabling xtal clk failed (%d)\n", ++ ret); ++ } else { ++ if (clk_get_rate(clk) == 40000000) ++ priv->xtal_is_40m = true; ++ clk_disable_unprepare(clk); ++ } ++ clk_put(clk); ++ } ++ ++ dev_set_drvdata(&pdev->dev, priv); + + for_each_available_child_of_node(pdev->dev.of_node, child) { + struct mvebu_a3700_comphy_lane *lane; +@@ -277,7 +1382,7 @@ static int mvebu_a3700_comphy_probe(stru + continue; + } + +- if (lane_id >= MVEBU_A3700_COMPHY_LANES) { ++ if (lane_id >= 3) { + dev_err(&pdev->dev, "invalid 'reg' property\n"); + continue; + } +@@ -295,11 +1400,21 @@ static int mvebu_a3700_comphy_probe(stru + return PTR_ERR(phy); + } + ++ lane->priv = priv; + lane->dev = &pdev->dev; + lane->mode = PHY_MODE_INVALID; + lane->submode = PHY_INTERFACE_MODE_NA; + lane->id = lane_id; ++ lane->invert_tx = false; ++ lane->invert_rx = false; + phy_set_drvdata(phy, lane); ++ ++ /* ++ * To avoid relying on the bootloader/firmware configuration, ++ * power off all comphys. 
++ */ ++ mvebu_a3700_comphy_reset(phy); ++ lane->needs_reset = false; + } + + provider = devm_of_phy_provider_register(&pdev->dev, +@@ -323,5 +1438,7 @@ static struct platform_driver mvebu_a370 + module_platform_driver(mvebu_a3700_comphy_driver); + + MODULE_AUTHOR("Miquèl Raynal <miquel.raynal@bootlin.com>"); ++MODULE_AUTHOR("Pali Rohár <pali@kernel.org>"); ++MODULE_AUTHOR("Marek Behún <kabel@kernel.org>"); + MODULE_DESCRIPTION("Common PHY driver for A3700"); + MODULE_LICENSE("GPL v2"); diff --git a/target/linux/generic/pending-5.15/851-0003-arm64-dts-marvell-armada-37xx-Add-xtal-clock-to-comp.patch b/target/linux/generic/pending-5.15/851-0003-arm64-dts-marvell-armada-37xx-Add-xtal-clock-to-comp.patch new file mode 100644 index 0000000000..33203a154d --- /dev/null +++ b/target/linux/generic/pending-5.15/851-0003-arm64-dts-marvell-armada-37xx-Add-xtal-clock-to-comp.patch @@ -0,0 +1,31 @@ +From 66c51c39fd4bf05e99debf0e71de5704231c57dc Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Thu, 23 Sep 2021 19:26:26 +0200 +Subject: [PATCH] arm64: dts: marvell: armada-37xx: Add xtal clock to comphy + node +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Kernel driver phy-mvebu-a3700-comphy.c needs to know the rate of the +reference xtal clock. So add missing xtal clock source into comphy device +tree node. If the property is not present, the driver defaults to 25 MHz +xtal rate (which, as far as we know, is used by all the existing boards). + +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + arch/arm64/boot/dts/marvell/armada-37xx.dtsi | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi ++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +@@ -265,6 +265,8 @@ + "lane2_sata_usb3"; + #address-cells = <1>; + #size-cells = <0>; ++ clocks = <&xtalclk>; ++ clock-names = "xtal"; + + comphy0: phy@0 { + reg = <0>; diff --git a/target/linux/generic/pending-5.15/851-0004-Revert-ata-ahci-mvebu-Make-SATA-PHY-optional-for-Arm.patch b/target/linux/generic/pending-5.15/851-0004-Revert-ata-ahci-mvebu-Make-SATA-PHY-optional-for-Arm.patch new file mode 100644 index 0000000000..3c994d2548 --- /dev/null +++ b/target/linux/generic/pending-5.15/851-0004-Revert-ata-ahci-mvebu-Make-SATA-PHY-optional-for-Arm.patch @@ -0,0 +1,61 @@ +From 750bb44dbbe9dfb4ba3e1f8a746b831b39ba3cd9 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Thu, 23 Sep 2021 19:35:57 +0200 +Subject: [PATCH] Revert "ata: ahci: mvebu: Make SATA PHY optional for Armada + 3720" +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This reverts commit 45aefe3d2251e4e229d7662052739f96ad1d08d9. + +Armada 3720 PHY driver (phy-mvebu-a3700-comphy.c) does not return +-EOPNOTSUPP from phy_power_on() callback anymore. + +So remove AHCI_HFLAG_IGN_NOTSUPP_POWER_ON flag from Armada 3720 plat data. + +AHCI_HFLAG_IGN_NOTSUPP_POWER_ON is not used by any other ahci driver, so +remove this flag completely. 
+ +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +Acked-by: Miquel Raynal <miquel.raynal@bootlin.com> +--- + drivers/ata/ahci.h | 2 -- + drivers/ata/ahci_mvebu.c | 2 +- + drivers/ata/libahci_platform.c | 2 +- + 3 files changed, 2 insertions(+), 4 deletions(-) + +--- a/drivers/ata/ahci.h ++++ b/drivers/ata/ahci.h +@@ -240,8 +240,6 @@ enum { + as default lpm_policy */ + AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during + suspend/resume */ +- AHCI_HFLAG_IGN_NOTSUPP_POWER_ON = (1 << 27), /* ignore -EOPNOTSUPP +- from phy_power_on() */ + AHCI_HFLAG_NO_SXS = (1 << 28), /* SXS not supported */ + + /* ap->flags bits */ +--- a/drivers/ata/ahci_mvebu.c ++++ b/drivers/ata/ahci_mvebu.c +@@ -227,7 +227,7 @@ static const struct ahci_mvebu_plat_data + + static const struct ahci_mvebu_plat_data ahci_mvebu_armada_3700_plat_data = { + .plat_config = ahci_mvebu_armada_3700_config, +- .flags = AHCI_HFLAG_SUSPEND_PHYS | AHCI_HFLAG_IGN_NOTSUPP_POWER_ON, ++ .flags = AHCI_HFLAG_SUSPEND_PHYS, + }; + + static const struct of_device_id ahci_mvebu_of_match[] = { +--- a/drivers/ata/libahci_platform.c ++++ b/drivers/ata/libahci_platform.c +@@ -59,7 +59,7 @@ int ahci_platform_enable_phys(struct ahc + } + + rc = phy_power_on(hpriv->phys[i]); +- if (rc && !(rc == -EOPNOTSUPP && (hpriv->flags & AHCI_HFLAG_IGN_NOTSUPP_POWER_ON))) { ++ if (rc) { + phy_exit(hpriv->phys[i]); + goto disable_phys; + } diff --git a/target/linux/generic/pending-5.15/851-0005-Revert-usb-host-xhci-mvebu-make-USB-3.0-PHY-optional.patch b/target/linux/generic/pending-5.15/851-0005-Revert-usb-host-xhci-mvebu-make-USB-3.0-PHY-optional.patch new file mode 100644 index 0000000000..b8a3e880ce --- /dev/null +++ b/target/linux/generic/pending-5.15/851-0005-Revert-usb-host-xhci-mvebu-make-USB-3.0-PHY-optional.patch @@ -0,0 +1,163 @@ +From 9f0dfb279b1dd505d5e10b10e4a78a62030978d8 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Thu, 23 Sep 2021 19:40:06 +0200 +Subject: [PATCH] Revert "usb: host: xhci: mvebu: make USB 3.0 PHY optional for + Armada 3720" +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This reverts commit 3241929b67d28c83945d3191c6816a3271fd6b85. + +Armada 3720 phy driver (phy-mvebu-a3700-comphy.c) does not return +-EOPNOTSUPP from phy_power_on() callback anymore. + +So remove XHCI_SKIP_PHY_INIT flag from xhci_mvebu_a3700_plat_setup() and +then also whole xhci_mvebu_a3700_plat_setup() function which is there just +to handle -EOPNOTSUPP for XHCI_SKIP_PHY_INIT. + +xhci plat_setup callback is not used by any other xhci plat driver, so +remove this callback completely. 
+ +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +Acked-by: Miquel Raynal <miquel.raynal@bootlin.com> +--- + drivers/usb/host/xhci-mvebu.c | 42 ----------------------------------- + drivers/usb/host/xhci-mvebu.h | 6 ----- + drivers/usb/host/xhci-plat.c | 20 +---------------- + drivers/usb/host/xhci-plat.h | 1 - + 4 files changed, 1 insertion(+), 68 deletions(-) + +--- a/drivers/usb/host/xhci-mvebu.c ++++ b/drivers/usb/host/xhci-mvebu.c +@@ -8,7 +8,6 @@ + #include <linux/mbus.h> + #include <linux/of.h> + #include <linux/platform_device.h> +-#include <linux/phy/phy.h> + + #include <linux/usb.h> + #include <linux/usb/hcd.h> +@@ -74,47 +73,6 @@ int xhci_mvebu_mbus_init_quirk(struct us + + return 0; + } +- +-int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd) +-{ +- struct xhci_hcd *xhci = hcd_to_xhci(hcd); +- struct device *dev = hcd->self.controller; +- struct phy *phy; +- int ret; +- +- /* Old bindings miss the PHY handle */ +- phy = of_phy_get(dev->of_node, "usb3-phy"); +- if (IS_ERR(phy) && PTR_ERR(phy) == -EPROBE_DEFER) +- return -EPROBE_DEFER; +- else if (IS_ERR(phy)) +- goto phy_out; +- +- ret = phy_init(phy); +- if (ret) +- goto phy_put; +- +- ret = phy_set_mode(phy, PHY_MODE_USB_HOST_SS); +- if (ret) +- goto phy_exit; +- +- ret = phy_power_on(phy); +- if (ret == -EOPNOTSUPP) { +- /* Skip initializatin of XHCI PHY when it is unsupported by firmware */ +- dev_warn(dev, "PHY unsupported by firmware\n"); +- xhci->quirks |= XHCI_SKIP_PHY_INIT; +- } +- if (ret) +- goto phy_exit; +- +- phy_power_off(phy); +-phy_exit: +- phy_exit(phy); +-phy_put: +- of_phy_put(phy); +-phy_out: +- +- return 0; +-} + + int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) + { +--- a/drivers/usb/host/xhci-mvebu.h ++++ b/drivers/usb/host/xhci-mvebu.h +@@ -12,18 +12,12 @@ struct usb_hcd; + + #if IS_ENABLED(CONFIG_USB_XHCI_MVEBU) + int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd); +-int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd); + int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd); + #else + static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) + { + return 0; + } +- +-static inline int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd) +-{ +- return 0; +-} + + static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) + { +--- a/drivers/usb/host/xhci-plat.c ++++ b/drivers/usb/host/xhci-plat.c +@@ -44,16 +44,6 @@ static void xhci_priv_plat_start(struct + priv->plat_start(hcd); + } + +-static int xhci_priv_plat_setup(struct usb_hcd *hcd) +-{ +- struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); +- +- if (!priv->plat_setup) +- return 0; +- +- return priv->plat_setup(hcd); +-} +- + static int xhci_priv_init_quirk(struct usb_hcd *hcd) + { + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); +@@ -121,7 +111,6 @@ static const struct xhci_plat_priv xhci_ + }; + + static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = { +- .plat_setup = xhci_mvebu_a3700_plat_setup, + .init_quirk = xhci_mvebu_a3700_init_quirk, + }; + +@@ -341,14 +330,7 @@ static int xhci_plat_probe(struct platfo + + hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node); + xhci->shared_hcd->tpl_support = hcd->tpl_support; +- +- if (priv) { +- ret = xhci_priv_plat_setup(hcd); +- if (ret) +- goto disable_usb_phy; +- } +- +- if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))) ++ if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT)) + hcd->skip_phy_initialization = 1; + + if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK)) +--- 
a/drivers/usb/host/xhci-plat.h ++++ b/drivers/usb/host/xhci-plat.h +@@ -13,7 +13,6 @@ + struct xhci_plat_priv { + const char *firmware_name; + unsigned long long quirks; +- int (*plat_setup)(struct usb_hcd *); + void (*plat_start)(struct usb_hcd *); + int (*init_quirk)(struct usb_hcd *); + int (*suspend_quirk)(struct usb_hcd *); diff --git a/target/linux/generic/pending-5.15/851-0006-Revert-PCI-aardvark-Fix-initialization-with-old-Marv.patch b/target/linux/generic/pending-5.15/851-0006-Revert-PCI-aardvark-Fix-initialization-with-old-Marv.patch new file mode 100644 index 0000000000..857ee66a31 --- /dev/null +++ b/target/linux/generic/pending-5.15/851-0006-Revert-PCI-aardvark-Fix-initialization-with-old-Marv.patch @@ -0,0 +1,36 @@ +From 9a352062b7e3857742389dff6f64393481dc755e Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Thu, 23 Sep 2021 19:37:05 +0200 +Subject: [PATCH] Revert "PCI: aardvark: Fix initialization with old Marvell's + Arm Trusted Firmware" +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This reverts commit b0c6ae0f8948a2be6bf4e8b4bbab9ca1343289b6. + +Armada 3720 phy driver (phy-mvebu-a3700-comphy.c) does not return +-EOPNOTSUPP from phy_power_on() callback anymore. + +So remove dead code which handles -EOPNOTSUPP return value. + +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +Acked-by: Miquel Raynal <miquel.raynal@bootlin.com> +--- + drivers/pci/controller/pci-aardvark.c | 4 +--- + 1 file changed, 1 insertion(+), 3 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1637,9 +1637,7 @@ static int advk_pcie_enable_phy(struct a + } + + ret = phy_power_on(pcie->phy); +- if (ret == -EOPNOTSUPP) { +- dev_warn(&pcie->pdev->dev, "PHY unsupported by firmware\n"); +- } else if (ret) { ++ if (ret) { + phy_exit(pcie->phy); + return ret; + } diff --git a/target/linux/generic/pending-5.15/920-mangle_bootargs.patch b/target/linux/generic/pending-5.15/920-mangle_bootargs.patch new file mode 100644 index 0000000000..fc64a4205e --- /dev/null +++ b/target/linux/generic/pending-5.15/920-mangle_bootargs.patch @@ -0,0 +1,71 @@ +From: Imre Kaloz <kaloz@openwrt.org> +Subject: init: add CONFIG_MANGLE_BOOTARGS and disable it by default + +Enabling this option renames the bootloader supplied root= +and rootfstype= variables, which might have to be know but +would break the automatisms OpenWrt uses. + +Signed-off-by: Imre Kaloz <kaloz@openwrt.org> +--- + init/Kconfig | 9 +++++++++ + init/main.c | 24 ++++++++++++++++++++++++ + 2 files changed, 33 insertions(+) + +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1800,6 +1800,15 @@ config EMBEDDED + an embedded system so certain expert options are available + for configuration. + ++config MANGLE_BOOTARGS ++ bool "Rename offending bootargs" ++ depends on EXPERT ++ help ++ Sometimes the bootloader passed bogus root= and rootfstype= ++ parameters to the kernel, and while you want to ignore them, ++ you need to know the values f.e. to support dual firmware ++ layouts on the flash. 
++ + config HAVE_PERF_EVENTS + bool + help +--- a/init/main.c ++++ b/init/main.c +@@ -608,6 +608,29 @@ static inline void setup_nr_cpu_ids(void + static inline void smp_prepare_cpus(unsigned int maxcpus) { } + #endif + ++#ifdef CONFIG_MANGLE_BOOTARGS ++static void __init mangle_bootargs(char *command_line) ++{ ++ char *rootdev; ++ char *rootfs; ++ ++ rootdev = strstr(command_line, "root=/dev/mtdblock"); ++ ++ if (rootdev) ++ strncpy(rootdev, "mangled_rootblock=", 18); ++ ++ rootfs = strstr(command_line, "rootfstype"); ++ ++ if (rootfs) ++ strncpy(rootfs, "mangled_fs", 10); ++ ++} ++#else ++static void __init mangle_bootargs(char *command_line) ++{ ++} ++#endif ++ + /* + * We need to store the untouched command line for future reference. + * We also need to store the touched command line since the parameter +@@ -869,6 +892,7 @@ asmlinkage __visible void __init __no_sa + pr_notice("%s", linux_banner); + early_security_init(); + setup_arch(&command_line); ++ mangle_bootargs(command_line); + setup_boot_config(command_line); + setup_command_line(command_line); + setup_nr_cpu_ids(); |
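
As a quick illustration of the renaming trick that 920-mangle_bootargs.patch adds to init/main.c: because "mangled_rootblock=" is exactly as long as "root=/dev/mtdblock" (18 characters) and "mangled_fs" as long as "rootfstype" (10 characters), strstr() plus a fixed-length strncpy() can overwrite the offending parameters in place without shifting the rest of the command line. The stand-alone sketch below is not part of the patch set; the sample command line and the main() wrapper are invented for demonstration and compile as an ordinary user-space program.

#include <stdio.h>
#include <string.h>

/* Same logic as the kernel helper in the patch, minus the __init annotation. */
static void mangle_bootargs(char *command_line)
{
	char *rootdev = strstr(command_line, "root=/dev/mtdblock");
	char *rootfs = strstr(command_line, "rootfstype");

	/* "mangled_rootblock=" is exactly as long as "root=/dev/mtdblock",
	 * so the overwrite (deliberately unterminated) leaves the rest of
	 * the string untouched. */
	if (rootdev)
		strncpy(rootdev, "mangled_rootblock=", 18);

	/* Likewise "mangled_fs" vs. "rootfstype": 10 characters each. */
	if (rootfs)
		strncpy(rootfs, "mangled_fs", 10);
}

int main(void)
{
	/* Invented example of a bootloader-supplied command line. */
	char cmdline[] = "console=ttyS0,115200 root=/dev/mtdblock5 rootfstype=squashfs";

	mangle_bootargs(cmdline);
	/* Prints: console=ttyS0,115200 mangled_rootblock=5 mangled_fs=squashfs */
	printf("%s\n", cmdline);
	return 0;
}

After the rename the kernel's root= and rootfstype= parsing no longer sees the bootloader values, while the mangled_rootblock=/mangled_fs= copies keep them recoverable for dual-firmware setups, which is the use case the Kconfig help text describes.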