author    | Ansuel Smith <ansuelsmth@gmail.com>  | 2021-11-04 21:52:43 +0100
committer | Daniel Golle <daniel@makrotopia.org> | 2022-03-27 00:07:34 +0000
commit    | 9a038e7fd12eae3695875232962f96af8252f3ba (patch)
tree      | 3ac43d08d68ead1856e271791f5f5ba75f555e95 /target/linux/generic/backport-5.15
parent    | 13960fb0e0babcd99530fcb234073af0c0a5e2f5 (diff)
generic: 5.15: copy config and patch from 5.10
Copy config and patches from kernel 5.10 to kernel 5.15
Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
Diffstat (limited to 'target/linux/generic/backport-5.15')
258 files changed, 37341 insertions, 0 deletions
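As context for the diff that follows: the commit duplicates the kernel 5.10 configuration and patch directories of the generic target under their 5.15 names, and the copied backport patches then appear below as new files in target/linux/generic/backport-5.15. A minimal sketch of that copy step is given here, assuming the usual OpenWrt generic-target layout (config-5.10 plus backport-/hack-/pending-5.10 patch queues); the helper name and the exact directory set are illustrative assumptions, not taken from this commit.

```python
#!/usr/bin/env python3
# Hypothetical helper mirroring the "copy config and patches from 5.10 to 5.15"
# step for the OpenWrt generic target. Directory names follow the usual tree
# layout and are assumptions, not read from this commit.
import shutil
from pathlib import Path

GENERIC = Path("target/linux/generic")
OLD, NEW = "5.10", "5.15"

def seed_kernel_version() -> None:
    # Kernel config: one file per kernel version.
    shutil.copy2(GENERIC / f"config-{OLD}", GENERIC / f"config-{NEW}")
    # Patch queues: one directory per category and kernel version.
    for queue in ("backport", "hack", "pending"):
        src = GENERIC / f"{queue}-{OLD}"
        dst = GENERIC / f"{queue}-{NEW}"
        if src.is_dir():
            shutil.copytree(src, dst, dirs_exist_ok=True)

if __name__ == "__main__":
    seed_kernel_version()
```

The diff below (limited here to backport-5.15) records each copied patch as a newly added file.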
diff --git a/target/linux/generic/backport-5.15/010-Kbuild-don-t-hardcode-path-to-awk-in-scripts-ld-vers.patch b/target/linux/generic/backport-5.15/010-Kbuild-don-t-hardcode-path-to-awk-in-scripts-ld-vers.patch new file mode 100644 index 0000000000..7ac4f9d240 --- /dev/null +++ b/target/linux/generic/backport-5.15/010-Kbuild-don-t-hardcode-path-to-awk-in-scripts-ld-vers.patch @@ -0,0 +1,30 @@ +From 13b1ecc3401653a355798eb1dee10cc1608202f4 Mon Sep 17 00:00:00 2001 +From: Felix Fietkau <nbd@nbd.name> +Date: Mon, 18 Jan 2016 12:27:49 +0100 +Subject: [PATCH 33/34] Kbuild: don't hardcode path to awk in + scripts/ld-version.sh + +On some systems /usr/bin/awk does not exist, or is broken. Find it via +$PATH instead. + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + scripts/ld-version.sh | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/scripts/ld-version.sh ++++ b/scripts/ld-version.sh +@@ -1,6 +1,7 @@ +-#!/usr/bin/awk -f ++#!/bin/sh + # SPDX-License-Identifier: GPL-2.0 + # extract linker version number from stdin and turn into single number ++exec awk ' + { + gsub(".*\\)", ""); + gsub(".*version ", ""); +@@ -9,3 +10,4 @@ + print a[1]*100000000 + a[2]*1000000 + a[3]*10000; + exit + } ++' diff --git a/target/linux/generic/backport-5.15/011-kbuild-export-SUBARCH.patch b/target/linux/generic/backport-5.15/011-kbuild-export-SUBARCH.patch new file mode 100644 index 0000000000..bb99e4ddbf --- /dev/null +++ b/target/linux/generic/backport-5.15/011-kbuild-export-SUBARCH.patch @@ -0,0 +1,21 @@ +From 173019b66dcc9d68ad9333aa744dad1e369b5aa8 Mon Sep 17 00:00:00 2001 +From: Felix Fietkau <nbd@nbd.name> +Date: Sun, 9 Jul 2017 00:26:53 +0200 +Subject: [PATCH 34/34] kernel: add compile fix for linux 4.9 on x86 + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + Makefile | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/Makefile ++++ b/Makefile +@@ -507,7 +507,7 @@ KBUILD_LDFLAGS_MODULE := + KBUILD_LDFLAGS := + CLANG_FLAGS := + +-export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC ++export ARCH SRCARCH SUBARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC + export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL + export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX + export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD diff --git a/target/linux/generic/backport-5.15/026-power-reset-linkstation-poweroff-add-missing-put_dev.patch b/target/linux/generic/backport-5.15/026-power-reset-linkstation-poweroff-add-missing-put_dev.patch new file mode 100644 index 0000000000..66e75bf514 --- /dev/null +++ b/target/linux/generic/backport-5.15/026-power-reset-linkstation-poweroff-add-missing-put_dev.patch @@ -0,0 +1,27 @@ +From 1027a42c25cbf8cfc4ade6503c5110aae04866af Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Daniel=20Gonz=C3=A1lez=20Cabanelas?= <dgcbueu@gmail.com> +Date: Fri, 16 Oct 2020 20:22:37 +0200 +Subject: [PATCH] power: reset: linkstation-poweroff: add missing put_device() +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +The of_mdio_find_bus() takes a reference to the underlying device +structure, we should release that reference using a put_device() call. 
+ +Signed-off-by: Daniel González Cabanelas <dgcbueu@gmail.com> +Signed-off-by: Sebastian Reichel <sre@kernel.org> +--- + drivers/power/reset/linkstation-poweroff.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/power/reset/linkstation-poweroff.c ++++ b/drivers/power/reset/linkstation-poweroff.c +@@ -113,6 +113,7 @@ static int __init linkstation_poweroff_i + return -EPROBE_DEFER; + + phydev = phy_find_first(bus); ++ put_device(&bus->dev); + if (!phydev) + return -EPROBE_DEFER; + diff --git a/target/linux/generic/backport-5.15/050-v5.16-00-MIPS-uasm-Enable-muhu-opcode-for-MIPS-R6.patch b/target/linux/generic/backport-5.15/050-v5.16-00-MIPS-uasm-Enable-muhu-opcode-for-MIPS-R6.patch new file mode 100644 index 0000000000..82feb7421d --- /dev/null +++ b/target/linux/generic/backport-5.15/050-v5.16-00-MIPS-uasm-Enable-muhu-opcode-for-MIPS-R6.patch @@ -0,0 +1,65 @@ +From: Johan Almbladh <johan.almbladh@anyfinetworks.com> +Date: Tue, 5 Oct 2021 18:54:02 +0200 +Subject: [PATCH] MIPS: uasm: Enable muhu opcode for MIPS R6 + +Enable the 'muhu' instruction, complementing the existing 'mulu', needed +to implement a MIPS32 BPF JIT. + +Also fix a typo in the existing definition of 'dmulu'. + +Signed-off-by: Tony Ambardar <Tony.Ambardar@gmail.com> + +This patch is a dependency for my 32-bit MIPS eBPF JIT. + +Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com> +--- + +--- a/arch/mips/include/asm/uasm.h ++++ b/arch/mips/include/asm/uasm.h +@@ -145,6 +145,7 @@ Ip_u1(_mtlo); + Ip_u3u1u2(_mul); + Ip_u1u2(_multu); + Ip_u3u1u2(_mulu); ++Ip_u3u1u2(_muhu); + Ip_u3u1u2(_nor); + Ip_u3u1u2(_or); + Ip_u2u1u3(_ori); +--- a/arch/mips/mm/uasm-mips.c ++++ b/arch/mips/mm/uasm-mips.c +@@ -90,7 +90,7 @@ static const struct insn insn_table[insn + RS | RT | RD}, + [insn_dmtc0] = {M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, + [insn_dmultu] = {M(spec_op, 0, 0, 0, 0, dmultu_op), RS | RT}, +- [insn_dmulu] = {M(spec_op, 0, 0, 0, dmult_dmul_op, dmultu_op), ++ [insn_dmulu] = {M(spec_op, 0, 0, 0, dmultu_dmulu_op, dmultu_op), + RS | RT | RD}, + [insn_drotr] = {M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE}, + [insn_drotr32] = {M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE}, +@@ -150,6 +150,8 @@ static const struct insn insn_table[insn + [insn_mtlo] = {M(spec_op, 0, 0, 0, 0, mtlo_op), RS}, + [insn_mulu] = {M(spec_op, 0, 0, 0, multu_mulu_op, multu_op), + RS | RT | RD}, ++ [insn_muhu] = {M(spec_op, 0, 0, 0, multu_muhu_op, multu_op), ++ RS | RT | RD}, + #ifndef CONFIG_CPU_MIPSR6 + [insn_mul] = {M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, + #else +--- a/arch/mips/mm/uasm.c ++++ b/arch/mips/mm/uasm.c +@@ -59,7 +59,7 @@ enum opcode { + insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu, insn_ll, insn_lld, + insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi, + insn_mflo, insn_modu, insn_movn, insn_movz, insn_mtc0, insn_mthc0, +- insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_nor, ++ insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_muhu, insn_nor, + insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb, insn_sc, + insn_scd, insn_seleqz, insn_selnez, insn_sd, insn_sh, insn_sll, + insn_sllv, insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, +@@ -344,6 +344,7 @@ I_u1(_mtlo) + I_u3u1u2(_mul) + I_u1u2(_multu) + I_u3u1u2(_mulu) ++I_u3u1u2(_muhu) + I_u3u1u2(_nor) + I_u3u1u2(_or) + I_u2u1u3(_ori) diff --git a/target/linux/generic/backport-5.15/050-v5.16-01-mips-uasm-Add-workaround-for-Loongson-2F-nop-CPU-err.patch 
b/target/linux/generic/backport-5.15/050-v5.16-01-mips-uasm-Add-workaround-for-Loongson-2F-nop-CPU-err.patch new file mode 100644 index 0000000000..3a4d573f80 --- /dev/null +++ b/target/linux/generic/backport-5.15/050-v5.16-01-mips-uasm-Add-workaround-for-Loongson-2F-nop-CPU-err.patch @@ -0,0 +1,31 @@ +From: Johan Almbladh <johan.almbladh@anyfinetworks.com> +Date: Tue, 5 Oct 2021 18:54:03 +0200 +Subject: [PATCH] mips: uasm: Add workaround for Loongson-2F nop CPU errata + +This patch implements a workaround for the Loongson-2F nop in generated, +code, if the existing option CONFIG_CPU_NOP_WORKAROUND is set. Before, +the binutils option -mfix-loongson2f-nop was enabled, but no workaround +was done when emitting MIPS code. Now, the nop pseudo instruction is +emitted as "or ax,ax,zero" instead of the default "sll zero,zero,0". This +is consistent with the workaround implemented by binutils. + +Link: https://sourceware.org/legacy-ml/binutils/2009-11/msg00387.html + +Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com> +Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com> +--- + +--- a/arch/mips/include/asm/uasm.h ++++ b/arch/mips/include/asm/uasm.h +@@ -249,7 +249,11 @@ static inline void uasm_l##lb(struct uas + #define uasm_i_bnezl(buf, rs, off) uasm_i_bnel(buf, rs, 0, off) + #define uasm_i_ehb(buf) uasm_i_sll(buf, 0, 0, 3) + #define uasm_i_move(buf, a, b) UASM_i_ADDU(buf, a, 0, b) ++#ifdef CONFIG_CPU_NOP_WORKAROUNDS ++#define uasm_i_nop(buf) uasm_i_or(buf, 1, 1, 0) ++#else + #define uasm_i_nop(buf) uasm_i_sll(buf, 0, 0, 0) ++#endif + #define uasm_i_ssnop(buf) uasm_i_sll(buf, 0, 0, 1) + + static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1, diff --git a/target/linux/generic/backport-5.15/050-v5.16-02-mips-bpf-Add-eBPF-JIT-for-32-bit-MIPS.patch b/target/linux/generic/backport-5.15/050-v5.16-02-mips-bpf-Add-eBPF-JIT-for-32-bit-MIPS.patch new file mode 100644 index 0000000000..7980659961 --- /dev/null +++ b/target/linux/generic/backport-5.15/050-v5.16-02-mips-bpf-Add-eBPF-JIT-for-32-bit-MIPS.patch @@ -0,0 +1,3078 @@ +From: Johan Almbladh <johan.almbladh@anyfinetworks.com> +Date: Tue, 5 Oct 2021 18:54:04 +0200 +Subject: [PATCH] mips: bpf: Add eBPF JIT for 32-bit MIPS + +This is an implementation of an eBPF JIT for 32-bit MIPS I-V and MIPS32. +The implementation supports all 32-bit and 64-bit ALU and JMP operations, +including the recently-added atomics. 64-bit div/mod and 64-bit atomics +are implemented using function calls to math64 and atomic64 functions, +respectively. All 32-bit operations are implemented natively by the JIT, +except if the CPU lacks ll/sc instructions. + +Register mapping +================ +All 64-bit eBPF registers are mapped to native 32-bit MIPS register pairs, +and does not use any stack scratch space for register swapping. This means +that all eBPF register data is kept in CPU registers all the time, and +this simplifies the register management a lot. It also reduces the JIT's +pressure on temporary registers since we do not have to move data around. + +Native register pairs are ordered according to CPU endiannes, following +the O32 calling convention for passing 64-bit arguments and return values. +The eBPF return value, arguments and callee-saved registers are mapped to +their native MIPS equivalents. + +Since the 32 highest bits in the eBPF FP (frame pointer) register are +always zero, only one general-purpose register is actually needed for the +mapping. The MIPS fp register is used for this purpose. The high bits are +mapped to MIPS register r0. 
This saves us one CPU register, which is much +needed for temporaries, while still allowing us to treat the R10 (FP) +register just like any other eBPF register in the JIT. + +The MIPS gp (global pointer) and at (assembler temporary) registers are +used as internal temporary registers for constant blinding. CPU registers +t6-t9 are used internally by the JIT when constructing more complex 64-bit +operations. This is precisely what is needed - two registers to store an +operand value, and two more as scratch registers when performing the +operation. + +The register mapping is shown below. + + R0 - $v1, $v0 return value + R1 - $a1, $a0 argument 1, passed in registers + R2 - $a3, $a2 argument 2, passed in registers + R3 - $t1, $t0 argument 3, passed on stack + R4 - $t3, $t2 argument 4, passed on stack + R5 - $t4, $t3 argument 5, passed on stack + R6 - $s1, $s0 callee-saved + R7 - $s3, $s2 callee-saved + R8 - $s5, $s4 callee-saved + R9 - $s7, $s6 callee-saved + FP - $r0, $fp 32-bit frame pointer + AX - $gp, $at constant-blinding + $t6 - $t9 unallocated, JIT temporaries + +Jump offsets +============ +The JIT tries to map all conditional JMP operations to MIPS conditional +PC-relative branches. The MIPS branch offset field is 18 bits, in bytes, +which is equivalent to the eBPF 16-bit instruction offset. However, since +the JIT may emit more than one CPU instruction per eBPF instruction, the +field width may overflow. If that happens, the JIT converts the long +conditional jump to a short PC-relative branch with the condition +inverted, jumping over a long unconditional absolute jmp (j). + +This conversion will change the instruction offset mapping used for jumps, +and may in turn result in more branch offset overflows. The JIT therefore +dry-runs the translation until no more branches are converted and the +offsets do not change anymore. There is an upper bound on this of course, +and if the JIT hits that limit, the last two iterations are run with all +branches being converted. + +Tail call count +=============== +The current tail call count is stored in the 16-byte area of the caller's +stack frame that is reserved for the callee in the o32 ABI. The value is +initialized in the prologue, and propagated to the tail-callee by skipping +the initialization instructions when emitting the tail call. + +Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com> +--- + create mode 100644 arch/mips/net/bpf_jit_comp.c + create mode 100644 arch/mips/net/bpf_jit_comp.h + create mode 100644 arch/mips/net/bpf_jit_comp32.c + +--- a/arch/mips/net/Makefile ++++ b/arch/mips/net/Makefile +@@ -2,4 +2,9 @@ + # MIPS networking code + + obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o +-obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o ++ ++ifeq ($(CONFIG_32BIT),y) ++ obj-$(CONFIG_MIPS_EBPF_JIT) += bpf_jit_comp.o bpf_jit_comp32.o ++else ++ obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o ++endif +--- /dev/null ++++ b/arch/mips/net/bpf_jit_comp.c +@@ -0,0 +1,1020 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Just-In-Time compiler for eBPF bytecode on MIPS. ++ * Implementation of JIT functions common to 32-bit and 64-bit CPUs. ++ * ++ * Copyright (c) 2021 Anyfi Networks AB. ++ * Author: Johan Almbladh <johan.almbladh@gmail.com> ++ * ++ * Based on code and ideas from ++ * Copyright (c) 2017 Cavium, Inc. 
++ * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com> ++ * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com> ++ */ ++ ++/* ++ * Code overview ++ * ============= ++ * ++ * - bpf_jit_comp.h ++ * Common definitions and utilities. ++ * ++ * - bpf_jit_comp.c ++ * Implementation of JIT top-level logic and exported JIT API functions. ++ * Implementation of internal operations shared by 32-bit and 64-bit code. ++ * JMP and ALU JIT control code, register control code, shared ALU and ++ * JMP/JMP32 JIT operations. ++ * ++ * - bpf_jit_comp32.c ++ * Implementation of functions to JIT prologue, epilogue and a single eBPF ++ * instruction for 32-bit MIPS CPUs. The functions use shared operations ++ * where possible, and implement the rest for 32-bit MIPS such as ALU64 ++ * operations. ++ * ++ * - bpf_jit_comp64.c ++ * Ditto, for 64-bit MIPS CPUs. ++ * ++ * Zero and sign extension ++ * ======================== ++ * 32-bit MIPS instructions on 64-bit MIPS registers use sign extension, ++ * but the eBPF instruction set mandates zero extension. We let the verifier ++ * insert explicit zero-extensions after 32-bit ALU operations, both for ++ * 32-bit and 64-bit MIPS JITs. Conditional JMP32 operations on 64-bit MIPs ++ * are JITed with sign extensions inserted when so expected. ++ * ++ * ALU operations ++ * ============== ++ * ALU operations on 32/64-bit MIPS and ALU64 operations on 64-bit MIPS are ++ * JITed in the following steps. ALU64 operations on 32-bit MIPS are more ++ * complicated and therefore only processed by special implementations in ++ * step (3). ++ * ++ * 1) valid_alu_i: ++ * Determine if an immediate operation can be emitted as such, or if ++ * we must fall back to the register version. ++ * ++ * 2) rewrite_alu_i: ++ * Convert BPF operation and immediate value to a canonical form for ++ * JITing. In some degenerate cases this form may be a no-op. ++ * ++ * 3) emit_alu_{i,i64,r,64}: ++ * Emit instructions for an ALU or ALU64 immediate or register operation. ++ * ++ * JMP operations ++ * ============== ++ * JMP and JMP32 operations require an JIT instruction offset table for ++ * translating the jump offset. This table is computed by dry-running the ++ * JIT without actually emitting anything. However, the computed PC-relative ++ * offset may overflow the 18-bit offset field width of the native MIPS ++ * branch instruction. In such cases, the long jump is converted into the ++ * following sequence. ++ * ++ * <branch> !<cond> +2 Inverted PC-relative branch ++ * nop Delay slot ++ * j <offset> Unconditional absolute long jump ++ * nop Delay slot ++ * ++ * Since this converted sequence alters the offset table, all offsets must ++ * be re-calculated. This may in turn trigger new branch conversions, so ++ * the process is repeated until no further changes are made. Normally it ++ * completes in 1-2 iterations. If JIT_MAX_ITERATIONS should reached, we ++ * fall back to converting every remaining jump operation. The branch ++ * conversion is independent of how the JMP or JMP32 condition is JITed. ++ * ++ * JMP32 and JMP operations are JITed as follows. ++ * ++ * 1) setup_jmp_{i,r}: ++ * Convert jump conditional and offset into a form that can be JITed. ++ * This form may be a no-op, a canonical form, or an inverted PC-relative ++ * jump if branch conversion is necessary. ++ * ++ * 2) valid_jmp_i: ++ * Determine if an immediate operations can be emitted as such, or if ++ * we must fall back to the register version. 
Applies to JMP32 for 32-bit ++ * MIPS, and both JMP and JMP32 for 64-bit MIPS. ++ * ++ * 3) emit_jmp_{i,i64,r,r64}: ++ * Emit instructions for an JMP or JMP32 immediate or register operation. ++ * ++ * 4) finish_jmp_{i,r}: ++ * Emit any instructions needed to finish the jump. This includes a nop ++ * for the delay slot if a branch was emitted, and a long absolute jump ++ * if the branch was converted. ++ */ ++ ++#include <linux/limits.h> ++#include <linux/bitops.h> ++#include <linux/errno.h> ++#include <linux/filter.h> ++#include <linux/bpf.h> ++#include <linux/slab.h> ++#include <asm/bitops.h> ++#include <asm/cacheflush.h> ++#include <asm/cpu-features.h> ++#include <asm/isa-rev.h> ++#include <asm/uasm.h> ++ ++#include "bpf_jit_comp.h" ++ ++/* Convenience macros for descriptor access */ ++#define CONVERTED(desc) ((desc) & JIT_DESC_CONVERT) ++#define INDEX(desc) ((desc) & ~JIT_DESC_CONVERT) ++ ++/* ++ * Push registers on the stack, starting at a given depth from the stack ++ * pointer and increasing. The next depth to be written is returned. ++ */ ++int push_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth) ++{ ++ int reg; ++ ++ for (reg = 0; reg < BITS_PER_BYTE * sizeof(mask); reg++) ++ if (mask & BIT(reg)) { ++ if ((excl & BIT(reg)) == 0) { ++ if (sizeof(long) == 4) ++ emit(ctx, sw, reg, depth, MIPS_R_SP); ++ else /* sizeof(long) == 8 */ ++ emit(ctx, sd, reg, depth, MIPS_R_SP); ++ } ++ depth += sizeof(long); ++ } ++ ++ ctx->stack_used = max((int)ctx->stack_used, depth); ++ return depth; ++} ++ ++/* ++ * Pop registers from the stack, starting at a given depth from the stack ++ * pointer and increasing. The next depth to be read is returned. ++ */ ++int pop_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth) ++{ ++ int reg; ++ ++ for (reg = 0; reg < BITS_PER_BYTE * sizeof(mask); reg++) ++ if (mask & BIT(reg)) { ++ if ((excl & BIT(reg)) == 0) { ++ if (sizeof(long) == 4) ++ emit(ctx, lw, reg, depth, MIPS_R_SP); ++ else /* sizeof(long) == 8 */ ++ emit(ctx, ld, reg, depth, MIPS_R_SP); ++ } ++ depth += sizeof(long); ++ } ++ ++ return depth; ++} ++ ++/* Compute the 28-bit jump target address from a BPF program location */ ++int get_target(struct jit_context *ctx, u32 loc) ++{ ++ u32 index = INDEX(ctx->descriptors[loc]); ++ unsigned long pc = (unsigned long)&ctx->target[ctx->jit_index]; ++ unsigned long addr = (unsigned long)&ctx->target[index]; ++ ++ if (!ctx->target) ++ return 0; ++ ++ if ((addr ^ pc) & ~MIPS_JMP_MASK) ++ return -1; ++ ++ return addr & MIPS_JMP_MASK; ++} ++ ++/* Compute the PC-relative offset to relative BPF program offset */ ++int get_offset(const struct jit_context *ctx, int off) ++{ ++ return (INDEX(ctx->descriptors[ctx->bpf_index + off]) - ++ ctx->jit_index - 1) * sizeof(u32); ++} ++ ++/* dst = imm (register width) */ ++void emit_mov_i(struct jit_context *ctx, u8 dst, s32 imm) ++{ ++ if (imm >= -0x8000 && imm <= 0x7fff) { ++ emit(ctx, addiu, dst, MIPS_R_ZERO, imm); ++ } else { ++ emit(ctx, lui, dst, (s16)((u32)imm >> 16)); ++ emit(ctx, ori, dst, dst, (u16)(imm & 0xffff)); ++ } ++ clobber_reg(ctx, dst); ++} ++ ++/* dst = src (register width) */ ++void emit_mov_r(struct jit_context *ctx, u8 dst, u8 src) ++{ ++ emit(ctx, ori, dst, src, 0); ++ clobber_reg(ctx, dst); ++} ++ ++/* Validate ALU immediate range */ ++bool valid_alu_i(u8 op, s32 imm) ++{ ++ switch (BPF_OP(op)) { ++ case BPF_NEG: ++ case BPF_LSH: ++ case BPF_RSH: ++ case BPF_ARSH: ++ /* All legal eBPF values are valid */ ++ return true; ++ case BPF_ADD: ++ /* imm must be 16 bits */ ++ return imm >= 
-0x8000 && imm <= 0x7fff; ++ case BPF_SUB: ++ /* -imm must be 16 bits */ ++ return imm >= -0x7fff && imm <= 0x8000; ++ case BPF_AND: ++ case BPF_OR: ++ case BPF_XOR: ++ /* imm must be 16 bits unsigned */ ++ return imm >= 0 && imm <= 0xffff; ++ case BPF_MUL: ++ /* imm must be zero or a positive power of two */ ++ return imm == 0 || (imm > 0 && is_power_of_2(imm)); ++ case BPF_DIV: ++ case BPF_MOD: ++ /* imm must be an 17-bit power of two */ ++ return (u32)imm <= 0x10000 && is_power_of_2((u32)imm); ++ } ++ return false; ++} ++ ++/* Rewrite ALU immediate operation */ ++bool rewrite_alu_i(u8 op, s32 imm, u8 *alu, s32 *val) ++{ ++ bool act = true; ++ ++ switch (BPF_OP(op)) { ++ case BPF_LSH: ++ case BPF_RSH: ++ case BPF_ARSH: ++ case BPF_ADD: ++ case BPF_SUB: ++ case BPF_OR: ++ case BPF_XOR: ++ /* imm == 0 is a no-op */ ++ act = imm != 0; ++ break; ++ case BPF_MUL: ++ if (imm == 1) { ++ /* dst * 1 is a no-op */ ++ act = false; ++ } else if (imm == 0) { ++ /* dst * 0 is dst & 0 */ ++ op = BPF_AND; ++ } else { ++ /* dst * (1 << n) is dst << n */ ++ op = BPF_LSH; ++ imm = ilog2(abs(imm)); ++ } ++ break; ++ case BPF_DIV: ++ if (imm == 1) { ++ /* dst / 1 is a no-op */ ++ act = false; ++ } else { ++ /* dst / (1 << n) is dst >> n */ ++ op = BPF_RSH; ++ imm = ilog2(imm); ++ } ++ break; ++ case BPF_MOD: ++ /* dst % (1 << n) is dst & ((1 << n) - 1) */ ++ op = BPF_AND; ++ imm--; ++ break; ++ } ++ ++ *alu = op; ++ *val = imm; ++ return act; ++} ++ ++/* ALU immediate operation (32-bit) */ ++void emit_alu_i(struct jit_context *ctx, u8 dst, s32 imm, u8 op) ++{ ++ switch (BPF_OP(op)) { ++ /* dst = -dst */ ++ case BPF_NEG: ++ emit(ctx, subu, dst, MIPS_R_ZERO, dst); ++ break; ++ /* dst = dst & imm */ ++ case BPF_AND: ++ emit(ctx, andi, dst, dst, (u16)imm); ++ break; ++ /* dst = dst | imm */ ++ case BPF_OR: ++ emit(ctx, ori, dst, dst, (u16)imm); ++ break; ++ /* dst = dst ^ imm */ ++ case BPF_XOR: ++ emit(ctx, xori, dst, dst, (u16)imm); ++ break; ++ /* dst = dst << imm */ ++ case BPF_LSH: ++ emit(ctx, sll, dst, dst, imm); ++ break; ++ /* dst = dst >> imm */ ++ case BPF_RSH: ++ emit(ctx, srl, dst, dst, imm); ++ break; ++ /* dst = dst >> imm (arithmetic) */ ++ case BPF_ARSH: ++ emit(ctx, sra, dst, dst, imm); ++ break; ++ /* dst = dst + imm */ ++ case BPF_ADD: ++ emit(ctx, addiu, dst, dst, imm); ++ break; ++ /* dst = dst - imm */ ++ case BPF_SUB: ++ emit(ctx, addiu, dst, dst, -imm); ++ break; ++ } ++ clobber_reg(ctx, dst); ++} ++ ++/* ALU register operation (32-bit) */ ++void emit_alu_r(struct jit_context *ctx, u8 dst, u8 src, u8 op) ++{ ++ switch (BPF_OP(op)) { ++ /* dst = dst & src */ ++ case BPF_AND: ++ emit(ctx, and, dst, dst, src); ++ break; ++ /* dst = dst | src */ ++ case BPF_OR: ++ emit(ctx, or, dst, dst, src); ++ break; ++ /* dst = dst ^ src */ ++ case BPF_XOR: ++ emit(ctx, xor, dst, dst, src); ++ break; ++ /* dst = dst << src */ ++ case BPF_LSH: ++ emit(ctx, sllv, dst, dst, src); ++ break; ++ /* dst = dst >> src */ ++ case BPF_RSH: ++ emit(ctx, srlv, dst, dst, src); ++ break; ++ /* dst = dst >> src (arithmetic) */ ++ case BPF_ARSH: ++ emit(ctx, srav, dst, dst, src); ++ break; ++ /* dst = dst + src */ ++ case BPF_ADD: ++ emit(ctx, addu, dst, dst, src); ++ break; ++ /* dst = dst - src */ ++ case BPF_SUB: ++ emit(ctx, subu, dst, dst, src); ++ break; ++ /* dst = dst * src */ ++ case BPF_MUL: ++ if (cpu_has_mips32r1 || cpu_has_mips32r6) { ++ emit(ctx, mul, dst, dst, src); ++ } else { ++ emit(ctx, multu, dst, src); ++ emit(ctx, mflo, dst); ++ } ++ break; ++ /* dst = dst / src */ ++ case BPF_DIV: ++ if 
(cpu_has_mips32r6) { ++ emit(ctx, divu_r6, dst, dst, src); ++ } else { ++ emit(ctx, divu, dst, src); ++ emit(ctx, mflo, dst); ++ } ++ break; ++ /* dst = dst % src */ ++ case BPF_MOD: ++ if (cpu_has_mips32r6) { ++ emit(ctx, modu, dst, dst, src); ++ } else { ++ emit(ctx, divu, dst, src); ++ emit(ctx, mfhi, dst); ++ } ++ break; ++ } ++ clobber_reg(ctx, dst); ++} ++ ++/* Atomic read-modify-write (32-bit) */ ++void emit_atomic_r(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 code) ++{ ++ emit(ctx, ll, MIPS_R_T9, off, dst); ++ switch (code) { ++ case BPF_ADD: ++ emit(ctx, addu, MIPS_R_T8, MIPS_R_T9, src); ++ break; ++ case BPF_AND: ++ emit(ctx, and, MIPS_R_T8, MIPS_R_T9, src); ++ break; ++ case BPF_OR: ++ emit(ctx, or, MIPS_R_T8, MIPS_R_T9, src); ++ break; ++ case BPF_XOR: ++ emit(ctx, xor, MIPS_R_T8, MIPS_R_T9, src); ++ break; ++ } ++ emit(ctx, sc, MIPS_R_T8, off, dst); ++ emit(ctx, beqz, MIPS_R_T8, -16); ++ emit(ctx, nop); /* Delay slot */ ++} ++ ++/* Atomic compare-and-exchange (32-bit) */ ++void emit_cmpxchg_r(struct jit_context *ctx, u8 dst, u8 src, u8 res, s16 off) ++{ ++ emit(ctx, ll, MIPS_R_T9, off, dst); ++ emit(ctx, bne, MIPS_R_T9, res, 12); ++ emit(ctx, move, MIPS_R_T8, src); /* Delay slot */ ++ emit(ctx, sc, MIPS_R_T8, off, dst); ++ emit(ctx, beqz, MIPS_R_T8, -20); ++ emit(ctx, move, res, MIPS_R_T9); /* Delay slot */ ++ clobber_reg(ctx, res); ++} ++ ++/* Swap bytes and truncate a register word or half word */ ++void emit_bswap_r(struct jit_context *ctx, u8 dst, u32 width) ++{ ++ u8 tmp = MIPS_R_T8; ++ u8 msk = MIPS_R_T9; ++ ++ switch (width) { ++ /* Swap bytes in a word */ ++ case 32: ++ if (cpu_has_mips32r2 || cpu_has_mips32r6) { ++ emit(ctx, wsbh, dst, dst); ++ emit(ctx, rotr, dst, dst, 16); ++ } else { ++ emit(ctx, sll, tmp, dst, 16); /* tmp = dst << 16 */ ++ emit(ctx, srl, dst, dst, 16); /* dst = dst >> 16 */ ++ emit(ctx, or, dst, dst, tmp); /* dst = dst | tmp */ ++ ++ emit(ctx, lui, msk, 0xff); /* msk = 0x00ff0000 */ ++ emit(ctx, ori, msk, msk, 0xff); /* msk = msk | 0xff */ ++ ++ emit(ctx, and, tmp, dst, msk); /* tmp = dst & msk */ ++ emit(ctx, sll, tmp, tmp, 8); /* tmp = tmp << 8 */ ++ emit(ctx, srl, dst, dst, 8); /* dst = dst >> 8 */ ++ emit(ctx, and, dst, dst, msk); /* dst = dst & msk */ ++ emit(ctx, or, dst, dst, tmp); /* reg = dst | tmp */ ++ } ++ break; ++ /* Swap bytes in a half word */ ++ case 16: ++ if (cpu_has_mips32r2 || cpu_has_mips32r6) { ++ emit(ctx, wsbh, dst, dst); ++ emit(ctx, andi, dst, dst, 0xffff); ++ } else { ++ emit(ctx, andi, tmp, dst, 0xff00); /* t = d & 0xff00 */ ++ emit(ctx, srl, tmp, tmp, 8); /* t = t >> 8 */ ++ emit(ctx, andi, dst, dst, 0x00ff); /* d = d & 0x00ff */ ++ emit(ctx, sll, dst, dst, 8); /* d = d << 8 */ ++ emit(ctx, or, dst, dst, tmp); /* d = d | t */ ++ } ++ break; ++ } ++ clobber_reg(ctx, dst); ++} ++ ++/* Validate jump immediate range */ ++bool valid_jmp_i(u8 op, s32 imm) ++{ ++ switch (op) { ++ case JIT_JNOP: ++ /* Immediate value not used */ ++ return true; ++ case BPF_JEQ: ++ case BPF_JNE: ++ /* No immediate operation */ ++ return false; ++ case BPF_JSET: ++ case JIT_JNSET: ++ /* imm must be 16 bits unsigned */ ++ return imm >= 0 && imm <= 0xffff; ++ case BPF_JGE: ++ case BPF_JLT: ++ case BPF_JSGE: ++ case BPF_JSLT: ++ /* imm must be 16 bits */ ++ return imm >= -0x8000 && imm <= 0x7fff; ++ case BPF_JGT: ++ case BPF_JLE: ++ case BPF_JSGT: ++ case BPF_JSLE: ++ /* imm + 1 must be 16 bits */ ++ return imm >= -0x8001 && imm <= 0x7ffe; ++ } ++ return false; ++} ++ ++/* Invert a conditional jump operation */ ++static u8 invert_jmp(u8 
op) ++{ ++ switch (op) { ++ case BPF_JA: return JIT_JNOP; ++ case BPF_JEQ: return BPF_JNE; ++ case BPF_JNE: return BPF_JEQ; ++ case BPF_JSET: return JIT_JNSET; ++ case BPF_JGT: return BPF_JLE; ++ case BPF_JGE: return BPF_JLT; ++ case BPF_JLT: return BPF_JGE; ++ case BPF_JLE: return BPF_JGT; ++ case BPF_JSGT: return BPF_JSLE; ++ case BPF_JSGE: return BPF_JSLT; ++ case BPF_JSLT: return BPF_JSGE; ++ case BPF_JSLE: return BPF_JSGT; ++ } ++ return 0; ++} ++ ++/* Prepare a PC-relative jump operation */ ++static void setup_jmp(struct jit_context *ctx, u8 bpf_op, ++ s16 bpf_off, u8 *jit_op, s32 *jit_off) ++{ ++ u32 *descp = &ctx->descriptors[ctx->bpf_index]; ++ int op = bpf_op; ++ int offset = 0; ++ ++ /* Do not compute offsets on the first pass */ ++ if (INDEX(*descp) == 0) ++ goto done; ++ ++ /* Skip jumps never taken */ ++ if (bpf_op == JIT_JNOP) ++ goto done; ++ ++ /* Convert jumps always taken */ ++ if (bpf_op == BPF_JA) ++ *descp |= JIT_DESC_CONVERT; ++ ++ /* ++ * Current ctx->jit_index points to the start of the branch preamble. ++ * Since the preamble differs among different branch conditionals, ++ * the current index cannot be used to compute the branch offset. ++ * Instead, we use the offset table value for the next instruction, ++ * which gives the index immediately after the branch delay slot. ++ */ ++ if (!CONVERTED(*descp)) { ++ int target = ctx->bpf_index + bpf_off + 1; ++ int origin = ctx->bpf_index + 1; ++ ++ offset = (INDEX(ctx->descriptors[target]) - ++ INDEX(ctx->descriptors[origin]) + 1) * sizeof(u32); ++ } ++ ++ /* ++ * The PC-relative branch offset field on MIPS is 18 bits signed, ++ * so if the computed offset is larger than this we generate a an ++ * absolute jump that we skip with an inverted conditional branch. ++ */ ++ if (CONVERTED(*descp) || offset < -0x20000 || offset > 0x1ffff) { ++ offset = 3 * sizeof(u32); ++ op = invert_jmp(bpf_op); ++ ctx->changes += !CONVERTED(*descp); ++ *descp |= JIT_DESC_CONVERT; ++ } ++ ++done: ++ *jit_off = offset; ++ *jit_op = op; ++} ++ ++/* Prepare a PC-relative jump operation with immediate conditional */ ++void setup_jmp_i(struct jit_context *ctx, s32 imm, u8 width, ++ u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off) ++{ ++ bool always = false; ++ bool never = false; ++ ++ switch (bpf_op) { ++ case BPF_JEQ: ++ case BPF_JNE: ++ break; ++ case BPF_JSET: ++ case BPF_JLT: ++ never = imm == 0; ++ break; ++ case BPF_JGE: ++ always = imm == 0; ++ break; ++ case BPF_JGT: ++ never = (u32)imm == U32_MAX; ++ break; ++ case BPF_JLE: ++ always = (u32)imm == U32_MAX; ++ break; ++ case BPF_JSGT: ++ never = imm == S32_MAX && width == 32; ++ break; ++ case BPF_JSGE: ++ always = imm == S32_MIN && width == 32; ++ break; ++ case BPF_JSLT: ++ never = imm == S32_MIN && width == 32; ++ break; ++ case BPF_JSLE: ++ always = imm == S32_MAX && width == 32; ++ break; ++ } ++ ++ if (never) ++ bpf_op = JIT_JNOP; ++ if (always) ++ bpf_op = BPF_JA; ++ setup_jmp(ctx, bpf_op, bpf_off, jit_op, jit_off); ++} ++ ++/* Prepare a PC-relative jump operation with register conditional */ ++void setup_jmp_r(struct jit_context *ctx, bool same_reg, ++ u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off) ++{ ++ switch (bpf_op) { ++ case BPF_JSET: ++ break; ++ case BPF_JEQ: ++ case BPF_JGE: ++ case BPF_JLE: ++ case BPF_JSGE: ++ case BPF_JSLE: ++ if (same_reg) ++ bpf_op = BPF_JA; ++ break; ++ case BPF_JNE: ++ case BPF_JLT: ++ case BPF_JGT: ++ case BPF_JSGT: ++ case BPF_JSLT: ++ if (same_reg) ++ bpf_op = JIT_JNOP; ++ break; ++ } ++ setup_jmp(ctx, bpf_op, bpf_off, jit_op, jit_off); ++} 
++ ++/* Finish a PC-relative jump operation */ ++int finish_jmp(struct jit_context *ctx, u8 jit_op, s16 bpf_off) ++{ ++ /* Emit conditional branch delay slot */ ++ if (jit_op != JIT_JNOP) ++ emit(ctx, nop); ++ /* ++ * Emit an absolute long jump with delay slot, ++ * if the PC-relative branch was converted. ++ */ ++ if (CONVERTED(ctx->descriptors[ctx->bpf_index])) { ++ int target = get_target(ctx, ctx->bpf_index + bpf_off + 1); ++ ++ if (target < 0) ++ return -1; ++ emit(ctx, j, target); ++ emit(ctx, nop); ++ } ++ return 0; ++} ++ ++/* Jump immediate (32-bit) */ ++void emit_jmp_i(struct jit_context *ctx, u8 dst, s32 imm, s32 off, u8 op) ++{ ++ switch (op) { ++ /* No-op, used internally for branch optimization */ ++ case JIT_JNOP: ++ break; ++ /* PC += off if dst & imm */ ++ case BPF_JSET: ++ emit(ctx, andi, MIPS_R_T9, dst, (u16)imm); ++ emit(ctx, bnez, MIPS_R_T9, off); ++ break; ++ /* PC += off if (dst & imm) == 0 (not in BPF, used for long jumps) */ ++ case JIT_JNSET: ++ emit(ctx, andi, MIPS_R_T9, dst, (u16)imm); ++ emit(ctx, beqz, MIPS_R_T9, off); ++ break; ++ /* PC += off if dst > imm */ ++ case BPF_JGT: ++ emit(ctx, sltiu, MIPS_R_T9, dst, imm + 1); ++ emit(ctx, beqz, MIPS_R_T9, off); ++ break; ++ /* PC += off if dst >= imm */ ++ case BPF_JGE: ++ emit(ctx, sltiu, MIPS_R_T9, dst, imm); ++ emit(ctx, beqz, MIPS_R_T9, off); ++ break; ++ /* PC += off if dst < imm */ ++ case BPF_JLT: ++ emit(ctx, sltiu, MIPS_R_T9, dst, imm); ++ emit(ctx, bnez, MIPS_R_T9, off); ++ break; ++ /* PC += off if dst <= imm */ ++ case BPF_JLE: ++ emit(ctx, sltiu, MIPS_R_T9, dst, imm + 1); ++ emit(ctx, bnez, MIPS_R_T9, off); ++ break; ++ /* PC += off if dst > imm (signed) */ ++ case BPF_JSGT: ++ emit(ctx, slti, MIPS_R_T9, dst, imm + 1); ++ emit(ctx, beqz, MIPS_R_T9, off); ++ break; ++ /* PC += off if dst >= imm (signed) */ ++ case BPF_JSGE: ++ emit(ctx, slti, MIPS_R_T9, dst, imm); ++ emit(ctx, beqz, MIPS_R_T9, off); ++ break; ++ /* PC += off if dst < imm (signed) */ ++ case BPF_JSLT: ++ emit(ctx, slti, MIPS_R_T9, dst, imm); ++ emit(ctx, bnez, MIPS_R_T9, off); ++ break; ++ /* PC += off if dst <= imm (signed) */ ++ case BPF_JSLE: ++ emit(ctx, slti, MIPS_R_T9, dst, imm + 1); ++ emit(ctx, bnez, MIPS_R_T9, off); ++ break; ++ } ++} ++ ++/* Jump register (32-bit) */ ++void emit_jmp_r(struct jit_context *ctx, u8 dst, u8 src, s32 off, u8 op) ++{ ++ switch (op) { ++ /* No-op, used internally for branch optimization */ ++ case JIT_JNOP: ++ break; ++ /* PC += off if dst == src */ ++ case BPF_JEQ: ++ emit(ctx, beq, dst, src, off); ++ break; ++ /* PC += off if dst != src */ ++ case BPF_JNE: ++ emit(ctx, bne, dst, src, off); ++ break; ++ /* PC += off if dst & src */ ++ case BPF_JSET: ++ emit(ctx, and, MIPS_R_T9, dst, src); ++ emit(ctx, bnez, MIPS_R_T9, off); ++ break; ++ /* PC += off if (dst & imm) == 0 (not in BPF, used for long jumps) */ ++ case JIT_JNSET: ++ emit(ctx, and, MIPS_R_T9, dst, src); ++ emit(ctx, beqz, MIPS_R_T9, off); ++ break; ++ /* PC += off if dst > src */ ++ case BPF_JGT: ++ emit(ctx, sltu, MIPS_R_T9, src, dst); ++ emit(ctx, bnez, MIPS_R_T9, off); ++ break; ++ /* PC += off if dst >= src */ ++ case BPF_JGE: ++ emit(ctx, sltu, MIPS_R_T9, dst, src); ++ emit(ctx, beqz, MIPS_R_T9, off); ++ break; ++ /* PC += off if dst < src */ ++ case BPF_JLT: ++ emit(ctx, sltu, MIPS_R_T9, dst, src); ++ emit(ctx, bnez, MIPS_R_T9, off); ++ break; ++ /* PC += off if dst <= src */ ++ case BPF_JLE: ++ emit(ctx, sltu, MIPS_R_T9, src, dst); ++ emit(ctx, beqz, MIPS_R_T9, off); ++ break; ++ /* PC += off if dst > src (signed) */ ++ case 
BPF_JSGT: ++ emit(ctx, slt, MIPS_R_T9, src, dst); ++ emit(ctx, bnez, MIPS_R_T9, off); ++ break; ++ /* PC += off if dst >= src (signed) */ ++ case BPF_JSGE: ++ emit(ctx, slt, MIPS_R_T9, dst, src); ++ emit(ctx, beqz, MIPS_R_T9, off); ++ break; ++ /* PC += off if dst < src (signed) */ ++ case BPF_JSLT: ++ emit(ctx, slt, MIPS_R_T9, dst, src); ++ emit(ctx, bnez, MIPS_R_T9, off); ++ break; ++ /* PC += off if dst <= src (signed) */ ++ case BPF_JSLE: ++ emit(ctx, slt, MIPS_R_T9, src, dst); ++ emit(ctx, beqz, MIPS_R_T9, off); ++ break; ++ } ++} ++ ++/* Jump always */ ++int emit_ja(struct jit_context *ctx, s16 off) ++{ ++ int target = get_target(ctx, ctx->bpf_index + off + 1); ++ ++ if (target < 0) ++ return -1; ++ emit(ctx, j, target); ++ emit(ctx, nop); ++ return 0; ++} ++ ++/* Jump to epilogue */ ++int emit_exit(struct jit_context *ctx) ++{ ++ int target = get_target(ctx, ctx->program->len); ++ ++ if (target < 0) ++ return -1; ++ emit(ctx, j, target); ++ emit(ctx, nop); ++ return 0; ++} ++ ++/* Build the program body from eBPF bytecode */ ++static int build_body(struct jit_context *ctx) ++{ ++ const struct bpf_prog *prog = ctx->program; ++ unsigned int i; ++ ++ ctx->stack_used = 0; ++ for (i = 0; i < prog->len; i++) { ++ const struct bpf_insn *insn = &prog->insnsi[i]; ++ u32 *descp = &ctx->descriptors[i]; ++ int ret; ++ ++ access_reg(ctx, insn->src_reg); ++ access_reg(ctx, insn->dst_reg); ++ ++ ctx->bpf_index = i; ++ if (ctx->target == NULL) { ++ ctx->changes += INDEX(*descp) != ctx->jit_index; ++ *descp &= JIT_DESC_CONVERT; ++ *descp |= ctx->jit_index; ++ } ++ ++ ret = build_insn(insn, ctx); ++ if (ret < 0) ++ return ret; ++ ++ if (ret > 0) { ++ i++; ++ if (ctx->target == NULL) ++ descp[1] = ctx->jit_index; ++ } ++ } ++ ++ /* Store the end offset, where the epilogue begins */ ++ ctx->descriptors[prog->len] = ctx->jit_index; ++ return 0; ++} ++ ++/* Set the branch conversion flag on all instructions */ ++static void set_convert_flag(struct jit_context *ctx, bool enable) ++{ ++ const struct bpf_prog *prog = ctx->program; ++ u32 flag = enable ? JIT_DESC_CONVERT : 0; ++ unsigned int i; ++ ++ for (i = 0; i <= prog->len; i++) ++ ctx->descriptors[i] = INDEX(ctx->descriptors[i]) | flag; ++} ++ ++static void jit_fill_hole(void *area, unsigned int size) ++{ ++ u32 *p; ++ ++ /* We are guaranteed to have aligned memory. */ ++ for (p = area; size >= sizeof(u32); size -= sizeof(u32)) ++ uasm_i_break(&p, BRK_BUG); /* Increments p */ ++} ++ ++bool bpf_jit_needs_zext(void) ++{ ++ return true; ++} ++ ++struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) ++{ ++ struct bpf_prog *tmp, *orig_prog = prog; ++ struct bpf_binary_header *header = NULL; ++ struct jit_context ctx; ++ bool tmp_blinded = false; ++ unsigned int tmp_idx; ++ unsigned int image_size; ++ u8 *image_ptr; ++ int tries; ++ ++ /* ++ * If BPF JIT was not enabled then we must fall back to ++ * the interpreter. ++ */ ++ if (!prog->jit_requested) ++ return orig_prog; ++ /* ++ * If constant blinding was enabled and we failed during blinding ++ * then we must fall back to the interpreter. Otherwise, we save ++ * the new JITed code. 
++ */ ++ tmp = bpf_jit_blind_constants(prog); ++ if (IS_ERR(tmp)) ++ return orig_prog; ++ if (tmp != prog) { ++ tmp_blinded = true; ++ prog = tmp; ++ } ++ ++ memset(&ctx, 0, sizeof(ctx)); ++ ctx.program = prog; ++ ++ /* ++ * Not able to allocate memory for descriptors[], then ++ * we must fall back to the interpreter ++ */ ++ ctx.descriptors = kcalloc(prog->len + 1, sizeof(*ctx.descriptors), ++ GFP_KERNEL); ++ if (ctx.descriptors == NULL) ++ goto out_err; ++ ++ /* First pass discovers used resources */ ++ if (build_body(&ctx) < 0) ++ goto out_err; ++ /* ++ * Second pass computes instruction offsets. ++ * If any PC-relative branches are out of range, a sequence of ++ * a PC-relative branch + a jump is generated, and we have to ++ * try again from the beginning to generate the new offsets. ++ * This is done until no additional conversions are necessary. ++ * The last two iterations are done with all branches being ++ * converted, to guarantee offset table convergence within a ++ * fixed number of iterations. ++ */ ++ ctx.jit_index = 0; ++ build_prologue(&ctx); ++ tmp_idx = ctx.jit_index; ++ ++ tries = JIT_MAX_ITERATIONS; ++ do { ++ ctx.jit_index = tmp_idx; ++ ctx.changes = 0; ++ if (tries == 2) ++ set_convert_flag(&ctx, true); ++ if (build_body(&ctx) < 0) ++ goto out_err; ++ } while (ctx.changes > 0 && --tries > 0); ++ ++ if (WARN_ONCE(ctx.changes > 0, "JIT offsets failed to converge")) ++ goto out_err; ++ ++ build_epilogue(&ctx, MIPS_R_RA); ++ ++ /* Now we know the size of the structure to make */ ++ image_size = sizeof(u32) * ctx.jit_index; ++ header = bpf_jit_binary_alloc(image_size, &image_ptr, ++ sizeof(u32), jit_fill_hole); ++ /* ++ * Not able to allocate memory for the structure then ++ * we must fall back to the interpretation ++ */ ++ if (header == NULL) ++ goto out_err; ++ ++ /* Actual pass to generate final JIT code */ ++ ctx.target = (u32 *)image_ptr; ++ ctx.jit_index = 0; ++ ++ /* ++ * If building the JITed code fails somehow, ++ * we fall back to the interpretation. ++ */ ++ build_prologue(&ctx); ++ if (build_body(&ctx) < 0) ++ goto out_err; ++ build_epilogue(&ctx, MIPS_R_RA); ++ ++ /* Populate line info meta data */ ++ set_convert_flag(&ctx, false); ++ bpf_prog_fill_jited_linfo(prog, &ctx.descriptors[1]); ++ ++ /* Set as read-only exec and flush instruction cache */ ++ bpf_jit_binary_lock_ro(header); ++ flush_icache_range((unsigned long)header, ++ (unsigned long)&ctx.target[ctx.jit_index]); ++ ++ if (bpf_jit_enable > 1) ++ bpf_jit_dump(prog->len, image_size, 2, ctx.target); ++ ++ prog->bpf_func = (void *)ctx.target; ++ prog->jited = 1; ++ prog->jited_len = image_size; ++ ++out: ++ if (tmp_blinded) ++ bpf_jit_prog_release_other(prog, prog == orig_prog ? ++ tmp : orig_prog); ++ kfree(ctx.descriptors); ++ return prog; ++ ++out_err: ++ prog = orig_prog; ++ if (header) ++ bpf_jit_binary_free(header); ++ goto out; ++} +--- /dev/null ++++ b/arch/mips/net/bpf_jit_comp.h +@@ -0,0 +1,211 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Just-In-Time compiler for eBPF bytecode on 32-bit and 64-bit MIPS. ++ * ++ * Copyright (c) 2021 Anyfi Networks AB. ++ * Author: Johan Almbladh <johan.almbladh@gmail.com> ++ * ++ * Based on code and ideas from ++ * Copyright (c) 2017 Cavium, Inc. 
++ * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com> ++ * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com> ++ */ ++ ++#ifndef _BPF_JIT_COMP_H ++#define _BPF_JIT_COMP_H ++ ++/* MIPS registers */ ++#define MIPS_R_ZERO 0 /* Const zero */ ++#define MIPS_R_AT 1 /* Asm temp */ ++#define MIPS_R_V0 2 /* Result */ ++#define MIPS_R_V1 3 /* Result */ ++#define MIPS_R_A0 4 /* Argument */ ++#define MIPS_R_A1 5 /* Argument */ ++#define MIPS_R_A2 6 /* Argument */ ++#define MIPS_R_A3 7 /* Argument */ ++#define MIPS_R_A4 8 /* Arg (n64) */ ++#define MIPS_R_A5 9 /* Arg (n64) */ ++#define MIPS_R_A6 10 /* Arg (n64) */ ++#define MIPS_R_A7 11 /* Arg (n64) */ ++#define MIPS_R_T0 8 /* Temp (o32) */ ++#define MIPS_R_T1 9 /* Temp (o32) */ ++#define MIPS_R_T2 10 /* Temp (o32) */ ++#define MIPS_R_T3 11 /* Temp (o32) */ ++#define MIPS_R_T4 12 /* Temporary */ ++#define MIPS_R_T5 13 /* Temporary */ ++#define MIPS_R_T6 14 /* Temporary */ ++#define MIPS_R_T7 15 /* Temporary */ ++#define MIPS_R_S0 16 /* Saved */ ++#define MIPS_R_S1 17 /* Saved */ ++#define MIPS_R_S2 18 /* Saved */ ++#define MIPS_R_S3 19 /* Saved */ ++#define MIPS_R_S4 20 /* Saved */ ++#define MIPS_R_S5 21 /* Saved */ ++#define MIPS_R_S6 22 /* Saved */ ++#define MIPS_R_S7 23 /* Saved */ ++#define MIPS_R_T8 24 /* Temporary */ ++#define MIPS_R_T9 25 /* Temporary */ ++/* MIPS_R_K0 26 Reserved */ ++/* MIPS_R_K1 27 Reserved */ ++#define MIPS_R_GP 28 /* Global ptr */ ++#define MIPS_R_SP 29 /* Stack ptr */ ++#define MIPS_R_FP 30 /* Frame ptr */ ++#define MIPS_R_RA 31 /* Return */ ++ ++/* ++ * Jump address mask for immediate jumps. The four most significant bits ++ * must be equal to PC. ++ */ ++#define MIPS_JMP_MASK 0x0fffffffUL ++ ++/* Maximum number of iterations in offset table computation */ ++#define JIT_MAX_ITERATIONS 8 ++ ++/* ++ * Jump pseudo-instructions used internally ++ * for branch conversion and branch optimization. ++ */ ++#define JIT_JNSET 0xe0 ++#define JIT_JNOP 0xf0 ++ ++/* Descriptor flag for PC-relative branch conversion */ ++#define JIT_DESC_CONVERT BIT(31) ++ ++/* JIT context for an eBPF program */ ++struct jit_context { ++ struct bpf_prog *program; /* The eBPF program being JITed */ ++ u32 *descriptors; /* eBPF to JITed CPU insn descriptors */ ++ u32 *target; /* JITed code buffer */ ++ u32 bpf_index; /* Index of current BPF program insn */ ++ u32 jit_index; /* Index of current JIT target insn */ ++ u32 changes; /* Number of PC-relative branch conv */ ++ u32 accessed; /* Bit mask of read eBPF registers */ ++ u32 clobbered; /* Bit mask of modified CPU registers */ ++ u32 stack_size; /* Total allocated stack size in bytes */ ++ u32 saved_size; /* Size of callee-saved registers */ ++ u32 stack_used; /* Stack size used for function calls */ ++}; ++ ++/* Emit the instruction if the JIT memory space has been allocated */ ++#define emit(ctx, func, ...) \ ++do { \ ++ if ((ctx)->target != NULL) { \ ++ u32 *p = &(ctx)->target[ctx->jit_index]; \ ++ uasm_i_##func(&p, ##__VA_ARGS__); \ ++ } \ ++ (ctx)->jit_index++; \ ++} while (0) ++ ++/* ++ * Mark a BPF register as accessed, it needs to be ++ * initialized by the program if expected, e.g. FP. ++ */ ++static inline void access_reg(struct jit_context *ctx, u8 reg) ++{ ++ ctx->accessed |= BIT(reg); ++} ++ ++/* ++ * Mark a CPU register as clobbered, it needs to be ++ * saved/restored by the program if callee-saved. 
++ */ ++static inline void clobber_reg(struct jit_context *ctx, u8 reg) ++{ ++ ctx->clobbered |= BIT(reg); ++} ++ ++/* ++ * Push registers on the stack, starting at a given depth from the stack ++ * pointer and increasing. The next depth to be written is returned. ++ */ ++int push_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth); ++ ++/* ++ * Pop registers from the stack, starting at a given depth from the stack ++ * pointer and increasing. The next depth to be read is returned. ++ */ ++int pop_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth); ++ ++/* Compute the 28-bit jump target address from a BPF program location */ ++int get_target(struct jit_context *ctx, u32 loc); ++ ++/* Compute the PC-relative offset to relative BPF program offset */ ++int get_offset(const struct jit_context *ctx, int off); ++ ++/* dst = imm (32-bit) */ ++void emit_mov_i(struct jit_context *ctx, u8 dst, s32 imm); ++ ++/* dst = src (32-bit) */ ++void emit_mov_r(struct jit_context *ctx, u8 dst, u8 src); ++ ++/* Validate ALU/ALU64 immediate range */ ++bool valid_alu_i(u8 op, s32 imm); ++ ++/* Rewrite ALU/ALU64 immediate operation */ ++bool rewrite_alu_i(u8 op, s32 imm, u8 *alu, s32 *val); ++ ++/* ALU immediate operation (32-bit) */ ++void emit_alu_i(struct jit_context *ctx, u8 dst, s32 imm, u8 op); ++ ++/* ALU register operation (32-bit) */ ++void emit_alu_r(struct jit_context *ctx, u8 dst, u8 src, u8 op); ++ ++/* Atomic read-modify-write (32-bit) */ ++void emit_atomic_r(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 code); ++ ++/* Atomic compare-and-exchange (32-bit) */ ++void emit_cmpxchg_r(struct jit_context *ctx, u8 dst, u8 src, u8 res, s16 off); ++ ++/* Swap bytes and truncate a register word or half word */ ++void emit_bswap_r(struct jit_context *ctx, u8 dst, u32 width); ++ ++/* Validate JMP/JMP32 immediate range */ ++bool valid_jmp_i(u8 op, s32 imm); ++ ++/* Prepare a PC-relative jump operation with immediate conditional */ ++void setup_jmp_i(struct jit_context *ctx, s32 imm, u8 width, ++ u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off); ++ ++/* Prepare a PC-relative jump operation with register conditional */ ++void setup_jmp_r(struct jit_context *ctx, bool same_reg, ++ u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off); ++ ++/* Finish a PC-relative jump operation */ ++int finish_jmp(struct jit_context *ctx, u8 jit_op, s16 bpf_off); ++ ++/* Conditional JMP/JMP32 immediate */ ++void emit_jmp_i(struct jit_context *ctx, u8 dst, s32 imm, s32 off, u8 op); ++ ++/* Conditional JMP/JMP32 register */ ++void emit_jmp_r(struct jit_context *ctx, u8 dst, u8 src, s32 off, u8 op); ++ ++/* Jump always */ ++int emit_ja(struct jit_context *ctx, s16 off); ++ ++/* Jump to epilogue */ ++int emit_exit(struct jit_context *ctx); ++ ++/* ++ * Build program prologue to set up the stack and registers. ++ * This function is implemented separately for 32-bit and 64-bit JITs. ++ */ ++void build_prologue(struct jit_context *ctx); ++ ++/* ++ * Build the program epilogue to restore the stack and registers. ++ * This function is implemented separately for 32-bit and 64-bit JITs. ++ */ ++void build_epilogue(struct jit_context *ctx, int dest_reg); ++ ++/* ++ * Convert an eBPF instruction to native instruction, i.e ++ * JITs an eBPF instruction. ++ * Returns : ++ * 0 - Successfully JITed an 8-byte eBPF instruction ++ * >0 - Successfully JITed a 16-byte eBPF instruction ++ * <0 - Failed to JIT. ++ * This function is implemented separately for 32-bit and 64-bit JITs. 
++ */ ++int build_insn(const struct bpf_insn *insn, struct jit_context *ctx); ++ ++#endif /* _BPF_JIT_COMP_H */ +--- /dev/null ++++ b/arch/mips/net/bpf_jit_comp32.c +@@ -0,0 +1,1741 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Just-In-Time compiler for eBPF bytecode on MIPS. ++ * Implementation of JIT functions for 32-bit CPUs. ++ * ++ * Copyright (c) 2021 Anyfi Networks AB. ++ * Author: Johan Almbladh <johan.almbladh@gmail.com> ++ * ++ * Based on code and ideas from ++ * Copyright (c) 2017 Cavium, Inc. ++ * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com> ++ * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com> ++ */ ++ ++#include <linux/math64.h> ++#include <linux/errno.h> ++#include <linux/filter.h> ++#include <linux/bpf.h> ++#include <asm/cpu-features.h> ++#include <asm/isa-rev.h> ++#include <asm/uasm.h> ++ ++#include "bpf_jit_comp.h" ++ ++/* MIPS a4-a7 are not available in the o32 ABI */ ++#undef MIPS_R_A4 ++#undef MIPS_R_A5 ++#undef MIPS_R_A6 ++#undef MIPS_R_A7 ++ ++/* Stack is 8-byte aligned in o32 ABI */ ++#define MIPS_STACK_ALIGNMENT 8 ++ ++/* ++ * The top 16 bytes of a stack frame is reserved for the callee in O32 ABI. ++ * This corresponds to stack space for register arguments a0-a3. ++ */ ++#define JIT_RESERVED_STACK 16 ++ ++/* Temporary 64-bit register used by JIT */ ++#define JIT_REG_TMP MAX_BPF_JIT_REG ++ ++/* ++ * Number of prologue bytes to skip when doing a tail call. ++ * Tail call count (TCC) initialization (8 bytes) always, plus ++ * R0-to-v0 assignment (4 bytes) if big endian. ++ */ ++#ifdef __BIG_ENDIAN ++#define JIT_TCALL_SKIP 12 ++#else ++#define JIT_TCALL_SKIP 8 ++#endif ++ ++/* CPU registers holding the callee return value */ ++#define JIT_RETURN_REGS \ ++ (BIT(MIPS_R_V0) | \ ++ BIT(MIPS_R_V1)) ++ ++/* CPU registers arguments passed to callee directly */ ++#define JIT_ARG_REGS \ ++ (BIT(MIPS_R_A0) | \ ++ BIT(MIPS_R_A1) | \ ++ BIT(MIPS_R_A2) | \ ++ BIT(MIPS_R_A3)) ++ ++/* CPU register arguments passed to callee on stack */ ++#define JIT_STACK_REGS \ ++ (BIT(MIPS_R_T0) | \ ++ BIT(MIPS_R_T1) | \ ++ BIT(MIPS_R_T2) | \ ++ BIT(MIPS_R_T3) | \ ++ BIT(MIPS_R_T4) | \ ++ BIT(MIPS_R_T5)) ++ ++/* Caller-saved CPU registers */ ++#define JIT_CALLER_REGS \ ++ (JIT_RETURN_REGS | \ ++ JIT_ARG_REGS | \ ++ JIT_STACK_REGS) ++ ++/* Callee-saved CPU registers */ ++#define JIT_CALLEE_REGS \ ++ (BIT(MIPS_R_S0) | \ ++ BIT(MIPS_R_S1) | \ ++ BIT(MIPS_R_S2) | \ ++ BIT(MIPS_R_S3) | \ ++ BIT(MIPS_R_S4) | \ ++ BIT(MIPS_R_S5) | \ ++ BIT(MIPS_R_S6) | \ ++ BIT(MIPS_R_S7) | \ ++ BIT(MIPS_R_GP) | \ ++ BIT(MIPS_R_FP) | \ ++ BIT(MIPS_R_RA)) ++ ++/* ++ * Mapping of 64-bit eBPF registers to 32-bit native MIPS registers. ++ * ++ * 1) Native register pairs are ordered according to CPU endiannes, following ++ * the MIPS convention for passing 64-bit arguments and return values. ++ * 2) The eBPF return value, arguments and callee-saved registers are mapped ++ * to their native MIPS equivalents. ++ * 3) Since the 32 highest bits in the eBPF FP register are always zero, ++ * only one general-purpose register is actually needed for the mapping. ++ * We use the fp register for this purpose, and map the highest bits to ++ * the MIPS register r0 (zero). ++ * 4) We use the MIPS gp and at registers as internal temporary registers ++ * for constant blinding. The gp register is callee-saved. ++ * 5) One 64-bit temporary register is mapped for use when sign-extending ++ * immediate operands. 
MIPS registers t6-t9 are available to the JIT ++ * for as temporaries when implementing complex 64-bit operations. ++ * ++ * With this scheme all eBPF registers are being mapped to native MIPS ++ * registers without having to use any stack scratch space. The direct ++ * register mapping (2) simplifies the handling of function calls. ++ */ ++static const u8 bpf2mips32[][2] = { ++ /* Return value from in-kernel function, and exit value from eBPF */ ++ [BPF_REG_0] = {MIPS_R_V1, MIPS_R_V0}, ++ /* Arguments from eBPF program to in-kernel function */ ++ [BPF_REG_1] = {MIPS_R_A1, MIPS_R_A0}, ++ [BPF_REG_2] = {MIPS_R_A3, MIPS_R_A2}, ++ /* Remaining arguments, to be passed on the stack per O32 ABI */ ++ [BPF_REG_3] = {MIPS_R_T1, MIPS_R_T0}, ++ [BPF_REG_4] = {MIPS_R_T3, MIPS_R_T2}, ++ [BPF_REG_5] = {MIPS_R_T5, MIPS_R_T4}, ++ /* Callee-saved registers that in-kernel function will preserve */ ++ [BPF_REG_6] = {MIPS_R_S1, MIPS_R_S0}, ++ [BPF_REG_7] = {MIPS_R_S3, MIPS_R_S2}, ++ [BPF_REG_8] = {MIPS_R_S5, MIPS_R_S4}, ++ [BPF_REG_9] = {MIPS_R_S7, MIPS_R_S6}, ++ /* Read-only frame pointer to access the eBPF stack */ ++#ifdef __BIG_ENDIAN ++ [BPF_REG_FP] = {MIPS_R_FP, MIPS_R_ZERO}, ++#else ++ [BPF_REG_FP] = {MIPS_R_ZERO, MIPS_R_FP}, ++#endif ++ /* Temporary register for blinding constants */ ++ [BPF_REG_AX] = {MIPS_R_GP, MIPS_R_AT}, ++ /* Temporary register for internal JIT use */ ++ [JIT_REG_TMP] = {MIPS_R_T7, MIPS_R_T6}, ++}; ++ ++/* Get low CPU register for a 64-bit eBPF register mapping */ ++static inline u8 lo(const u8 reg[]) ++{ ++#ifdef __BIG_ENDIAN ++ return reg[0]; ++#else ++ return reg[1]; ++#endif ++} ++ ++/* Get high CPU register for a 64-bit eBPF register mapping */ ++static inline u8 hi(const u8 reg[]) ++{ ++#ifdef __BIG_ENDIAN ++ return reg[1]; ++#else ++ return reg[0]; ++#endif ++} ++ ++/* ++ * Mark a 64-bit CPU register pair as clobbered, it needs to be ++ * saved/restored by the program if callee-saved. ++ */ ++static void clobber_reg64(struct jit_context *ctx, const u8 reg[]) ++{ ++ clobber_reg(ctx, reg[0]); ++ clobber_reg(ctx, reg[1]); ++} ++ ++/* dst = imm (sign-extended) */ ++static void emit_mov_se_i64(struct jit_context *ctx, const u8 dst[], s32 imm) ++{ ++ emit_mov_i(ctx, lo(dst), imm); ++ if (imm < 0) ++ emit(ctx, addiu, hi(dst), MIPS_R_ZERO, -1); ++ else ++ emit(ctx, move, hi(dst), MIPS_R_ZERO); ++ clobber_reg64(ctx, dst); ++} ++ ++/* Zero extension, if verifier does not do it for us */ ++static void emit_zext_ver(struct jit_context *ctx, const u8 dst[]) ++{ ++ if (!ctx->program->aux->verifier_zext) { ++ emit(ctx, move, hi(dst), MIPS_R_ZERO); ++ clobber_reg(ctx, hi(dst)); ++ } ++} ++ ++/* Load delay slot, if ISA mandates it */ ++static void emit_load_delay(struct jit_context *ctx) ++{ ++ if (!cpu_has_mips_2_3_4_5_r) ++ emit(ctx, nop); ++} ++ ++/* ALU immediate operation (64-bit) */ ++static void emit_alu_i64(struct jit_context *ctx, ++ const u8 dst[], s32 imm, u8 op) ++{ ++ u8 src = MIPS_R_T6; ++ ++ /* ++ * ADD/SUB with all but the max negative imm can be handled by ++ * inverting the operation and the imm value, saving one insn. 
++ */ ++ if (imm > S32_MIN && imm < 0) ++ switch (op) { ++ case BPF_ADD: ++ op = BPF_SUB; ++ imm = -imm; ++ break; ++ case BPF_SUB: ++ op = BPF_ADD; ++ imm = -imm; ++ break; ++ } ++ ++ /* Move immediate to temporary register */ ++ emit_mov_i(ctx, src, imm); ++ ++ switch (op) { ++ /* dst = dst + imm */ ++ case BPF_ADD: ++ emit(ctx, addu, lo(dst), lo(dst), src); ++ emit(ctx, sltu, MIPS_R_T9, lo(dst), src); ++ emit(ctx, addu, hi(dst), hi(dst), MIPS_R_T9); ++ if (imm < 0) ++ emit(ctx, addiu, hi(dst), hi(dst), -1); ++ break; ++ /* dst = dst - imm */ ++ case BPF_SUB: ++ emit(ctx, sltu, MIPS_R_T9, lo(dst), src); ++ emit(ctx, subu, lo(dst), lo(dst), src); ++ emit(ctx, subu, hi(dst), hi(dst), MIPS_R_T9); ++ if (imm < 0) ++ emit(ctx, addiu, hi(dst), hi(dst), 1); ++ break; ++ /* dst = dst | imm */ ++ case BPF_OR: ++ emit(ctx, or, lo(dst), lo(dst), src); ++ if (imm < 0) ++ emit(ctx, addiu, hi(dst), MIPS_R_ZERO, -1); ++ break; ++ /* dst = dst & imm */ ++ case BPF_AND: ++ emit(ctx, and, lo(dst), lo(dst), src); ++ if (imm >= 0) ++ emit(ctx, move, hi(dst), MIPS_R_ZERO); ++ break; ++ /* dst = dst ^ imm */ ++ case BPF_XOR: ++ emit(ctx, xor, lo(dst), lo(dst), src); ++ if (imm < 0) { ++ emit(ctx, subu, hi(dst), MIPS_R_ZERO, hi(dst)); ++ emit(ctx, addiu, hi(dst), hi(dst), -1); ++ } ++ break; ++ } ++ clobber_reg64(ctx, dst); ++} ++ ++/* ALU register operation (64-bit) */ ++static void emit_alu_r64(struct jit_context *ctx, ++ const u8 dst[], const u8 src[], u8 op) ++{ ++ switch (BPF_OP(op)) { ++ /* dst = dst + src */ ++ case BPF_ADD: ++ if (src == dst) { ++ emit(ctx, srl, MIPS_R_T9, lo(dst), 31); ++ emit(ctx, addu, lo(dst), lo(dst), lo(dst)); ++ } else { ++ emit(ctx, addu, lo(dst), lo(dst), lo(src)); ++ emit(ctx, sltu, MIPS_R_T9, lo(dst), lo(src)); ++ } ++ emit(ctx, addu, hi(dst), hi(dst), hi(src)); ++ emit(ctx, addu, hi(dst), hi(dst), MIPS_R_T9); ++ break; ++ /* dst = dst - src */ ++ case BPF_SUB: ++ emit(ctx, sltu, MIPS_R_T9, lo(dst), lo(src)); ++ emit(ctx, subu, lo(dst), lo(dst), lo(src)); ++ emit(ctx, subu, hi(dst), hi(dst), hi(src)); ++ emit(ctx, subu, hi(dst), hi(dst), MIPS_R_T9); ++ break; ++ /* dst = dst | src */ ++ case BPF_OR: ++ emit(ctx, or, lo(dst), lo(dst), lo(src)); ++ emit(ctx, or, hi(dst), hi(dst), hi(src)); ++ break; ++ /* dst = dst & src */ ++ case BPF_AND: ++ emit(ctx, and, lo(dst), lo(dst), lo(src)); ++ emit(ctx, and, hi(dst), hi(dst), hi(src)); ++ break; ++ /* dst = dst ^ src */ ++ case BPF_XOR: ++ emit(ctx, xor, lo(dst), lo(dst), lo(src)); ++ emit(ctx, xor, hi(dst), hi(dst), hi(src)); ++ break; ++ } ++ clobber_reg64(ctx, dst); ++} ++ ++/* ALU invert (64-bit) */ ++static void emit_neg_i64(struct jit_context *ctx, const u8 dst[]) ++{ ++ emit(ctx, sltu, MIPS_R_T9, MIPS_R_ZERO, lo(dst)); ++ emit(ctx, subu, lo(dst), MIPS_R_ZERO, lo(dst)); ++ emit(ctx, subu, hi(dst), MIPS_R_ZERO, hi(dst)); ++ emit(ctx, subu, hi(dst), hi(dst), MIPS_R_T9); ++ ++ clobber_reg64(ctx, dst); ++} ++ ++/* ALU shift immediate (64-bit) */ ++static void emit_shift_i64(struct jit_context *ctx, ++ const u8 dst[], u32 imm, u8 op) ++{ ++ switch (BPF_OP(op)) { ++ /* dst = dst << imm */ ++ case BPF_LSH: ++ if (imm < 32) { ++ emit(ctx, srl, MIPS_R_T9, lo(dst), 32 - imm); ++ emit(ctx, sll, lo(dst), lo(dst), imm); ++ emit(ctx, sll, hi(dst), hi(dst), imm); ++ emit(ctx, or, hi(dst), hi(dst), MIPS_R_T9); ++ } else { ++ emit(ctx, sll, hi(dst), lo(dst), imm - 32); ++ emit(ctx, move, lo(dst), MIPS_R_ZERO); ++ } ++ break; ++ /* dst = dst >> imm */ ++ case BPF_RSH: ++ if (imm < 32) { ++ emit(ctx, sll, MIPS_R_T9, hi(dst), 32 - imm); ++ 
emit(ctx, srl, lo(dst), lo(dst), imm); ++ emit(ctx, srl, hi(dst), hi(dst), imm); ++ emit(ctx, or, lo(dst), lo(dst), MIPS_R_T9); ++ } else { ++ emit(ctx, srl, lo(dst), hi(dst), imm - 32); ++ emit(ctx, move, hi(dst), MIPS_R_ZERO); ++ } ++ break; ++ /* dst = dst >> imm (arithmetic) */ ++ case BPF_ARSH: ++ if (imm < 32) { ++ emit(ctx, sll, MIPS_R_T9, hi(dst), 32 - imm); ++ emit(ctx, srl, lo(dst), lo(dst), imm); ++ emit(ctx, sra, hi(dst), hi(dst), imm); ++ emit(ctx, or, lo(dst), lo(dst), MIPS_R_T9); ++ } else { ++ emit(ctx, sra, lo(dst), hi(dst), imm - 32); ++ emit(ctx, sra, hi(dst), hi(dst), 31); ++ } ++ break; ++ } ++ clobber_reg64(ctx, dst); ++} ++ ++/* ALU shift register (64-bit) */ ++static void emit_shift_r64(struct jit_context *ctx, ++ const u8 dst[], u8 src, u8 op) ++{ ++ u8 t1 = MIPS_R_T8; ++ u8 t2 = MIPS_R_T9; ++ ++ emit(ctx, andi, t1, src, 32); /* t1 = src & 32 */ ++ emit(ctx, beqz, t1, 16); /* PC += 16 if t1 == 0 */ ++ emit(ctx, nor, t2, src, MIPS_R_ZERO); /* t2 = ~src (delay slot) */ ++ ++ switch (BPF_OP(op)) { ++ /* dst = dst << src */ ++ case BPF_LSH: ++ /* Next: shift >= 32 */ ++ emit(ctx, sllv, hi(dst), lo(dst), src); /* dh = dl << src */ ++ emit(ctx, move, lo(dst), MIPS_R_ZERO); /* dl = 0 */ ++ emit(ctx, b, 20); /* PC += 20 */ ++ /* +16: shift < 32 */ ++ emit(ctx, srl, t1, lo(dst), 1); /* t1 = dl >> 1 */ ++ emit(ctx, srlv, t1, t1, t2); /* t1 = t1 >> t2 */ ++ emit(ctx, sllv, lo(dst), lo(dst), src); /* dl = dl << src */ ++ emit(ctx, sllv, hi(dst), hi(dst), src); /* dh = dh << src */ ++ emit(ctx, or, hi(dst), hi(dst), t1); /* dh = dh | t1 */ ++ break; ++ /* dst = dst >> src */ ++ case BPF_RSH: ++ /* Next: shift >= 32 */ ++ emit(ctx, srlv, lo(dst), hi(dst), src); /* dl = dh >> src */ ++ emit(ctx, move, hi(dst), MIPS_R_ZERO); /* dh = 0 */ ++ emit(ctx, b, 20); /* PC += 20 */ ++ /* +16: shift < 32 */ ++ emit(ctx, sll, t1, hi(dst), 1); /* t1 = dl << 1 */ ++ emit(ctx, sllv, t1, t1, t2); /* t1 = t1 << t2 */ ++ emit(ctx, srlv, lo(dst), lo(dst), src); /* dl = dl >> src */ ++ emit(ctx, srlv, hi(dst), hi(dst), src); /* dh = dh >> src */ ++ emit(ctx, or, lo(dst), lo(dst), t1); /* dl = dl | t1 */ ++ break; ++ /* dst = dst >> src (arithmetic) */ ++ case BPF_ARSH: ++ /* Next: shift >= 32 */ ++ emit(ctx, srav, lo(dst), hi(dst), src); /* dl = dh >>a src */ ++ emit(ctx, sra, hi(dst), hi(dst), 31); /* dh = dh >>a 31 */ ++ emit(ctx, b, 20); /* PC += 20 */ ++ /* +16: shift < 32 */ ++ emit(ctx, sll, t1, hi(dst), 1); /* t1 = dl << 1 */ ++ emit(ctx, sllv, t1, t1, t2); /* t1 = t1 << t2 */ ++ emit(ctx, srlv, lo(dst), lo(dst), src); /* dl = dl >>a src */ ++ emit(ctx, srav, hi(dst), hi(dst), src); /* dh = dh >> src */ ++ emit(ctx, or, lo(dst), lo(dst), t1); /* dl = dl | t1 */ ++ break; ++ } ++ ++ /* +20: Done */ ++ clobber_reg64(ctx, dst); ++} ++ ++/* ALU mul immediate (64x32-bit) */ ++static void emit_mul_i64(struct jit_context *ctx, const u8 dst[], s32 imm) ++{ ++ u8 src = MIPS_R_T6; ++ u8 tmp = MIPS_R_T9; ++ ++ switch (imm) { ++ /* dst = dst * 1 is a no-op */ ++ case 1: ++ break; ++ /* dst = dst * -1 */ ++ case -1: ++ emit_neg_i64(ctx, dst); ++ break; ++ case 0: ++ emit_mov_r(ctx, lo(dst), MIPS_R_ZERO); ++ emit_mov_r(ctx, hi(dst), MIPS_R_ZERO); ++ break; ++ /* Full 64x32 multiply */ ++ default: ++ /* hi(dst) = hi(dst) * src(imm) */ ++ emit_mov_i(ctx, src, imm); ++ if (cpu_has_mips32r1 || cpu_has_mips32r6) { ++ emit(ctx, mul, hi(dst), hi(dst), src); ++ } else { ++ emit(ctx, multu, hi(dst), src); ++ emit(ctx, mflo, hi(dst)); ++ } ++ ++ /* hi(dst) = hi(dst) - lo(dst) */ ++ if (imm < 0) ++ emit(ctx, subu, 
hi(dst), hi(dst), lo(dst)); ++ ++ /* tmp = lo(dst) * src(imm) >> 32 */ ++ /* lo(dst) = lo(dst) * src(imm) */ ++ if (cpu_has_mips32r6) { ++ emit(ctx, muhu, tmp, lo(dst), src); ++ emit(ctx, mulu, lo(dst), lo(dst), src); ++ } else { ++ emit(ctx, multu, lo(dst), src); ++ emit(ctx, mflo, lo(dst)); ++ emit(ctx, mfhi, tmp); ++ } ++ ++ /* hi(dst) += tmp */ ++ emit(ctx, addu, hi(dst), hi(dst), tmp); ++ clobber_reg64(ctx, dst); ++ break; ++ } ++} ++ ++/* ALU mul register (64x64-bit) */ ++static void emit_mul_r64(struct jit_context *ctx, ++ const u8 dst[], const u8 src[]) ++{ ++ u8 acc = MIPS_R_T8; ++ u8 tmp = MIPS_R_T9; ++ ++ /* acc = hi(dst) * lo(src) */ ++ if (cpu_has_mips32r1 || cpu_has_mips32r6) { ++ emit(ctx, mul, acc, hi(dst), lo(src)); ++ } else { ++ emit(ctx, multu, hi(dst), lo(src)); ++ emit(ctx, mflo, acc); ++ } ++ ++ /* tmp = lo(dst) * hi(src) */ ++ if (cpu_has_mips32r1 || cpu_has_mips32r6) { ++ emit(ctx, mul, tmp, lo(dst), hi(src)); ++ } else { ++ emit(ctx, multu, lo(dst), hi(src)); ++ emit(ctx, mflo, tmp); ++ } ++ ++ /* acc += tmp */ ++ emit(ctx, addu, acc, acc, tmp); ++ ++ /* tmp = lo(dst) * lo(src) >> 32 */ ++ /* lo(dst) = lo(dst) * lo(src) */ ++ if (cpu_has_mips32r6) { ++ emit(ctx, muhu, tmp, lo(dst), lo(src)); ++ emit(ctx, mulu, lo(dst), lo(dst), lo(src)); ++ } else { ++ emit(ctx, multu, lo(dst), lo(src)); ++ emit(ctx, mflo, lo(dst)); ++ emit(ctx, mfhi, tmp); ++ } ++ ++ /* hi(dst) = acc + tmp */ ++ emit(ctx, addu, hi(dst), acc, tmp); ++ clobber_reg64(ctx, dst); ++} ++ ++/* Helper function for 64-bit modulo */ ++static u64 jit_mod64(u64 a, u64 b) ++{ ++ u64 rem; ++ ++ div64_u64_rem(a, b, &rem); ++ return rem; ++} ++ ++/* ALU div/mod register (64-bit) */ ++static void emit_divmod_r64(struct jit_context *ctx, ++ const u8 dst[], const u8 src[], u8 op) ++{ ++ const u8 *r0 = bpf2mips32[BPF_REG_0]; /* Mapped to v0-v1 */ ++ const u8 *r1 = bpf2mips32[BPF_REG_1]; /* Mapped to a0-a1 */ ++ const u8 *r2 = bpf2mips32[BPF_REG_2]; /* Mapped to a2-a3 */ ++ int exclude, k; ++ u32 addr = 0; ++ ++ /* Push caller-saved registers on stack */ ++ push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, ++ 0, JIT_RESERVED_STACK); ++ ++ /* Put 64-bit arguments 1 and 2 in registers a0-a3 */ ++ for (k = 0; k < 2; k++) { ++ emit(ctx, move, MIPS_R_T9, src[k]); ++ emit(ctx, move, r1[k], dst[k]); ++ emit(ctx, move, r2[k], MIPS_R_T9); ++ } ++ ++ /* Emit function call */ ++ switch (BPF_OP(op)) { ++ /* dst = dst / src */ ++ case BPF_DIV: ++ addr = (u32)&div64_u64; ++ break; ++ /* dst = dst % src */ ++ case BPF_MOD: ++ addr = (u32)&jit_mod64; ++ break; ++ } ++ emit_mov_i(ctx, MIPS_R_T9, addr); ++ emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9); ++ emit(ctx, nop); /* Delay slot */ ++ ++ /* Store the 64-bit result in dst */ ++ emit(ctx, move, dst[0], r0[0]); ++ emit(ctx, move, dst[1], r0[1]); ++ ++ /* Restore caller-saved registers, excluding the computed result */ ++ exclude = BIT(lo(dst)) | BIT(hi(dst)); ++ pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, ++ exclude, JIT_RESERVED_STACK); ++ emit_load_delay(ctx); ++ ++ clobber_reg64(ctx, dst); ++ clobber_reg(ctx, MIPS_R_V0); ++ clobber_reg(ctx, MIPS_R_V1); ++ clobber_reg(ctx, MIPS_R_RA); ++} ++ ++/* Swap bytes in a register word */ ++static void emit_swap8_r(struct jit_context *ctx, u8 dst, u8 src, u8 mask) ++{ ++ u8 tmp = MIPS_R_T9; ++ ++ emit(ctx, and, tmp, src, mask); /* tmp = src & 0x00ff00ff */ ++ emit(ctx, sll, tmp, tmp, 8); /* tmp = tmp << 8 */ ++ emit(ctx, srl, dst, src, 8); /* dst = src >> 8 */ ++ emit(ctx, and, dst, dst, mask); /* dst = dst & 0x00ff00ff */ ++ emit(ctx, or, dst, 
dst, tmp); /* dst = dst | tmp */ ++} ++ ++/* Swap half words in a register word */ ++static void emit_swap16_r(struct jit_context *ctx, u8 dst, u8 src) ++{ ++ u8 tmp = MIPS_R_T9; ++ ++ emit(ctx, sll, tmp, src, 16); /* tmp = src << 16 */ ++ emit(ctx, srl, dst, src, 16); /* dst = src >> 16 */ ++ emit(ctx, or, dst, dst, tmp); /* dst = dst | tmp */ ++} ++ ++/* Swap bytes and truncate a register double word, word or half word */ ++static void emit_bswap_r64(struct jit_context *ctx, const u8 dst[], u32 width) ++{ ++ u8 tmp = MIPS_R_T8; ++ ++ switch (width) { ++ /* Swap bytes in a double word */ ++ case 64: ++ if (cpu_has_mips32r2 || cpu_has_mips32r6) { ++ emit(ctx, rotr, tmp, hi(dst), 16); ++ emit(ctx, rotr, hi(dst), lo(dst), 16); ++ emit(ctx, wsbh, lo(dst), tmp); ++ emit(ctx, wsbh, hi(dst), hi(dst)); ++ } else { ++ emit_swap16_r(ctx, tmp, lo(dst)); ++ emit_swap16_r(ctx, lo(dst), hi(dst)); ++ emit(ctx, move, hi(dst), tmp); ++ ++ emit(ctx, lui, tmp, 0xff); /* tmp = 0x00ff0000 */ ++ emit(ctx, ori, tmp, tmp, 0xff); /* tmp = 0x00ff00ff */ ++ emit_swap8_r(ctx, lo(dst), lo(dst), tmp); ++ emit_swap8_r(ctx, hi(dst), hi(dst), tmp); ++ } ++ break; ++ /* Swap bytes in a word */ ++ /* Swap bytes in a half word */ ++ case 32: ++ case 16: ++ emit_bswap_r(ctx, lo(dst), width); ++ emit(ctx, move, hi(dst), MIPS_R_ZERO); ++ break; ++ } ++ clobber_reg64(ctx, dst); ++} ++ ++/* Truncate a register double word, word or half word */ ++static void emit_trunc_r64(struct jit_context *ctx, const u8 dst[], u32 width) ++{ ++ switch (width) { ++ case 64: ++ break; ++ /* Zero-extend a word */ ++ case 32: ++ emit(ctx, move, hi(dst), MIPS_R_ZERO); ++ clobber_reg(ctx, hi(dst)); ++ break; ++ /* Zero-extend a half word */ ++ case 16: ++ emit(ctx, move, hi(dst), MIPS_R_ZERO); ++ emit(ctx, andi, lo(dst), lo(dst), 0xffff); ++ clobber_reg64(ctx, dst); ++ break; ++ } ++} ++ ++/* Load operation: dst = *(size*)(src + off) */ ++static void emit_ldx(struct jit_context *ctx, ++ const u8 dst[], u8 src, s16 off, u8 size) ++{ ++ switch (size) { ++ /* Load a byte */ ++ case BPF_B: ++ emit(ctx, lbu, lo(dst), off, src); ++ emit(ctx, move, hi(dst), MIPS_R_ZERO); ++ break; ++ /* Load a half word */ ++ case BPF_H: ++ emit(ctx, lhu, lo(dst), off, src); ++ emit(ctx, move, hi(dst), MIPS_R_ZERO); ++ break; ++ /* Load a word */ ++ case BPF_W: ++ emit(ctx, lw, lo(dst), off, src); ++ emit(ctx, move, hi(dst), MIPS_R_ZERO); ++ break; ++ /* Load a double word */ ++ case BPF_DW: ++ if (dst[1] == src) { ++ emit(ctx, lw, dst[0], off + 4, src); ++ emit(ctx, lw, dst[1], off, src); ++ } else { ++ emit(ctx, lw, dst[1], off, src); ++ emit(ctx, lw, dst[0], off + 4, src); ++ } ++ emit_load_delay(ctx); ++ break; ++ } ++ clobber_reg64(ctx, dst); ++} ++ ++/* Store operation: *(size *)(dst + off) = src */ ++static void emit_stx(struct jit_context *ctx, ++ const u8 dst, const u8 src[], s16 off, u8 size) ++{ ++ switch (size) { ++ /* Store a byte */ ++ case BPF_B: ++ emit(ctx, sb, lo(src), off, dst); ++ break; ++ /* Store a half word */ ++ case BPF_H: ++ emit(ctx, sh, lo(src), off, dst); ++ break; ++ /* Store a word */ ++ case BPF_W: ++ emit(ctx, sw, lo(src), off, dst); ++ break; ++ /* Store a double word */ ++ case BPF_DW: ++ emit(ctx, sw, src[1], off, dst); ++ emit(ctx, sw, src[0], off + 4, dst); ++ break; ++ } ++} ++ ++/* Atomic read-modify-write (32-bit, non-ll/sc fallback) */ ++static void emit_atomic_r32(struct jit_context *ctx, ++ u8 dst, u8 src, s16 off, u8 code) ++{ ++ u32 exclude = 0; ++ u32 addr = 0; ++ ++ /* Push caller-saved registers on stack */ ++ 
push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, ++ 0, JIT_RESERVED_STACK); ++ /* ++ * Argument 1: dst+off if xchg, otherwise src, passed in register a0 ++ * Argument 2: src if xchg, othersize dst+off, passed in register a1 ++ */ ++ emit(ctx, move, MIPS_R_T9, dst); ++ emit(ctx, move, MIPS_R_A0, src); ++ emit(ctx, addiu, MIPS_R_A1, MIPS_R_T9, off); ++ ++ /* Emit function call */ ++ switch (code) { ++ case BPF_ADD: ++ addr = (u32)&atomic_add; ++ break; ++ case BPF_SUB: ++ addr = (u32)&atomic_sub; ++ break; ++ case BPF_OR: ++ addr = (u32)&atomic_or; ++ break; ++ case BPF_AND: ++ addr = (u32)&atomic_and; ++ break; ++ case BPF_XOR: ++ addr = (u32)&atomic_xor; ++ break; ++ } ++ emit_mov_i(ctx, MIPS_R_T9, addr); ++ emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9); ++ emit(ctx, nop); /* Delay slot */ ++ ++ /* Restore caller-saved registers, except any fetched value */ ++ pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, ++ exclude, JIT_RESERVED_STACK); ++ emit_load_delay(ctx); ++ clobber_reg(ctx, MIPS_R_RA); ++} ++ ++/* Atomic read-modify-write (64-bit) */ ++static void emit_atomic_r64(struct jit_context *ctx, ++ u8 dst, const u8 src[], s16 off, u8 code) ++{ ++ const u8 *r1 = bpf2mips32[BPF_REG_1]; /* Mapped to a0-a1 */ ++ u32 exclude = 0; ++ u32 addr = 0; ++ ++ /* Push caller-saved registers on stack */ ++ push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, ++ 0, JIT_RESERVED_STACK); ++ /* ++ * Argument 1: 64-bit src, passed in registers a0-a1 ++ * Argument 2: 32-bit dst+off, passed in register a2 ++ */ ++ emit(ctx, move, MIPS_R_T9, dst); ++ emit(ctx, move, r1[0], src[0]); ++ emit(ctx, move, r1[1], src[1]); ++ emit(ctx, addiu, MIPS_R_A2, MIPS_R_T9, off); ++ ++ /* Emit function call */ ++ switch (code) { ++ case BPF_ADD: ++ addr = (u32)&atomic64_add; ++ break; ++ case BPF_SUB: ++ addr = (u32)&atomic64_sub; ++ break; ++ case BPF_OR: ++ addr = (u32)&atomic64_or; ++ break; ++ case BPF_AND: ++ addr = (u32)&atomic64_and; ++ break; ++ case BPF_XOR: ++ addr = (u32)&atomic64_xor; ++ break; ++ } ++ emit_mov_i(ctx, MIPS_R_T9, addr); ++ emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9); ++ emit(ctx, nop); /* Delay slot */ ++ ++ /* Restore caller-saved registers, except any fetched value */ ++ pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, ++ exclude, JIT_RESERVED_STACK); ++ emit_load_delay(ctx); ++ clobber_reg(ctx, MIPS_R_RA); ++} ++ ++/* ++ * Conditional movz or an emulated equivalent. ++ * Note that the rs register may be modified. ++ */ ++static void emit_movz_r(struct jit_context *ctx, u8 rd, u8 rs, u8 rt) ++{ ++ if (cpu_has_mips_2) { ++ emit(ctx, movz, rd, rs, rt); /* rd = rt ? rd : rs */ ++ } else if (cpu_has_mips32r6) { ++ if (rs != MIPS_R_ZERO) ++ emit(ctx, seleqz, rs, rs, rt); /* rs = 0 if rt == 0 */ ++ emit(ctx, selnez, rd, rd, rt); /* rd = 0 if rt != 0 */ ++ if (rs != MIPS_R_ZERO) ++ emit(ctx, or, rd, rd, rs); /* rd = rd | rs */ ++ } else { ++ emit(ctx, bnez, rt, 8); /* PC += 8 if rd != 0 */ ++ emit(ctx, nop); /* +0: delay slot */ ++ emit(ctx, or, rd, rs, MIPS_R_ZERO); /* +4: rd = rs */ ++ } ++ clobber_reg(ctx, rd); ++ clobber_reg(ctx, rs); ++} ++ ++/* ++ * Conditional movn or an emulated equivalent. ++ * Note that the rs register may be modified. ++ */ ++static void emit_movn_r(struct jit_context *ctx, u8 rd, u8 rs, u8 rt) ++{ ++ if (cpu_has_mips_2) { ++ emit(ctx, movn, rd, rs, rt); /* rd = rt ? 
rs : rd */ ++ } else if (cpu_has_mips32r6) { ++ if (rs != MIPS_R_ZERO) ++ emit(ctx, selnez, rs, rs, rt); /* rs = 0 if rt == 0 */ ++ emit(ctx, seleqz, rd, rd, rt); /* rd = 0 if rt != 0 */ ++ if (rs != MIPS_R_ZERO) ++ emit(ctx, or, rd, rd, rs); /* rd = rd | rs */ ++ } else { ++ emit(ctx, beqz, rt, 8); /* PC += 8 if rd == 0 */ ++ emit(ctx, nop); /* +0: delay slot */ ++ emit(ctx, or, rd, rs, MIPS_R_ZERO); /* +4: rd = rs */ ++ } ++ clobber_reg(ctx, rd); ++ clobber_reg(ctx, rs); ++} ++ ++/* Emulation of 64-bit sltiu rd, rs, imm, where imm may be S32_MAX + 1 */ ++static void emit_sltiu_r64(struct jit_context *ctx, u8 rd, ++ const u8 rs[], s64 imm) ++{ ++ u8 tmp = MIPS_R_T9; ++ ++ if (imm < 0) { ++ emit_mov_i(ctx, rd, imm); /* rd = imm */ ++ emit(ctx, sltu, rd, lo(rs), rd); /* rd = rsl < rd */ ++ emit(ctx, sltiu, tmp, hi(rs), -1); /* tmp = rsh < ~0U */ ++ emit(ctx, or, rd, rd, tmp); /* rd = rd | tmp */ ++ } else { /* imm >= 0 */ ++ if (imm > 0x7fff) { ++ emit_mov_i(ctx, rd, (s32)imm); /* rd = imm */ ++ emit(ctx, sltu, rd, lo(rs), rd); /* rd = rsl < rd */ ++ } else { ++ emit(ctx, sltiu, rd, lo(rs), imm); /* rd = rsl < imm */ ++ } ++ emit_movn_r(ctx, rd, MIPS_R_ZERO, hi(rs)); /* rd = 0 if rsh */ ++ } ++} ++ ++/* Emulation of 64-bit sltu rd, rs, rt */ ++static void emit_sltu_r64(struct jit_context *ctx, u8 rd, ++ const u8 rs[], const u8 rt[]) ++{ ++ u8 tmp = MIPS_R_T9; ++ ++ emit(ctx, sltu, rd, lo(rs), lo(rt)); /* rd = rsl < rtl */ ++ emit(ctx, subu, tmp, hi(rs), hi(rt)); /* tmp = rsh - rth */ ++ emit_movn_r(ctx, rd, MIPS_R_ZERO, tmp); /* rd = 0 if tmp != 0 */ ++ emit(ctx, sltu, tmp, hi(rs), hi(rt)); /* tmp = rsh < rth */ ++ emit(ctx, or, rd, rd, tmp); /* rd = rd | tmp */ ++} ++ ++/* Emulation of 64-bit slti rd, rs, imm, where imm may be S32_MAX + 1 */ ++static void emit_slti_r64(struct jit_context *ctx, u8 rd, ++ const u8 rs[], s64 imm) ++{ ++ u8 t1 = MIPS_R_T8; ++ u8 t2 = MIPS_R_T9; ++ u8 cmp; ++ ++ /* ++ * if ((rs < 0) ^ (imm < 0)) t1 = imm >u rsl ++ * else t1 = rsl <u imm ++ */ ++ emit_mov_i(ctx, rd, (s32)imm); ++ emit(ctx, sltu, t1, lo(rs), rd); /* t1 = rsl <u imm */ ++ emit(ctx, sltu, t2, rd, lo(rs)); /* t2 = imm <u rsl */ ++ emit(ctx, srl, rd, hi(rs), 31); /* rd = rsh >> 31 */ ++ if (imm < 0) ++ emit_movz_r(ctx, t1, t2, rd); /* t1 = rd ? t1 : t2 */ ++ else ++ emit_movn_r(ctx, t1, t2, rd); /* t1 = rd ? t2 : t1 */ ++ /* ++ * if ((imm < 0 && rsh != 0xffffffff) || ++ * (imm >= 0 && rsh != 0)) ++ * t1 = 0 ++ */ ++ if (imm < 0) { ++ emit(ctx, addiu, rd, hi(rs), 1); /* rd = rsh + 1 */ ++ cmp = rd; ++ } else { /* imm >= 0 */ ++ cmp = hi(rs); ++ } ++ emit_movn_r(ctx, t1, MIPS_R_ZERO, cmp); /* t1 = 0 if cmp != 0 */ ++ ++ /* ++ * if (imm < 0) rd = rsh < -1 ++ * else rd = rsh != 0 ++ * rd = rd | t1 ++ */ ++ emit(ctx, slti, rd, hi(rs), imm < 0 ? -1 : 0); /* rd = rsh < hi(imm) */ ++ emit(ctx, or, rd, rd, t1); /* rd = rd | t1 */ ++} ++ ++/* Emulation of 64-bit(slt rd, rs, rt) */ ++static void emit_slt_r64(struct jit_context *ctx, u8 rd, ++ const u8 rs[], const u8 rt[]) ++{ ++ u8 t1 = MIPS_R_T7; ++ u8 t2 = MIPS_R_T8; ++ u8 t3 = MIPS_R_T9; ++ ++ /* ++ * if ((rs < 0) ^ (rt < 0)) t1 = rtl <u rsl ++ * else t1 = rsl <u rtl ++ * if (rsh == rth) t1 = 0 ++ */ ++ emit(ctx, sltu, t1, lo(rs), lo(rt)); /* t1 = rsl <u rtl */ ++ emit(ctx, sltu, t2, lo(rt), lo(rs)); /* t2 = rtl <u rsl */ ++ emit(ctx, xor, t3, hi(rs), hi(rt)); /* t3 = rlh ^ rth */ ++ emit(ctx, srl, rd, t3, 31); /* rd = t3 >> 31 */ ++ emit_movn_r(ctx, t1, t2, rd); /* t1 = rd ? 
t2 : t1 */ ++ emit_movn_r(ctx, t1, MIPS_R_ZERO, t3); /* t1 = 0 if t3 != 0 */ ++ ++ /* rd = (rsh < rth) | t1 */ ++ emit(ctx, slt, rd, hi(rs), hi(rt)); /* rd = rsh <s rth */ ++ emit(ctx, or, rd, rd, t1); /* rd = rd | t1 */ ++} ++ ++/* Jump immediate (64-bit) */ ++static void emit_jmp_i64(struct jit_context *ctx, ++ const u8 dst[], s32 imm, s32 off, u8 op) ++{ ++ u8 tmp = MIPS_R_T6; ++ ++ switch (op) { ++ /* No-op, used internally for branch optimization */ ++ case JIT_JNOP: ++ break; ++ /* PC += off if dst == imm */ ++ /* PC += off if dst != imm */ ++ case BPF_JEQ: ++ case BPF_JNE: ++ if (imm >= -0x7fff && imm <= 0x8000) { ++ emit(ctx, addiu, tmp, lo(dst), -imm); ++ } else if ((u32)imm <= 0xffff) { ++ emit(ctx, xori, tmp, lo(dst), imm); ++ } else { /* Register fallback */ ++ emit_mov_i(ctx, tmp, imm); ++ emit(ctx, xor, tmp, lo(dst), tmp); ++ } ++ if (imm < 0) { /* Compare sign extension */ ++ emit(ctx, addu, MIPS_R_T9, hi(dst), 1); ++ emit(ctx, or, tmp, tmp, MIPS_R_T9); ++ } else { /* Compare zero extension */ ++ emit(ctx, or, tmp, tmp, hi(dst)); ++ } ++ if (op == BPF_JEQ) ++ emit(ctx, beqz, tmp, off); ++ else /* BPF_JNE */ ++ emit(ctx, bnez, tmp, off); ++ break; ++ /* PC += off if dst & imm */ ++ /* PC += off if (dst & imm) == 0 (not in BPF, used for long jumps) */ ++ case BPF_JSET: ++ case JIT_JNSET: ++ if ((u32)imm <= 0xffff) { ++ emit(ctx, andi, tmp, lo(dst), imm); ++ } else { /* Register fallback */ ++ emit_mov_i(ctx, tmp, imm); ++ emit(ctx, and, tmp, lo(dst), tmp); ++ } ++ if (imm < 0) /* Sign-extension pulls in high word */ ++ emit(ctx, or, tmp, tmp, hi(dst)); ++ if (op == BPF_JSET) ++ emit(ctx, bnez, tmp, off); ++ else /* JIT_JNSET */ ++ emit(ctx, beqz, tmp, off); ++ break; ++ /* PC += off if dst > imm */ ++ case BPF_JGT: ++ emit_sltiu_r64(ctx, tmp, dst, (s64)imm + 1); ++ emit(ctx, beqz, tmp, off); ++ break; ++ /* PC += off if dst >= imm */ ++ case BPF_JGE: ++ emit_sltiu_r64(ctx, tmp, dst, imm); ++ emit(ctx, beqz, tmp, off); ++ break; ++ /* PC += off if dst < imm */ ++ case BPF_JLT: ++ emit_sltiu_r64(ctx, tmp, dst, imm); ++ emit(ctx, bnez, tmp, off); ++ break; ++ /* PC += off if dst <= imm */ ++ case BPF_JLE: ++ emit_sltiu_r64(ctx, tmp, dst, (s64)imm + 1); ++ emit(ctx, bnez, tmp, off); ++ break; ++ /* PC += off if dst > imm (signed) */ ++ case BPF_JSGT: ++ emit_slti_r64(ctx, tmp, dst, (s64)imm + 1); ++ emit(ctx, beqz, tmp, off); ++ break; ++ /* PC += off if dst >= imm (signed) */ ++ case BPF_JSGE: ++ emit_slti_r64(ctx, tmp, dst, imm); ++ emit(ctx, beqz, tmp, off); ++ break; ++ /* PC += off if dst < imm (signed) */ ++ case BPF_JSLT: ++ emit_slti_r64(ctx, tmp, dst, imm); ++ emit(ctx, bnez, tmp, off); ++ break; ++ /* PC += off if dst <= imm (signed) */ ++ case BPF_JSLE: ++ emit_slti_r64(ctx, tmp, dst, (s64)imm + 1); ++ emit(ctx, bnez, tmp, off); ++ break; ++ } ++} ++ ++/* Jump register (64-bit) */ ++static void emit_jmp_r64(struct jit_context *ctx, ++ const u8 dst[], const u8 src[], s32 off, u8 op) ++{ ++ u8 t1 = MIPS_R_T6; ++ u8 t2 = MIPS_R_T7; ++ ++ switch (op) { ++ /* No-op, used internally for branch optimization */ ++ case JIT_JNOP: ++ break; ++ /* PC += off if dst == src */ ++ /* PC += off if dst != src */ ++ case BPF_JEQ: ++ case BPF_JNE: ++ emit(ctx, subu, t1, lo(dst), lo(src)); ++ emit(ctx, subu, t2, hi(dst), hi(src)); ++ emit(ctx, or, t1, t1, t2); ++ if (op == BPF_JEQ) ++ emit(ctx, beqz, t1, off); ++ else /* BPF_JNE */ ++ emit(ctx, bnez, t1, off); ++ break; ++ /* PC += off if dst & src */ ++ /* PC += off if (dst & imm) == 0 (not in BPF, used for long jumps) */ ++ case 
BPF_JSET: ++ case JIT_JNSET: ++ emit(ctx, and, t1, lo(dst), lo(src)); ++ emit(ctx, and, t2, hi(dst), hi(src)); ++ emit(ctx, or, t1, t1, t2); ++ if (op == BPF_JSET) ++ emit(ctx, bnez, t1, off); ++ else /* JIT_JNSET */ ++ emit(ctx, beqz, t1, off); ++ break; ++ /* PC += off if dst > src */ ++ case BPF_JGT: ++ emit_sltu_r64(ctx, t1, src, dst); ++ emit(ctx, bnez, t1, off); ++ break; ++ /* PC += off if dst >= src */ ++ case BPF_JGE: ++ emit_sltu_r64(ctx, t1, dst, src); ++ emit(ctx, beqz, t1, off); ++ break; ++ /* PC += off if dst < src */ ++ case BPF_JLT: ++ emit_sltu_r64(ctx, t1, dst, src); ++ emit(ctx, bnez, t1, off); ++ break; ++ /* PC += off if dst <= src */ ++ case BPF_JLE: ++ emit_sltu_r64(ctx, t1, src, dst); ++ emit(ctx, beqz, t1, off); ++ break; ++ /* PC += off if dst > src (signed) */ ++ case BPF_JSGT: ++ emit_slt_r64(ctx, t1, src, dst); ++ emit(ctx, bnez, t1, off); ++ break; ++ /* PC += off if dst >= src (signed) */ ++ case BPF_JSGE: ++ emit_slt_r64(ctx, t1, dst, src); ++ emit(ctx, beqz, t1, off); ++ break; ++ /* PC += off if dst < src (signed) */ ++ case BPF_JSLT: ++ emit_slt_r64(ctx, t1, dst, src); ++ emit(ctx, bnez, t1, off); ++ break; ++ /* PC += off if dst <= src (signed) */ ++ case BPF_JSLE: ++ emit_slt_r64(ctx, t1, src, dst); ++ emit(ctx, beqz, t1, off); ++ break; ++ } ++} ++ ++/* Function call */ ++static int emit_call(struct jit_context *ctx, const struct bpf_insn *insn) ++{ ++ bool fixed; ++ u64 addr; ++ ++ /* Decode the call address */ ++ if (bpf_jit_get_func_addr(ctx->program, insn, false, ++ &addr, &fixed) < 0) ++ return -1; ++ if (!fixed) ++ return -1; ++ ++ /* Push stack arguments */ ++ push_regs(ctx, JIT_STACK_REGS, 0, JIT_RESERVED_STACK); ++ ++ /* Emit function call */ ++ emit_mov_i(ctx, MIPS_R_T9, addr); ++ emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9); ++ emit(ctx, nop); /* Delay slot */ ++ ++ clobber_reg(ctx, MIPS_R_RA); ++ clobber_reg(ctx, MIPS_R_V0); ++ clobber_reg(ctx, MIPS_R_V1); ++ return 0; ++} ++ ++/* Function tail call */ ++static int emit_tail_call(struct jit_context *ctx) ++{ ++ u8 ary = lo(bpf2mips32[BPF_REG_2]); ++ u8 ind = lo(bpf2mips32[BPF_REG_3]); ++ u8 t1 = MIPS_R_T8; ++ u8 t2 = MIPS_R_T9; ++ int off; ++ ++ /* ++ * Tail call: ++ * eBPF R1 - function argument (context ptr), passed in a0-a1 ++ * eBPF R2 - ptr to object with array of function entry points ++ * eBPF R3 - array index of function to be called ++ * stack[sz] - remaining tail call count, initialized in prologue ++ */ ++ ++ /* if (ind >= ary->map.max_entries) goto out */ ++ off = offsetof(struct bpf_array, map.max_entries); ++ if (off > 0x7fff) ++ return -1; ++ emit(ctx, lw, t1, off, ary); /* t1 = ary->map.max_entries*/ ++ emit_load_delay(ctx); /* Load delay slot */ ++ emit(ctx, sltu, t1, ind, t1); /* t1 = ind < t1 */ ++ emit(ctx, beqz, t1, get_offset(ctx, 1)); /* PC += off(1) if t1 == 0 */ ++ /* (next insn delay slot) */ ++ /* if (TCC-- <= 0) goto out */ ++ emit(ctx, lw, t2, ctx->stack_size, MIPS_R_SP); /* t2 = *(SP + size) */ ++ emit_load_delay(ctx); /* Load delay slot */ ++ emit(ctx, blez, t2, get_offset(ctx, 1)); /* PC += off(1) if t2 < 0 */ ++ emit(ctx, addiu, t2, t2, -1); /* t2-- (delay slot) */ ++ emit(ctx, sw, t2, ctx->stack_size, MIPS_R_SP); /* *(SP + size) = t2 */ ++ ++ /* prog = ary->ptrs[ind] */ ++ off = offsetof(struct bpf_array, ptrs); ++ if (off > 0x7fff) ++ return -1; ++ emit(ctx, sll, t1, ind, 2); /* t1 = ind << 2 */ ++ emit(ctx, addu, t1, t1, ary); /* t1 += ary */ ++ emit(ctx, lw, t2, off, t1); /* t2 = *(t1 + off) */ ++ emit_load_delay(ctx); /* Load delay slot */ ++ ++ /* if (prog 
== 0) goto out */ ++ emit(ctx, beqz, t2, get_offset(ctx, 1)); /* PC += off(1) if t2 == 0 */ ++ emit(ctx, nop); /* Delay slot */ ++ ++ /* func = prog->bpf_func + 8 (prologue skip offset) */ ++ off = offsetof(struct bpf_prog, bpf_func); ++ if (off > 0x7fff) ++ return -1; ++ emit(ctx, lw, t1, off, t2); /* t1 = *(t2 + off) */ ++ emit_load_delay(ctx); /* Load delay slot */ ++ emit(ctx, addiu, t1, t1, JIT_TCALL_SKIP); /* t1 += skip (8 or 12) */ ++ ++ /* goto func */ ++ build_epilogue(ctx, t1); ++ return 0; ++} ++ ++/* ++ * Stack frame layout for a JITed program (stack grows down). ++ * ++ * Higher address : Caller's stack frame : ++ * :----------------------------: ++ * : 64-bit eBPF args r3-r5 : ++ * :----------------------------: ++ * : Reserved / tail call count : ++ * +============================+ <--- MIPS sp before call ++ * | Callee-saved registers, | ++ * | including RA and FP | ++ * +----------------------------+ <--- eBPF FP (MIPS zero,fp) ++ * | Local eBPF variables | ++ * | allocated by program | ++ * +----------------------------+ ++ * | Reserved for caller-saved | ++ * | registers | ++ * +----------------------------+ ++ * | Reserved for 64-bit eBPF | ++ * | args r3-r5 & args passed | ++ * | on stack in kernel calls | ++ * Lower address +============================+ <--- MIPS sp ++ */ ++ ++/* Build program prologue to set up the stack and registers */ ++void build_prologue(struct jit_context *ctx) ++{ ++ const u8 *r1 = bpf2mips32[BPF_REG_1]; ++ const u8 *fp = bpf2mips32[BPF_REG_FP]; ++ int stack, saved, locals, reserved; ++ ++ /* ++ * The first two instructions initialize TCC in the reserved (for us) ++ * 16-byte area in the parent's stack frame. On a tail call, the ++ * calling function jumps into the prologue after these instructions. ++ */ ++ emit(ctx, ori, MIPS_R_T9, MIPS_R_ZERO, ++ min(MAX_TAIL_CALL_CNT + 1, 0xffff)); ++ emit(ctx, sw, MIPS_R_T9, 0, MIPS_R_SP); ++ ++ /* ++ * Register eBPF R1 contains the 32-bit context pointer argument. ++ * A 32-bit argument is always passed in MIPS register a0, regardless ++ * of CPU endianness. Initialize R1 accordingly and zero-extend. ++ */ ++#ifdef __BIG_ENDIAN ++ emit(ctx, move, lo(r1), MIPS_R_A0); ++#endif ++ ++ /* === Entry-point for tail calls === */ ++ ++ /* Zero-extend the 32-bit argument */ ++ emit(ctx, move, hi(r1), MIPS_R_ZERO); ++ ++ /* If the eBPF frame pointer was accessed it must be saved */ ++ if (ctx->accessed & BIT(BPF_REG_FP)) ++ clobber_reg64(ctx, fp); ++ ++ /* Compute the stack space needed for callee-saved registers */ ++ saved = hweight32(ctx->clobbered & JIT_CALLEE_REGS) * sizeof(u32); ++ saved = ALIGN(saved, MIPS_STACK_ALIGNMENT); ++ ++ /* Stack space used by eBPF program local data */ ++ locals = ALIGN(ctx->program->aux->stack_depth, MIPS_STACK_ALIGNMENT); ++ ++ /* ++ * If we are emitting function calls, reserve extra stack space for ++ * caller-saved registers and function arguments passed on the stack. ++ * The required space is computed automatically during resource ++ * usage discovery (pass 1). 
++ */ ++ reserved = ctx->stack_used; ++ ++ /* Allocate the stack frame */ ++ stack = ALIGN(saved + locals + reserved, MIPS_STACK_ALIGNMENT); ++ emit(ctx, addiu, MIPS_R_SP, MIPS_R_SP, -stack); ++ ++ /* Store callee-saved registers on stack */ ++ push_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, stack - saved); ++ ++ /* Initialize the eBPF frame pointer if accessed */ ++ if (ctx->accessed & BIT(BPF_REG_FP)) ++ emit(ctx, addiu, lo(fp), MIPS_R_SP, stack - saved); ++ ++ ctx->saved_size = saved; ++ ctx->stack_size = stack; ++} ++ ++/* Build the program epilogue to restore the stack and registers */ ++void build_epilogue(struct jit_context *ctx, int dest_reg) ++{ ++ /* Restore callee-saved registers from stack */ ++ pop_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, ++ ctx->stack_size - ctx->saved_size); ++ /* ++ * A 32-bit return value is always passed in MIPS register v0, ++ * but on big-endian targets the low part of R0 is mapped to v1. ++ */ ++#ifdef __BIG_ENDIAN ++ emit(ctx, move, MIPS_R_V0, MIPS_R_V1); ++#endif ++ ++ /* Jump to the return address and adjust the stack pointer */ ++ emit(ctx, jr, dest_reg); ++ emit(ctx, addiu, MIPS_R_SP, MIPS_R_SP, ctx->stack_size); ++} ++ ++/* Build one eBPF instruction */ ++int build_insn(const struct bpf_insn *insn, struct jit_context *ctx) ++{ ++ const u8 *dst = bpf2mips32[insn->dst_reg]; ++ const u8 *src = bpf2mips32[insn->src_reg]; ++ const u8 *tmp = bpf2mips32[JIT_REG_TMP]; ++ u8 code = insn->code; ++ s16 off = insn->off; ++ s32 imm = insn->imm; ++ s32 val, rel; ++ u8 alu, jmp; ++ ++ switch (code) { ++ /* ALU operations */ ++ /* dst = imm */ ++ case BPF_ALU | BPF_MOV | BPF_K: ++ emit_mov_i(ctx, lo(dst), imm); ++ emit_zext_ver(ctx, dst); ++ break; ++ /* dst = src */ ++ case BPF_ALU | BPF_MOV | BPF_X: ++ if (imm == 1) { ++ /* Special mov32 for zext */ ++ emit_mov_i(ctx, hi(dst), 0); ++ } else { ++ emit_mov_r(ctx, lo(dst), lo(src)); ++ emit_zext_ver(ctx, dst); ++ } ++ break; ++ /* dst = -dst */ ++ case BPF_ALU | BPF_NEG: ++ emit_alu_i(ctx, lo(dst), 0, BPF_NEG); ++ emit_zext_ver(ctx, dst); ++ break; ++ /* dst = dst & imm */ ++ /* dst = dst | imm */ ++ /* dst = dst ^ imm */ ++ /* dst = dst << imm */ ++ /* dst = dst >> imm */ ++ /* dst = dst >> imm (arithmetic) */ ++ /* dst = dst + imm */ ++ /* dst = dst - imm */ ++ /* dst = dst * imm */ ++ /* dst = dst / imm */ ++ /* dst = dst % imm */ ++ case BPF_ALU | BPF_OR | BPF_K: ++ case BPF_ALU | BPF_AND | BPF_K: ++ case BPF_ALU | BPF_XOR | BPF_K: ++ case BPF_ALU | BPF_LSH | BPF_K: ++ case BPF_ALU | BPF_RSH | BPF_K: ++ case BPF_ALU | BPF_ARSH | BPF_K: ++ case BPF_ALU | BPF_ADD | BPF_K: ++ case BPF_ALU | BPF_SUB | BPF_K: ++ case BPF_ALU | BPF_MUL | BPF_K: ++ case BPF_ALU | BPF_DIV | BPF_K: ++ case BPF_ALU | BPF_MOD | BPF_K: ++ if (!valid_alu_i(BPF_OP(code), imm)) { ++ emit_mov_i(ctx, MIPS_R_T6, imm); ++ emit_alu_r(ctx, lo(dst), MIPS_R_T6, BPF_OP(code)); ++ } else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) { ++ emit_alu_i(ctx, lo(dst), val, alu); ++ } ++ emit_zext_ver(ctx, dst); ++ break; ++ /* dst = dst & src */ ++ /* dst = dst | src */ ++ /* dst = dst ^ src */ ++ /* dst = dst << src */ ++ /* dst = dst >> src */ ++ /* dst = dst >> src (arithmetic) */ ++ /* dst = dst + src */ ++ /* dst = dst - src */ ++ /* dst = dst * src */ ++ /* dst = dst / src */ ++ /* dst = dst % src */ ++ case BPF_ALU | BPF_AND | BPF_X: ++ case BPF_ALU | BPF_OR | BPF_X: ++ case BPF_ALU | BPF_XOR | BPF_X: ++ case BPF_ALU | BPF_LSH | BPF_X: ++ case BPF_ALU | BPF_RSH | BPF_X: ++ case BPF_ALU | BPF_ARSH | BPF_X: ++ case BPF_ALU | BPF_ADD | 
BPF_X: ++ case BPF_ALU | BPF_SUB | BPF_X: ++ case BPF_ALU | BPF_MUL | BPF_X: ++ case BPF_ALU | BPF_DIV | BPF_X: ++ case BPF_ALU | BPF_MOD | BPF_X: ++ emit_alu_r(ctx, lo(dst), lo(src), BPF_OP(code)); ++ emit_zext_ver(ctx, dst); ++ break; ++ /* dst = imm (64-bit) */ ++ case BPF_ALU64 | BPF_MOV | BPF_K: ++ emit_mov_se_i64(ctx, dst, imm); ++ break; ++ /* dst = src (64-bit) */ ++ case BPF_ALU64 | BPF_MOV | BPF_X: ++ emit_mov_r(ctx, lo(dst), lo(src)); ++ emit_mov_r(ctx, hi(dst), hi(src)); ++ break; ++ /* dst = -dst (64-bit) */ ++ case BPF_ALU64 | BPF_NEG: ++ emit_neg_i64(ctx, dst); ++ break; ++ /* dst = dst & imm (64-bit) */ ++ case BPF_ALU64 | BPF_AND | BPF_K: ++ emit_alu_i64(ctx, dst, imm, BPF_OP(code)); ++ break; ++ /* dst = dst | imm (64-bit) */ ++ /* dst = dst ^ imm (64-bit) */ ++ /* dst = dst + imm (64-bit) */ ++ /* dst = dst - imm (64-bit) */ ++ case BPF_ALU64 | BPF_OR | BPF_K: ++ case BPF_ALU64 | BPF_XOR | BPF_K: ++ case BPF_ALU64 | BPF_ADD | BPF_K: ++ case BPF_ALU64 | BPF_SUB | BPF_K: ++ if (imm) ++ emit_alu_i64(ctx, dst, imm, BPF_OP(code)); ++ break; ++ /* dst = dst << imm (64-bit) */ ++ /* dst = dst >> imm (64-bit) */ ++ /* dst = dst >> imm (64-bit, arithmetic) */ ++ case BPF_ALU64 | BPF_LSH | BPF_K: ++ case BPF_ALU64 | BPF_RSH | BPF_K: ++ case BPF_ALU64 | BPF_ARSH | BPF_K: ++ if (imm) ++ emit_shift_i64(ctx, dst, imm, BPF_OP(code)); ++ break; ++ /* dst = dst * imm (64-bit) */ ++ case BPF_ALU64 | BPF_MUL | BPF_K: ++ emit_mul_i64(ctx, dst, imm); ++ break; ++ /* dst = dst / imm (64-bit) */ ++ /* dst = dst % imm (64-bit) */ ++ case BPF_ALU64 | BPF_DIV | BPF_K: ++ case BPF_ALU64 | BPF_MOD | BPF_K: ++ /* ++ * Sign-extend the immediate value into a temporary register, ++ * and then do the operation on this register. ++ */ ++ emit_mov_se_i64(ctx, tmp, imm); ++ emit_divmod_r64(ctx, dst, tmp, BPF_OP(code)); ++ break; ++ /* dst = dst & src (64-bit) */ ++ /* dst = dst | src (64-bit) */ ++ /* dst = dst ^ src (64-bit) */ ++ /* dst = dst + src (64-bit) */ ++ /* dst = dst - src (64-bit) */ ++ case BPF_ALU64 | BPF_AND | BPF_X: ++ case BPF_ALU64 | BPF_OR | BPF_X: ++ case BPF_ALU64 | BPF_XOR | BPF_X: ++ case BPF_ALU64 | BPF_ADD | BPF_X: ++ case BPF_ALU64 | BPF_SUB | BPF_X: ++ emit_alu_r64(ctx, dst, src, BPF_OP(code)); ++ break; ++ /* dst = dst << src (64-bit) */ ++ /* dst = dst >> src (64-bit) */ ++ /* dst = dst >> src (64-bit, arithmetic) */ ++ case BPF_ALU64 | BPF_LSH | BPF_X: ++ case BPF_ALU64 | BPF_RSH | BPF_X: ++ case BPF_ALU64 | BPF_ARSH | BPF_X: ++ emit_shift_r64(ctx, dst, lo(src), BPF_OP(code)); ++ break; ++ /* dst = dst * src (64-bit) */ ++ case BPF_ALU64 | BPF_MUL | BPF_X: ++ emit_mul_r64(ctx, dst, src); ++ break; ++ /* dst = dst / src (64-bit) */ ++ /* dst = dst % src (64-bit) */ ++ case BPF_ALU64 | BPF_DIV | BPF_X: ++ case BPF_ALU64 | BPF_MOD | BPF_X: ++ emit_divmod_r64(ctx, dst, src, BPF_OP(code)); ++ break; ++ /* dst = htole(dst) */ ++ /* dst = htobe(dst) */ ++ case BPF_ALU | BPF_END | BPF_FROM_LE: ++ case BPF_ALU | BPF_END | BPF_FROM_BE: ++ if (BPF_SRC(code) == ++#ifdef __BIG_ENDIAN ++ BPF_FROM_LE ++#else ++ BPF_FROM_BE ++#endif ++ ) ++ emit_bswap_r64(ctx, dst, imm); ++ else ++ emit_trunc_r64(ctx, dst, imm); ++ break; ++ /* dst = imm64 */ ++ case BPF_LD | BPF_IMM | BPF_DW: ++ emit_mov_i(ctx, lo(dst), imm); ++ emit_mov_i(ctx, hi(dst), insn[1].imm); ++ return 1; ++ /* LDX: dst = *(size *)(src + off) */ ++ case BPF_LDX | BPF_MEM | BPF_W: ++ case BPF_LDX | BPF_MEM | BPF_H: ++ case BPF_LDX | BPF_MEM | BPF_B: ++ case BPF_LDX | BPF_MEM | BPF_DW: ++ emit_ldx(ctx, dst, lo(src), off, 
BPF_SIZE(code)); ++ break; ++ /* ST: *(size *)(dst + off) = imm */ ++ case BPF_ST | BPF_MEM | BPF_W: ++ case BPF_ST | BPF_MEM | BPF_H: ++ case BPF_ST | BPF_MEM | BPF_B: ++ case BPF_ST | BPF_MEM | BPF_DW: ++ switch (BPF_SIZE(code)) { ++ case BPF_DW: ++ /* Sign-extend immediate value into temporary reg */ ++ emit_mov_se_i64(ctx, tmp, imm); ++ break; ++ case BPF_W: ++ case BPF_H: ++ case BPF_B: ++ emit_mov_i(ctx, lo(tmp), imm); ++ break; ++ } ++ emit_stx(ctx, lo(dst), tmp, off, BPF_SIZE(code)); ++ break; ++ /* STX: *(size *)(dst + off) = src */ ++ case BPF_STX | BPF_MEM | BPF_W: ++ case BPF_STX | BPF_MEM | BPF_H: ++ case BPF_STX | BPF_MEM | BPF_B: ++ case BPF_STX | BPF_MEM | BPF_DW: ++ emit_stx(ctx, lo(dst), src, off, BPF_SIZE(code)); ++ break; ++ /* Speculation barrier */ ++ case BPF_ST | BPF_NOSPEC: ++ break; ++ /* Atomics */ ++ case BPF_STX | BPF_XADD | BPF_W: ++ switch (imm) { ++ case BPF_ADD: ++ case BPF_AND: ++ case BPF_OR: ++ case BPF_XOR: ++ if (cpu_has_llsc) ++ emit_atomic_r(ctx, lo(dst), lo(src), off, imm); ++ else /* Non-ll/sc fallback */ ++ emit_atomic_r32(ctx, lo(dst), lo(src), ++ off, imm); ++ break; ++ default: ++ goto notyet; ++ } ++ break; ++ /* Atomics (64-bit) */ ++ case BPF_STX | BPF_XADD | BPF_DW: ++ switch (imm) { ++ case BPF_ADD: ++ case BPF_AND: ++ case BPF_OR: ++ case BPF_XOR: ++ emit_atomic_r64(ctx, lo(dst), src, off, imm); ++ break; ++ default: ++ goto notyet; ++ } ++ break; ++ /* PC += off if dst == src */ ++ /* PC += off if dst != src */ ++ /* PC += off if dst & src */ ++ /* PC += off if dst > src */ ++ /* PC += off if dst >= src */ ++ /* PC += off if dst < src */ ++ /* PC += off if dst <= src */ ++ /* PC += off if dst > src (signed) */ ++ /* PC += off if dst >= src (signed) */ ++ /* PC += off if dst < src (signed) */ ++ /* PC += off if dst <= src (signed) */ ++ case BPF_JMP32 | BPF_JEQ | BPF_X: ++ case BPF_JMP32 | BPF_JNE | BPF_X: ++ case BPF_JMP32 | BPF_JSET | BPF_X: ++ case BPF_JMP32 | BPF_JGT | BPF_X: ++ case BPF_JMP32 | BPF_JGE | BPF_X: ++ case BPF_JMP32 | BPF_JLT | BPF_X: ++ case BPF_JMP32 | BPF_JLE | BPF_X: ++ case BPF_JMP32 | BPF_JSGT | BPF_X: ++ case BPF_JMP32 | BPF_JSGE | BPF_X: ++ case BPF_JMP32 | BPF_JSLT | BPF_X: ++ case BPF_JMP32 | BPF_JSLE | BPF_X: ++ if (off == 0) ++ break; ++ setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel); ++ emit_jmp_r(ctx, lo(dst), lo(src), rel, jmp); ++ if (finish_jmp(ctx, jmp, off) < 0) ++ goto toofar; ++ break; ++ /* PC += off if dst == imm */ ++ /* PC += off if dst != imm */ ++ /* PC += off if dst & imm */ ++ /* PC += off if dst > imm */ ++ /* PC += off if dst >= imm */ ++ /* PC += off if dst < imm */ ++ /* PC += off if dst <= imm */ ++ /* PC += off if dst > imm (signed) */ ++ /* PC += off if dst >= imm (signed) */ ++ /* PC += off if dst < imm (signed) */ ++ /* PC += off if dst <= imm (signed) */ ++ case BPF_JMP32 | BPF_JEQ | BPF_K: ++ case BPF_JMP32 | BPF_JNE | BPF_K: ++ case BPF_JMP32 | BPF_JSET | BPF_K: ++ case BPF_JMP32 | BPF_JGT | BPF_K: ++ case BPF_JMP32 | BPF_JGE | BPF_K: ++ case BPF_JMP32 | BPF_JLT | BPF_K: ++ case BPF_JMP32 | BPF_JLE | BPF_K: ++ case BPF_JMP32 | BPF_JSGT | BPF_K: ++ case BPF_JMP32 | BPF_JSGE | BPF_K: ++ case BPF_JMP32 | BPF_JSLT | BPF_K: ++ case BPF_JMP32 | BPF_JSLE | BPF_K: ++ if (off == 0) ++ break; ++ setup_jmp_i(ctx, imm, 32, BPF_OP(code), off, &jmp, &rel); ++ if (valid_jmp_i(jmp, imm)) { ++ emit_jmp_i(ctx, lo(dst), imm, rel, jmp); ++ } else { ++ /* Move large immediate to register */ ++ emit_mov_i(ctx, MIPS_R_T6, imm); ++ emit_jmp_r(ctx, lo(dst), MIPS_R_T6, rel, jmp); ++ } ++ if 
(finish_jmp(ctx, jmp, off) < 0) ++ goto toofar; ++ break; ++ /* PC += off if dst == src */ ++ /* PC += off if dst != src */ ++ /* PC += off if dst & src */ ++ /* PC += off if dst > src */ ++ /* PC += off if dst >= src */ ++ /* PC += off if dst < src */ ++ /* PC += off if dst <= src */ ++ /* PC += off if dst > src (signed) */ ++ /* PC += off if dst >= src (signed) */ ++ /* PC += off if dst < src (signed) */ ++ /* PC += off if dst <= src (signed) */ ++ case BPF_JMP | BPF_JEQ | BPF_X: ++ case BPF_JMP | BPF_JNE | BPF_X: ++ case BPF_JMP | BPF_JSET | BPF_X: ++ case BPF_JMP | BPF_JGT | BPF_X: ++ case BPF_JMP | BPF_JGE | BPF_X: ++ case BPF_JMP | BPF_JLT | BPF_X: ++ case BPF_JMP | BPF_JLE | BPF_X: ++ case BPF_JMP | BPF_JSGT | BPF_X: ++ case BPF_JMP | BPF_JSGE | BPF_X: ++ case BPF_JMP | BPF_JSLT | BPF_X: ++ case BPF_JMP | BPF_JSLE | BPF_X: ++ if (off == 0) ++ break; ++ setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel); ++ emit_jmp_r64(ctx, dst, src, rel, jmp); ++ if (finish_jmp(ctx, jmp, off) < 0) ++ goto toofar; ++ break; ++ /* PC += off if dst == imm */ ++ /* PC += off if dst != imm */ ++ /* PC += off if dst & imm */ ++ /* PC += off if dst > imm */ ++ /* PC += off if dst >= imm */ ++ /* PC += off if dst < imm */ ++ /* PC += off if dst <= imm */ ++ /* PC += off if dst > imm (signed) */ ++ /* PC += off if dst >= imm (signed) */ ++ /* PC += off if dst < imm (signed) */ ++ /* PC += off if dst <= imm (signed) */ ++ case BPF_JMP | BPF_JEQ | BPF_K: ++ case BPF_JMP | BPF_JNE | BPF_K: ++ case BPF_JMP | BPF_JSET | BPF_K: ++ case BPF_JMP | BPF_JGT | BPF_K: ++ case BPF_JMP | BPF_JGE | BPF_K: ++ case BPF_JMP | BPF_JLT | BPF_K: ++ case BPF_JMP | BPF_JLE | BPF_K: ++ case BPF_JMP | BPF_JSGT | BPF_K: ++ case BPF_JMP | BPF_JSGE | BPF_K: ++ case BPF_JMP | BPF_JSLT | BPF_K: ++ case BPF_JMP | BPF_JSLE | BPF_K: ++ if (off == 0) ++ break; ++ setup_jmp_i(ctx, imm, 64, BPF_OP(code), off, &jmp, &rel); ++ emit_jmp_i64(ctx, dst, imm, rel, jmp); ++ if (finish_jmp(ctx, jmp, off) < 0) ++ goto toofar; ++ break; ++ /* PC += off */ ++ case BPF_JMP | BPF_JA: ++ if (off == 0) ++ break; ++ if (emit_ja(ctx, off) < 0) ++ goto toofar; ++ break; ++ /* Tail call */ ++ case BPF_JMP | BPF_TAIL_CALL: ++ if (emit_tail_call(ctx) < 0) ++ goto invalid; ++ break; ++ /* Function call */ ++ case BPF_JMP | BPF_CALL: ++ if (emit_call(ctx, insn) < 0) ++ goto invalid; ++ break; ++ /* Function return */ ++ case BPF_JMP | BPF_EXIT: ++ /* ++ * Optimization: when last instruction is EXIT ++ * simply continue to epilogue. 
++ */ ++ if (ctx->bpf_index == ctx->program->len - 1) ++ break; ++ if (emit_exit(ctx) < 0) ++ goto toofar; ++ break; ++ ++ default: ++invalid: ++ pr_err_once("unknown opcode %02x\n", code); ++ return -EINVAL; ++notyet: ++ pr_info_once("*** NOT YET: opcode %02x ***\n", code); ++ return -EFAULT; ++toofar: ++ pr_info_once("*** TOO FAR: jump at %u opcode %02x ***\n", ++ ctx->bpf_index, code); ++ return -E2BIG; ++ } ++ return 0; ++} diff --git a/target/linux/generic/backport-5.15/050-v5.16-03-mips-bpf-Add-new-eBPF-JIT-for-64-bit-MIPS.patch b/target/linux/generic/backport-5.15/050-v5.16-03-mips-bpf-Add-new-eBPF-JIT-for-64-bit-MIPS.patch new file mode 100644 index 0000000000..38b46c0b76 --- /dev/null +++ b/target/linux/generic/backport-5.15/050-v5.16-03-mips-bpf-Add-new-eBPF-JIT-for-64-bit-MIPS.patch @@ -0,0 +1,1005 @@ +From: Johan Almbladh <johan.almbladh@anyfinetworks.com> +Date: Tue, 5 Oct 2021 18:54:05 +0200 +Subject: [PATCH] mips: bpf: Add new eBPF JIT for 64-bit MIPS + +This is an implementation on of an eBPF JIT for 64-bit MIPS III-V and +MIPS64r1-r6. It uses the same framework introduced by the 32-bit JIT. + +Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com> +--- + create mode 100644 arch/mips/net/bpf_jit_comp64.c + +--- /dev/null ++++ b/arch/mips/net/bpf_jit_comp64.c +@@ -0,0 +1,991 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Just-In-Time compiler for eBPF bytecode on MIPS. ++ * Implementation of JIT functions for 64-bit CPUs. ++ * ++ * Copyright (c) 2021 Anyfi Networks AB. ++ * Author: Johan Almbladh <johan.almbladh@gmail.com> ++ * ++ * Based on code and ideas from ++ * Copyright (c) 2017 Cavium, Inc. ++ * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com> ++ * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com> ++ */ ++ ++#include <linux/errno.h> ++#include <linux/filter.h> ++#include <linux/bpf.h> ++#include <asm/cpu-features.h> ++#include <asm/isa-rev.h> ++#include <asm/uasm.h> ++ ++#include "bpf_jit_comp.h" ++ ++/* MIPS t0-t3 are not available in the n64 ABI */ ++#undef MIPS_R_T0 ++#undef MIPS_R_T1 ++#undef MIPS_R_T2 ++#undef MIPS_R_T3 ++ ++/* Stack is 16-byte aligned in n64 ABI */ ++#define MIPS_STACK_ALIGNMENT 16 ++ ++/* Extra 64-bit eBPF registers used by JIT */ ++#define JIT_REG_TC (MAX_BPF_JIT_REG + 0) ++#define JIT_REG_ZX (MAX_BPF_JIT_REG + 1) ++ ++/* Number of prologue bytes to skip when doing a tail call */ ++#define JIT_TCALL_SKIP 4 ++ ++/* Callee-saved CPU registers that the JIT must preserve */ ++#define JIT_CALLEE_REGS \ ++ (BIT(MIPS_R_S0) | \ ++ BIT(MIPS_R_S1) | \ ++ BIT(MIPS_R_S2) | \ ++ BIT(MIPS_R_S3) | \ ++ BIT(MIPS_R_S4) | \ ++ BIT(MIPS_R_S5) | \ ++ BIT(MIPS_R_S6) | \ ++ BIT(MIPS_R_S7) | \ ++ BIT(MIPS_R_GP) | \ ++ BIT(MIPS_R_FP) | \ ++ BIT(MIPS_R_RA)) ++ ++/* Caller-saved CPU registers available for JIT use */ ++#define JIT_CALLER_REGS \ ++ (BIT(MIPS_R_A5) | \ ++ BIT(MIPS_R_A6) | \ ++ BIT(MIPS_R_A7)) ++/* ++ * Mapping of 64-bit eBPF registers to 64-bit native MIPS registers. ++ * MIPS registers t4 - t7 may be used by the JIT as temporary registers. ++ * MIPS registers t8 - t9 are reserved for single-register common functions. 
++ */ ++static const u8 bpf2mips64[] = { ++ /* Return value from in-kernel function, and exit value from eBPF */ ++ [BPF_REG_0] = MIPS_R_V0, ++ /* Arguments from eBPF program to in-kernel function */ ++ [BPF_REG_1] = MIPS_R_A0, ++ [BPF_REG_2] = MIPS_R_A1, ++ [BPF_REG_3] = MIPS_R_A2, ++ [BPF_REG_4] = MIPS_R_A3, ++ [BPF_REG_5] = MIPS_R_A4, ++ /* Callee-saved registers that in-kernel function will preserve */ ++ [BPF_REG_6] = MIPS_R_S0, ++ [BPF_REG_7] = MIPS_R_S1, ++ [BPF_REG_8] = MIPS_R_S2, ++ [BPF_REG_9] = MIPS_R_S3, ++ /* Read-only frame pointer to access the eBPF stack */ ++ [BPF_REG_FP] = MIPS_R_FP, ++ /* Temporary register for blinding constants */ ++ [BPF_REG_AX] = MIPS_R_AT, ++ /* Tail call count register, caller-saved */ ++ [JIT_REG_TC] = MIPS_R_A5, ++ /* Constant for register zero-extension */ ++ [JIT_REG_ZX] = MIPS_R_V1, ++}; ++ ++/* ++ * MIPS 32-bit operations on 64-bit registers generate a sign-extended ++ * result. However, the eBPF ISA mandates zero-extension, so we rely on the ++ * verifier to add that for us (emit_zext_ver). In addition, ALU arithmetic ++ * operations, right shift and byte swap require properly sign-extended ++ * operands or the result is unpredictable. We emit explicit sign-extensions ++ * in those cases. ++ */ ++ ++/* Sign extension */ ++static void emit_sext(struct jit_context *ctx, u8 dst, u8 src) ++{ ++ emit(ctx, sll, dst, src, 0); ++ clobber_reg(ctx, dst); ++} ++ ++/* Zero extension */ ++static void emit_zext(struct jit_context *ctx, u8 dst) ++{ ++ if (cpu_has_mips64r2 || cpu_has_mips64r6) { ++ emit(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); ++ } else { ++ emit(ctx, and, dst, dst, bpf2mips64[JIT_REG_ZX]); ++ access_reg(ctx, JIT_REG_ZX); /* We need the ZX register */ ++ } ++ clobber_reg(ctx, dst); ++} ++ ++/* Zero extension, if verifier does not do it for us */ ++static void emit_zext_ver(struct jit_context *ctx, u8 dst) ++{ ++ if (!ctx->program->aux->verifier_zext) ++ emit_zext(ctx, dst); ++} ++ ++/* dst = imm (64-bit) */ ++static void emit_mov_i64(struct jit_context *ctx, u8 dst, u64 imm64) ++{ ++ if (imm64 >= 0xffffffffffff8000ULL || imm64 < 0x8000ULL) { ++ emit(ctx, daddiu, dst, MIPS_R_ZERO, (s16)imm64); ++ } else if (imm64 >= 0xffffffff80000000ULL || ++ (imm64 < 0x80000000 && imm64 > 0xffff)) { ++ emit(ctx, lui, dst, (s16)(imm64 >> 16)); ++ emit(ctx, ori, dst, dst, (u16)imm64 & 0xffff); ++ } else { ++ u8 acc = MIPS_R_ZERO; ++ int k; ++ ++ for (k = 0; k < 4; k++) { ++ u16 half = imm64 >> (48 - 16 * k); ++ ++ if (acc == dst) ++ emit(ctx, dsll, dst, dst, 16); ++ ++ if (half) { ++ emit(ctx, ori, dst, acc, half); ++ acc = dst; ++ } ++ } ++ } ++ clobber_reg(ctx, dst); ++} ++ ++/* ALU immediate operation (64-bit) */ ++static void emit_alu_i64(struct jit_context *ctx, u8 dst, s32 imm, u8 op) ++{ ++ switch (BPF_OP(op)) { ++ /* dst = dst | imm */ ++ case BPF_OR: ++ emit(ctx, ori, dst, dst, (u16)imm); ++ break; ++ /* dst = dst ^ imm */ ++ case BPF_XOR: ++ emit(ctx, xori, dst, dst, (u16)imm); ++ break; ++ /* dst = -dst */ ++ case BPF_NEG: ++ emit(ctx, dsubu, dst, MIPS_R_ZERO, dst); ++ break; ++ /* dst = dst << imm */ ++ case BPF_LSH: ++ emit(ctx, dsll_safe, dst, dst, imm); ++ break; ++ /* dst = dst >> imm */ ++ case BPF_RSH: ++ emit(ctx, dsrl_safe, dst, dst, imm); ++ break; ++ /* dst = dst >> imm (arithmetic) */ ++ case BPF_ARSH: ++ emit(ctx, dsra_safe, dst, dst, imm); ++ break; ++ /* dst = dst + imm */ ++ case BPF_ADD: ++ emit(ctx, daddiu, dst, dst, imm); ++ break; ++ /* dst = dst - imm */ ++ case BPF_SUB: ++ emit(ctx, daddiu, dst, dst, -imm); ++ break; ++ 
default: ++ /* Width-generic operations */ ++ emit_alu_i(ctx, dst, imm, op); ++ } ++ clobber_reg(ctx, dst); ++} ++ ++/* ALU register operation (64-bit) */ ++static void emit_alu_r64(struct jit_context *ctx, u8 dst, u8 src, u8 op) ++{ ++ switch (BPF_OP(op)) { ++ /* dst = dst << src */ ++ case BPF_LSH: ++ emit(ctx, dsllv, dst, dst, src); ++ break; ++ /* dst = dst >> src */ ++ case BPF_RSH: ++ emit(ctx, dsrlv, dst, dst, src); ++ break; ++ /* dst = dst >> src (arithmetic) */ ++ case BPF_ARSH: ++ emit(ctx, dsrav, dst, dst, src); ++ break; ++ /* dst = dst + src */ ++ case BPF_ADD: ++ emit(ctx, daddu, dst, dst, src); ++ break; ++ /* dst = dst - src */ ++ case BPF_SUB: ++ emit(ctx, dsubu, dst, dst, src); ++ break; ++ /* dst = dst * src */ ++ case BPF_MUL: ++ if (cpu_has_mips64r6) { ++ emit(ctx, dmulu, dst, dst, src); ++ } else { ++ emit(ctx, dmultu, dst, src); ++ emit(ctx, mflo, dst); ++ } ++ break; ++ /* dst = dst / src */ ++ case BPF_DIV: ++ if (cpu_has_mips64r6) { ++ emit(ctx, ddivu_r6, dst, dst, src); ++ } else { ++ emit(ctx, ddivu, dst, src); ++ emit(ctx, mflo, dst); ++ } ++ break; ++ /* dst = dst % src */ ++ case BPF_MOD: ++ if (cpu_has_mips64r6) { ++ emit(ctx, dmodu, dst, dst, src); ++ } else { ++ emit(ctx, ddivu, dst, src); ++ emit(ctx, mfhi, dst); ++ } ++ break; ++ default: ++ /* Width-generic operations */ ++ emit_alu_r(ctx, dst, src, op); ++ } ++ clobber_reg(ctx, dst); ++} ++ ++/* Swap sub words in a register double word */ ++static void emit_swap_r64(struct jit_context *ctx, u8 dst, u8 mask, u32 bits) ++{ ++ u8 tmp = MIPS_R_T9; ++ ++ emit(ctx, and, tmp, dst, mask); /* tmp = dst & mask */ ++ emit(ctx, dsll, tmp, tmp, bits); /* tmp = tmp << bits */ ++ emit(ctx, dsrl, dst, dst, bits); /* dst = dst >> bits */ ++ emit(ctx, and, dst, dst, mask); /* dst = dst & mask */ ++ emit(ctx, or, dst, dst, tmp); /* dst = dst | tmp */ ++} ++ ++/* Swap bytes and truncate a register double word, word or half word */ ++static void emit_bswap_r64(struct jit_context *ctx, u8 dst, u32 width) ++{ ++ switch (width) { ++ /* Swap bytes in a double word */ ++ case 64: ++ if (cpu_has_mips64r2 || cpu_has_mips64r6) { ++ emit(ctx, dsbh, dst, dst); ++ emit(ctx, dshd, dst, dst); ++ } else { ++ u8 t1 = MIPS_R_T6; ++ u8 t2 = MIPS_R_T7; ++ ++ emit(ctx, dsll32, t2, dst, 0); /* t2 = dst << 32 */ ++ emit(ctx, dsrl32, dst, dst, 0); /* dst = dst >> 32 */ ++ emit(ctx, or, dst, dst, t2); /* dst = dst | t2 */ ++ ++ emit(ctx, ori, t2, MIPS_R_ZERO, 0xffff); ++ emit(ctx, dsll32, t1, t2, 0); /* t1 = t2 << 32 */ ++ emit(ctx, or, t1, t1, t2); /* t1 = t1 | t2 */ ++ emit_swap_r64(ctx, dst, t1, 16);/* dst = swap16(dst) */ ++ ++ emit(ctx, lui, t2, 0xff); /* t2 = 0x00ff0000 */ ++ emit(ctx, ori, t2, t2, 0xff); /* t2 = t2 | 0x00ff */ ++ emit(ctx, dsll32, t1, t2, 0); /* t1 = t2 << 32 */ ++ emit(ctx, or, t1, t1, t2); /* t1 = t1 | t2 */ ++ emit_swap_r64(ctx, dst, t1, 8); /* dst = swap8(dst) */ ++ } ++ break; ++ /* Swap bytes in a half word */ ++ /* Swap bytes in a word */ ++ case 32: ++ case 16: ++ emit_sext(ctx, dst, dst); ++ emit_bswap_r(ctx, dst, width); ++ if (cpu_has_mips64r2 || cpu_has_mips64r6) ++ emit_zext(ctx, dst); ++ break; ++ } ++ clobber_reg(ctx, dst); ++} ++ ++/* Truncate a register double word, word or half word */ ++static void emit_trunc_r64(struct jit_context *ctx, u8 dst, u32 width) ++{ ++ switch (width) { ++ case 64: ++ break; ++ /* Zero-extend a word */ ++ case 32: ++ emit_zext(ctx, dst); ++ break; ++ /* Zero-extend a half word */ ++ case 16: ++ emit(ctx, andi, dst, dst, 0xffff); ++ break; ++ } ++ clobber_reg(ctx, dst); ++} ++ 
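The pre-R2 fallback in emit_bswap_r64() above builds a full 64-bit byte swap from a 32-bit word rotate followed by two emit_swap_r64() mask-and-shift passes. As a rough illustration of the value the emitted instruction sequence computes, here is a minimal C sketch; the helper name is made up for illustration and is not part of the patch:

#include <stdint.h>

/* Byte-swap a 64-bit value using only shifts, masks and ORs,
 * mirroring the dsll32/dsrl32 word swap plus the 16-bit and
 * 8-bit emit_swap_r64() passes emitted above.
 */
static uint64_t bswap64_by_masks(uint64_t x)
{
	uint64_t m;

	/* Swap the two 32-bit words */
	x = (x << 32) | (x >> 32);

	/* Swap the 16-bit halves inside each 32-bit word */
	m = 0x0000ffff0000ffffULL;
	x = ((x & m) << 16) | ((x >> 16) & m);

	/* Swap the bytes inside each 16-bit half */
	m = 0x00ff00ff00ff00ffULL;
	x = ((x & m) << 8) | ((x >> 8) & m);

	return x;
}

For example, bswap64_by_masks(0x0102030405060708ULL) returns 0x0807060504030201ULL, the same result the dsbh/dshd path produces on MIPS64r2 and later.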
++/* Load operation: dst = *(size*)(src + off) */ ++static void emit_ldx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size) ++{ ++ switch (size) { ++ /* Load a byte */ ++ case BPF_B: ++ emit(ctx, lbu, dst, off, src); ++ break; ++ /* Load a half word */ ++ case BPF_H: ++ emit(ctx, lhu, dst, off, src); ++ break; ++ /* Load a word */ ++ case BPF_W: ++ emit(ctx, lwu, dst, off, src); ++ break; ++ /* Load a double word */ ++ case BPF_DW: ++ emit(ctx, ld, dst, off, src); ++ break; ++ } ++ clobber_reg(ctx, dst); ++} ++ ++/* Store operation: *(size *)(dst + off) = src */ ++static void emit_stx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size) ++{ ++ switch (size) { ++ /* Store a byte */ ++ case BPF_B: ++ emit(ctx, sb, src, off, dst); ++ break; ++ /* Store a half word */ ++ case BPF_H: ++ emit(ctx, sh, src, off, dst); ++ break; ++ /* Store a word */ ++ case BPF_W: ++ emit(ctx, sw, src, off, dst); ++ break; ++ /* Store a double word */ ++ case BPF_DW: ++ emit(ctx, sd, src, off, dst); ++ break; ++ } ++} ++ ++/* Atomic read-modify-write */ ++static void emit_atomic_r64(struct jit_context *ctx, ++ u8 dst, u8 src, s16 off, u8 code) ++{ ++ u8 t1 = MIPS_R_T6; ++ u8 t2 = MIPS_R_T7; ++ ++ emit(ctx, lld, t1, off, dst); ++ switch (code) { ++ case BPF_ADD: ++ emit(ctx, daddu, t2, t1, src); ++ break; ++ case BPF_AND: ++ emit(ctx, and, t2, t1, src); ++ break; ++ case BPF_OR: ++ emit(ctx, or, t2, t1, src); ++ break; ++ case BPF_XOR: ++ emit(ctx, xor, t2, t1, src); ++ break; ++ } ++ emit(ctx, scd, t2, off, dst); ++ emit(ctx, beqz, t2, -16); ++ emit(ctx, nop); /* Delay slot */ ++} ++ ++/* Function call */ ++static int emit_call(struct jit_context *ctx, const struct bpf_insn *insn) ++{ ++ u8 zx = bpf2mips64[JIT_REG_ZX]; ++ u8 tmp = MIPS_R_T6; ++ bool fixed; ++ u64 addr; ++ ++ /* Decode the call address */ ++ if (bpf_jit_get_func_addr(ctx->program, insn, false, ++ &addr, &fixed) < 0) ++ return -1; ++ if (!fixed) ++ return -1; ++ ++ /* Push caller-saved registers on stack */ ++ push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0); ++ ++ /* Emit function call */ ++ emit_mov_i64(ctx, tmp, addr); ++ emit(ctx, jalr, MIPS_R_RA, tmp); ++ emit(ctx, nop); /* Delay slot */ ++ ++ /* Restore caller-saved registers */ ++ pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0); ++ ++ /* Re-initialize the JIT zero-extension register if accessed */ ++ if (ctx->accessed & BIT(JIT_REG_ZX)) { ++ emit(ctx, daddiu, zx, MIPS_R_ZERO, -1); ++ emit(ctx, dsrl32, zx, zx, 0); ++ } ++ ++ clobber_reg(ctx, MIPS_R_RA); ++ clobber_reg(ctx, MIPS_R_V0); ++ clobber_reg(ctx, MIPS_R_V1); ++ return 0; ++} ++ ++/* Function tail call */ ++static int emit_tail_call(struct jit_context *ctx) ++{ ++ u8 ary = bpf2mips64[BPF_REG_2]; ++ u8 ind = bpf2mips64[BPF_REG_3]; ++ u8 tcc = bpf2mips64[JIT_REG_TC]; ++ u8 tmp = MIPS_R_T6; ++ int off; ++ ++ /* ++ * Tail call: ++ * eBPF R1 - function argument (context ptr), passed in a0-a1 ++ * eBPF R2 - ptr to object with array of function entry points ++ * eBPF R3 - array index of function to be called ++ */ ++ ++ /* if (ind >= ary->map.max_entries) goto out */ ++ off = offsetof(struct bpf_array, map.max_entries); ++ if (off > 0x7fff) ++ return -1; ++ emit(ctx, lwu, tmp, off, ary); /* tmp = ary->map.max_entrs*/ ++ emit(ctx, sltu, tmp, ind, tmp); /* tmp = ind < t1 */ ++ emit(ctx, beqz, tmp, get_offset(ctx, 1)); /* PC += off(1) if tmp == 0*/ ++ ++ /* if (--TCC < 0) goto out */ ++ emit(ctx, daddiu, tcc, tcc, -1); /* tcc-- (delay slot) */ ++ emit(ctx, bltz, tcc, get_offset(ctx, 1)); /* PC += off(1) if tcc < 0 */ ++ 
/* (next insn delay slot) */ ++ /* prog = ary->ptrs[ind] */ ++ off = offsetof(struct bpf_array, ptrs); ++ if (off > 0x7fff) ++ return -1; ++ emit(ctx, dsll, tmp, ind, 3); /* tmp = ind << 3 */ ++ emit(ctx, daddu, tmp, tmp, ary); /* tmp += ary */ ++ emit(ctx, ld, tmp, off, tmp); /* tmp = *(tmp + off) */ ++ ++ /* if (prog == 0) goto out */ ++ emit(ctx, beqz, tmp, get_offset(ctx, 1)); /* PC += off(1) if tmp == 0*/ ++ emit(ctx, nop); /* Delay slot */ ++ ++ /* func = prog->bpf_func + 8 (prologue skip offset) */ ++ off = offsetof(struct bpf_prog, bpf_func); ++ if (off > 0x7fff) ++ return -1; ++ emit(ctx, ld, tmp, off, tmp); /* tmp = *(tmp + off) */ ++ emit(ctx, daddiu, tmp, tmp, JIT_TCALL_SKIP); /* tmp += skip (4) */ ++ ++ /* goto func */ ++ build_epilogue(ctx, tmp); ++ access_reg(ctx, JIT_REG_TC); ++ return 0; ++} ++ ++/* ++ * Stack frame layout for a JITed program (stack grows down). ++ * ++ * Higher address : Previous stack frame : ++ * +===========================+ <--- MIPS sp before call ++ * | Callee-saved registers, | ++ * | including RA and FP | ++ * +---------------------------+ <--- eBPF FP (MIPS fp) ++ * | Local eBPF variables | ++ * | allocated by program | ++ * +---------------------------+ ++ * | Reserved for caller-saved | ++ * | registers | ++ * Lower address +===========================+ <--- MIPS sp ++ */ ++ ++/* Build program prologue to set up the stack and registers */ ++void build_prologue(struct jit_context *ctx) ++{ ++ u8 fp = bpf2mips64[BPF_REG_FP]; ++ u8 tc = bpf2mips64[JIT_REG_TC]; ++ u8 zx = bpf2mips64[JIT_REG_ZX]; ++ int stack, saved, locals, reserved; ++ ++ /* ++ * The first instruction initializes the tail call count register. ++ * On a tail call, the calling function jumps into the prologue ++ * after this instruction. ++ */ ++ emit(ctx, addiu, tc, MIPS_R_ZERO, min(MAX_TAIL_CALL_CNT + 1, 0xffff)); ++ ++ /* === Entry-point for tail calls === */ ++ ++ /* ++ * If the eBPF frame pointer and tail call count registers were ++ * accessed they must be preserved. Mark them as clobbered here ++ * to save and restore them on the stack as needed. ++ */ ++ if (ctx->accessed & BIT(BPF_REG_FP)) ++ clobber_reg(ctx, fp); ++ if (ctx->accessed & BIT(JIT_REG_TC)) ++ clobber_reg(ctx, tc); ++ if (ctx->accessed & BIT(JIT_REG_ZX)) ++ clobber_reg(ctx, zx); ++ ++ /* Compute the stack space needed for callee-saved registers */ ++ saved = hweight32(ctx->clobbered & JIT_CALLEE_REGS) * sizeof(u64); ++ saved = ALIGN(saved, MIPS_STACK_ALIGNMENT); ++ ++ /* Stack space used by eBPF program local data */ ++ locals = ALIGN(ctx->program->aux->stack_depth, MIPS_STACK_ALIGNMENT); ++ ++ /* ++ * If we are emitting function calls, reserve extra stack space for ++ * caller-saved registers needed by the JIT. The required space is ++ * computed automatically during resource usage discovery (pass 1). 
++ */ ++ reserved = ctx->stack_used; ++ ++ /* Allocate the stack frame */ ++ stack = ALIGN(saved + locals + reserved, MIPS_STACK_ALIGNMENT); ++ if (stack) ++ emit(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack); ++ ++ /* Store callee-saved registers on stack */ ++ push_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, stack - saved); ++ ++ /* Initialize the eBPF frame pointer if accessed */ ++ if (ctx->accessed & BIT(BPF_REG_FP)) ++ emit(ctx, daddiu, fp, MIPS_R_SP, stack - saved); ++ ++ /* Initialize the ePF JIT zero-extension register if accessed */ ++ if (ctx->accessed & BIT(JIT_REG_ZX)) { ++ emit(ctx, daddiu, zx, MIPS_R_ZERO, -1); ++ emit(ctx, dsrl32, zx, zx, 0); ++ } ++ ++ ctx->saved_size = saved; ++ ctx->stack_size = stack; ++} ++ ++/* Build the program epilogue to restore the stack and registers */ ++void build_epilogue(struct jit_context *ctx, int dest_reg) ++{ ++ /* Restore callee-saved registers from stack */ ++ pop_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, ++ ctx->stack_size - ctx->saved_size); ++ ++ /* Release the stack frame */ ++ if (ctx->stack_size) ++ emit(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, ctx->stack_size); ++ ++ /* Jump to return address and sign-extend the 32-bit return value */ ++ emit(ctx, jr, dest_reg); ++ emit(ctx, sll, MIPS_R_V0, MIPS_R_V0, 0); /* Delay slot */ ++} ++ ++/* Build one eBPF instruction */ ++int build_insn(const struct bpf_insn *insn, struct jit_context *ctx) ++{ ++ u8 dst = bpf2mips64[insn->dst_reg]; ++ u8 src = bpf2mips64[insn->src_reg]; ++ u8 code = insn->code; ++ s16 off = insn->off; ++ s32 imm = insn->imm; ++ s32 val, rel; ++ u8 alu, jmp; ++ ++ switch (code) { ++ /* ALU operations */ ++ /* dst = imm */ ++ case BPF_ALU | BPF_MOV | BPF_K: ++ emit_mov_i(ctx, dst, imm); ++ emit_zext_ver(ctx, dst); ++ break; ++ /* dst = src */ ++ case BPF_ALU | BPF_MOV | BPF_X: ++ if (imm == 1) { ++ /* Special mov32 for zext */ ++ emit_zext(ctx, dst); ++ } else { ++ emit_mov_r(ctx, dst, src); ++ emit_zext_ver(ctx, dst); ++ } ++ break; ++ /* dst = -dst */ ++ case BPF_ALU | BPF_NEG: ++ emit_sext(ctx, dst, dst); ++ emit_alu_i(ctx, dst, 0, BPF_NEG); ++ emit_zext_ver(ctx, dst); ++ break; ++ /* dst = dst & imm */ ++ /* dst = dst | imm */ ++ /* dst = dst ^ imm */ ++ /* dst = dst << imm */ ++ case BPF_ALU | BPF_OR | BPF_K: ++ case BPF_ALU | BPF_AND | BPF_K: ++ case BPF_ALU | BPF_XOR | BPF_K: ++ case BPF_ALU | BPF_LSH | BPF_K: ++ if (!valid_alu_i(BPF_OP(code), imm)) { ++ emit_mov_i(ctx, MIPS_R_T4, imm); ++ emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code)); ++ } else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) { ++ emit_alu_i(ctx, dst, val, alu); ++ } ++ emit_zext_ver(ctx, dst); ++ break; ++ /* dst = dst >> imm */ ++ /* dst = dst >> imm (arithmetic) */ ++ /* dst = dst + imm */ ++ /* dst = dst - imm */ ++ /* dst = dst * imm */ ++ /* dst = dst / imm */ ++ /* dst = dst % imm */ ++ case BPF_ALU | BPF_RSH | BPF_K: ++ case BPF_ALU | BPF_ARSH | BPF_K: ++ case BPF_ALU | BPF_ADD | BPF_K: ++ case BPF_ALU | BPF_SUB | BPF_K: ++ case BPF_ALU | BPF_MUL | BPF_K: ++ case BPF_ALU | BPF_DIV | BPF_K: ++ case BPF_ALU | BPF_MOD | BPF_K: ++ if (!valid_alu_i(BPF_OP(code), imm)) { ++ emit_sext(ctx, dst, dst); ++ emit_mov_i(ctx, MIPS_R_T4, imm); ++ emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code)); ++ } else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) { ++ emit_sext(ctx, dst, dst); ++ emit_alu_i(ctx, dst, val, alu); ++ } ++ emit_zext_ver(ctx, dst); ++ break; ++ /* dst = dst & src */ ++ /* dst = dst | src */ ++ /* dst = dst ^ src */ ++ /* dst = dst << src */ ++ case BPF_ALU | BPF_AND | BPF_X: ++ case 
BPF_ALU | BPF_OR | BPF_X: ++ case BPF_ALU | BPF_XOR | BPF_X: ++ case BPF_ALU | BPF_LSH | BPF_X: ++ emit_alu_r(ctx, dst, src, BPF_OP(code)); ++ emit_zext_ver(ctx, dst); ++ break; ++ /* dst = dst >> src */ ++ /* dst = dst >> src (arithmetic) */ ++ /* dst = dst + src */ ++ /* dst = dst - src */ ++ /* dst = dst * src */ ++ /* dst = dst / src */ ++ /* dst = dst % src */ ++ case BPF_ALU | BPF_RSH | BPF_X: ++ case BPF_ALU | BPF_ARSH | BPF_X: ++ case BPF_ALU | BPF_ADD | BPF_X: ++ case BPF_ALU | BPF_SUB | BPF_X: ++ case BPF_ALU | BPF_MUL | BPF_X: ++ case BPF_ALU | BPF_DIV | BPF_X: ++ case BPF_ALU | BPF_MOD | BPF_X: ++ emit_sext(ctx, dst, dst); ++ emit_sext(ctx, MIPS_R_T4, src); ++ emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code)); ++ emit_zext_ver(ctx, dst); ++ break; ++ /* dst = imm (64-bit) */ ++ case BPF_ALU64 | BPF_MOV | BPF_K: ++ emit_mov_i(ctx, dst, imm); ++ break; ++ /* dst = src (64-bit) */ ++ case BPF_ALU64 | BPF_MOV | BPF_X: ++ emit_mov_r(ctx, dst, src); ++ break; ++ /* dst = -dst (64-bit) */ ++ case BPF_ALU64 | BPF_NEG: ++ emit_alu_i64(ctx, dst, 0, BPF_NEG); ++ break; ++ /* dst = dst & imm (64-bit) */ ++ /* dst = dst | imm (64-bit) */ ++ /* dst = dst ^ imm (64-bit) */ ++ /* dst = dst << imm (64-bit) */ ++ /* dst = dst >> imm (64-bit) */ ++ /* dst = dst >> imm ((64-bit, arithmetic) */ ++ /* dst = dst + imm (64-bit) */ ++ /* dst = dst - imm (64-bit) */ ++ /* dst = dst * imm (64-bit) */ ++ /* dst = dst / imm (64-bit) */ ++ /* dst = dst % imm (64-bit) */ ++ case BPF_ALU64 | BPF_AND | BPF_K: ++ case BPF_ALU64 | BPF_OR | BPF_K: ++ case BPF_ALU64 | BPF_XOR | BPF_K: ++ case BPF_ALU64 | BPF_LSH | BPF_K: ++ case BPF_ALU64 | BPF_RSH | BPF_K: ++ case BPF_ALU64 | BPF_ARSH | BPF_K: ++ case BPF_ALU64 | BPF_ADD | BPF_K: ++ case BPF_ALU64 | BPF_SUB | BPF_K: ++ case BPF_ALU64 | BPF_MUL | BPF_K: ++ case BPF_ALU64 | BPF_DIV | BPF_K: ++ case BPF_ALU64 | BPF_MOD | BPF_K: ++ if (!valid_alu_i(BPF_OP(code), imm)) { ++ emit_mov_i(ctx, MIPS_R_T4, imm); ++ emit_alu_r64(ctx, dst, MIPS_R_T4, BPF_OP(code)); ++ } else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) { ++ emit_alu_i64(ctx, dst, val, alu); ++ } ++ break; ++ /* dst = dst & src (64-bit) */ ++ /* dst = dst | src (64-bit) */ ++ /* dst = dst ^ src (64-bit) */ ++ /* dst = dst << src (64-bit) */ ++ /* dst = dst >> src (64-bit) */ ++ /* dst = dst >> src (64-bit, arithmetic) */ ++ /* dst = dst + src (64-bit) */ ++ /* dst = dst - src (64-bit) */ ++ /* dst = dst * src (64-bit) */ ++ /* dst = dst / src (64-bit) */ ++ /* dst = dst % src (64-bit) */ ++ case BPF_ALU64 | BPF_AND | BPF_X: ++ case BPF_ALU64 | BPF_OR | BPF_X: ++ case BPF_ALU64 | BPF_XOR | BPF_X: ++ case BPF_ALU64 | BPF_LSH | BPF_X: ++ case BPF_ALU64 | BPF_RSH | BPF_X: ++ case BPF_ALU64 | BPF_ARSH | BPF_X: ++ case BPF_ALU64 | BPF_ADD | BPF_X: ++ case BPF_ALU64 | BPF_SUB | BPF_X: ++ case BPF_ALU64 | BPF_MUL | BPF_X: ++ case BPF_ALU64 | BPF_DIV | BPF_X: ++ case BPF_ALU64 | BPF_MOD | BPF_X: ++ emit_alu_r64(ctx, dst, src, BPF_OP(code)); ++ break; ++ /* dst = htole(dst) */ ++ /* dst = htobe(dst) */ ++ case BPF_ALU | BPF_END | BPF_FROM_LE: ++ case BPF_ALU | BPF_END | BPF_FROM_BE: ++ if (BPF_SRC(code) == ++#ifdef __BIG_ENDIAN ++ BPF_FROM_LE ++#else ++ BPF_FROM_BE ++#endif ++ ) ++ emit_bswap_r64(ctx, dst, imm); ++ else ++ emit_trunc_r64(ctx, dst, imm); ++ break; ++ /* dst = imm64 */ ++ case BPF_LD | BPF_IMM | BPF_DW: ++ emit_mov_i64(ctx, dst, (u32)imm | ((u64)insn[1].imm << 32)); ++ return 1; ++ /* LDX: dst = *(size *)(src + off) */ ++ case BPF_LDX | BPF_MEM | BPF_W: ++ case BPF_LDX | BPF_MEM | BPF_H: ++ case BPF_LDX 
| BPF_MEM | BPF_B: ++ case BPF_LDX | BPF_MEM | BPF_DW: ++ emit_ldx(ctx, dst, src, off, BPF_SIZE(code)); ++ break; ++ /* ST: *(size *)(dst + off) = imm */ ++ case BPF_ST | BPF_MEM | BPF_W: ++ case BPF_ST | BPF_MEM | BPF_H: ++ case BPF_ST | BPF_MEM | BPF_B: ++ case BPF_ST | BPF_MEM | BPF_DW: ++ emit_mov_i(ctx, MIPS_R_T4, imm); ++ emit_stx(ctx, dst, MIPS_R_T4, off, BPF_SIZE(code)); ++ break; ++ /* STX: *(size *)(dst + off) = src */ ++ case BPF_STX | BPF_MEM | BPF_W: ++ case BPF_STX | BPF_MEM | BPF_H: ++ case BPF_STX | BPF_MEM | BPF_B: ++ case BPF_STX | BPF_MEM | BPF_DW: ++ emit_stx(ctx, dst, src, off, BPF_SIZE(code)); ++ break; ++ /* Speculation barrier */ ++ case BPF_ST | BPF_NOSPEC: ++ break; ++ /* Atomics */ ++ case BPF_STX | BPF_XADD | BPF_W: ++ case BPF_STX | BPF_XADD | BPF_DW: ++ switch (imm) { ++ case BPF_ADD: ++ case BPF_AND: ++ case BPF_OR: ++ case BPF_XOR: ++ if (BPF_SIZE(code) == BPF_DW) { ++ emit_atomic_r64(ctx, dst, src, off, imm); ++ } else { /* 32-bit, no fetch */ ++ emit_sext(ctx, MIPS_R_T4, src); ++ emit_atomic_r(ctx, dst, MIPS_R_T4, off, imm); ++ } ++ break; ++ default: ++ goto notyet; ++ } ++ break; ++ /* PC += off if dst == src */ ++ /* PC += off if dst != src */ ++ /* PC += off if dst & src */ ++ /* PC += off if dst > src */ ++ /* PC += off if dst >= src */ ++ /* PC += off if dst < src */ ++ /* PC += off if dst <= src */ ++ /* PC += off if dst > src (signed) */ ++ /* PC += off if dst >= src (signed) */ ++ /* PC += off if dst < src (signed) */ ++ /* PC += off if dst <= src (signed) */ ++ case BPF_JMP32 | BPF_JEQ | BPF_X: ++ case BPF_JMP32 | BPF_JNE | BPF_X: ++ case BPF_JMP32 | BPF_JSET | BPF_X: ++ case BPF_JMP32 | BPF_JGT | BPF_X: ++ case BPF_JMP32 | BPF_JGE | BPF_X: ++ case BPF_JMP32 | BPF_JLT | BPF_X: ++ case BPF_JMP32 | BPF_JLE | BPF_X: ++ case BPF_JMP32 | BPF_JSGT | BPF_X: ++ case BPF_JMP32 | BPF_JSGE | BPF_X: ++ case BPF_JMP32 | BPF_JSLT | BPF_X: ++ case BPF_JMP32 | BPF_JSLE | BPF_X: ++ if (off == 0) ++ break; ++ setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel); ++ emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */ ++ emit_sext(ctx, MIPS_R_T5, src); /* Sign-extended src */ ++ emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp); ++ if (finish_jmp(ctx, jmp, off) < 0) ++ goto toofar; ++ break; ++ /* PC += off if dst == imm */ ++ /* PC += off if dst != imm */ ++ /* PC += off if dst & imm */ ++ /* PC += off if dst > imm */ ++ /* PC += off if dst >= imm */ ++ /* PC += off if dst < imm */ ++ /* PC += off if dst <= imm */ ++ /* PC += off if dst > imm (signed) */ ++ /* PC += off if dst >= imm (signed) */ ++ /* PC += off if dst < imm (signed) */ ++ /* PC += off if dst <= imm (signed) */ ++ case BPF_JMP32 | BPF_JEQ | BPF_K: ++ case BPF_JMP32 | BPF_JNE | BPF_K: ++ case BPF_JMP32 | BPF_JSET | BPF_K: ++ case BPF_JMP32 | BPF_JGT | BPF_K: ++ case BPF_JMP32 | BPF_JGE | BPF_K: ++ case BPF_JMP32 | BPF_JLT | BPF_K: ++ case BPF_JMP32 | BPF_JLE | BPF_K: ++ case BPF_JMP32 | BPF_JSGT | BPF_K: ++ case BPF_JMP32 | BPF_JSGE | BPF_K: ++ case BPF_JMP32 | BPF_JSLT | BPF_K: ++ case BPF_JMP32 | BPF_JSLE | BPF_K: ++ if (off == 0) ++ break; ++ setup_jmp_i(ctx, imm, 32, BPF_OP(code), off, &jmp, &rel); ++ emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */ ++ if (valid_jmp_i(jmp, imm)) { ++ emit_jmp_i(ctx, MIPS_R_T4, imm, rel, jmp); ++ } else { ++ /* Move large immediate to register, sign-extended */ ++ emit_mov_i(ctx, MIPS_R_T5, imm); ++ emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp); ++ } ++ if (finish_jmp(ctx, jmp, off) < 0) ++ goto toofar; ++ break; ++ /* PC += off if dst == src */ 
++ /* PC += off if dst != src */ ++ /* PC += off if dst & src */ ++ /* PC += off if dst > src */ ++ /* PC += off if dst >= src */ ++ /* PC += off if dst < src */ ++ /* PC += off if dst <= src */ ++ /* PC += off if dst > src (signed) */ ++ /* PC += off if dst >= src (signed) */ ++ /* PC += off if dst < src (signed) */ ++ /* PC += off if dst <= src (signed) */ ++ case BPF_JMP | BPF_JEQ | BPF_X: ++ case BPF_JMP | BPF_JNE | BPF_X: ++ case BPF_JMP | BPF_JSET | BPF_X: ++ case BPF_JMP | BPF_JGT | BPF_X: ++ case BPF_JMP | BPF_JGE | BPF_X: ++ case BPF_JMP | BPF_JLT | BPF_X: ++ case BPF_JMP | BPF_JLE | BPF_X: ++ case BPF_JMP | BPF_JSGT | BPF_X: ++ case BPF_JMP | BPF_JSGE | BPF_X: ++ case BPF_JMP | BPF_JSLT | BPF_X: ++ case BPF_JMP | BPF_JSLE | BPF_X: ++ if (off == 0) ++ break; ++ setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel); ++ emit_jmp_r(ctx, dst, src, rel, jmp); ++ if (finish_jmp(ctx, jmp, off) < 0) ++ goto toofar; ++ break; ++ /* PC += off if dst == imm */ ++ /* PC += off if dst != imm */ ++ /* PC += off if dst & imm */ ++ /* PC += off if dst > imm */ ++ /* PC += off if dst >= imm */ ++ /* PC += off if dst < imm */ ++ /* PC += off if dst <= imm */ ++ /* PC += off if dst > imm (signed) */ ++ /* PC += off if dst >= imm (signed) */ ++ /* PC += off if dst < imm (signed) */ ++ /* PC += off if dst <= imm (signed) */ ++ case BPF_JMP | BPF_JEQ | BPF_K: ++ case BPF_JMP | BPF_JNE | BPF_K: ++ case BPF_JMP | BPF_JSET | BPF_K: ++ case BPF_JMP | BPF_JGT | BPF_K: ++ case BPF_JMP | BPF_JGE | BPF_K: ++ case BPF_JMP | BPF_JLT | BPF_K: ++ case BPF_JMP | BPF_JLE | BPF_K: ++ case BPF_JMP | BPF_JSGT | BPF_K: ++ case BPF_JMP | BPF_JSGE | BPF_K: ++ case BPF_JMP | BPF_JSLT | BPF_K: ++ case BPF_JMP | BPF_JSLE | BPF_K: ++ if (off == 0) ++ break; ++ setup_jmp_i(ctx, imm, 64, BPF_OP(code), off, &jmp, &rel); ++ if (valid_jmp_i(jmp, imm)) { ++ emit_jmp_i(ctx, dst, imm, rel, jmp); ++ } else { ++ /* Move large immediate to register */ ++ emit_mov_i(ctx, MIPS_R_T4, imm); ++ emit_jmp_r(ctx, dst, MIPS_R_T4, rel, jmp); ++ } ++ if (finish_jmp(ctx, jmp, off) < 0) ++ goto toofar; ++ break; ++ /* PC += off */ ++ case BPF_JMP | BPF_JA: ++ if (off == 0) ++ break; ++ if (emit_ja(ctx, off) < 0) ++ goto toofar; ++ break; ++ /* Tail call */ ++ case BPF_JMP | BPF_TAIL_CALL: ++ if (emit_tail_call(ctx) < 0) ++ goto invalid; ++ break; ++ /* Function call */ ++ case BPF_JMP | BPF_CALL: ++ if (emit_call(ctx, insn) < 0) ++ goto invalid; ++ break; ++ /* Function return */ ++ case BPF_JMP | BPF_EXIT: ++ /* ++ * Optimization: when last instruction is EXIT ++ * simply continue to epilogue. 
++ */ ++ if (ctx->bpf_index == ctx->program->len - 1) ++ break; ++ if (emit_exit(ctx) < 0) ++ goto toofar; ++ break; ++ ++ default: ++invalid: ++ pr_err_once("unknown opcode %02x\n", code); ++ return -EINVAL; ++notyet: ++ pr_info_once("*** NOT YET: opcode %02x ***\n", code); ++ return -EFAULT; ++toofar: ++ pr_info_once("*** TOO FAR: jump at %u opcode %02x ***\n", ++ ctx->bpf_index, code); ++ return -E2BIG; ++ } ++ return 0; ++} diff --git a/target/linux/generic/backport-5.15/050-v5.16-04-mips-bpf-Add-JIT-workarounds-for-CPU-errata.patch b/target/linux/generic/backport-5.15/050-v5.16-04-mips-bpf-Add-JIT-workarounds-for-CPU-errata.patch new file mode 100644 index 0000000000..63553ebe58 --- /dev/null +++ b/target/linux/generic/backport-5.15/050-v5.16-04-mips-bpf-Add-JIT-workarounds-for-CPU-errata.patch @@ -0,0 +1,120 @@ +From: Johan Almbladh <johan.almbladh@anyfinetworks.com> +Date: Tue, 5 Oct 2021 18:54:06 +0200 +Subject: [PATCH] mips: bpf: Add JIT workarounds for CPU errata + +This patch adds workarounds for the following CPU errata to the MIPS +eBPF JIT, if enabled in the kernel configuration. + + - R10000 ll/sc weak ordering + - Loongson-3 ll/sc weak ordering + - Loongson-2F jump hang + +The Loongson-2F nop errata is implemented in uasm, which the JIT uses, +so no additional mitigations are needed for that. + +Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com> +Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com> +--- + +--- a/arch/mips/net/bpf_jit_comp.c ++++ b/arch/mips/net/bpf_jit_comp.c +@@ -404,6 +404,7 @@ void emit_alu_r(struct jit_context *ctx, + /* Atomic read-modify-write (32-bit) */ + void emit_atomic_r(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 code) + { ++ LLSC_sync(ctx); + emit(ctx, ll, MIPS_R_T9, off, dst); + switch (code) { + case BPF_ADD: +@@ -420,18 +421,19 @@ void emit_atomic_r(struct jit_context *c + break; + } + emit(ctx, sc, MIPS_R_T8, off, dst); +- emit(ctx, beqz, MIPS_R_T8, -16); ++ emit(ctx, LLSC_beqz, MIPS_R_T8, -16 - LLSC_offset); + emit(ctx, nop); /* Delay slot */ + } + + /* Atomic compare-and-exchange (32-bit) */ + void emit_cmpxchg_r(struct jit_context *ctx, u8 dst, u8 src, u8 res, s16 off) + { ++ LLSC_sync(ctx); + emit(ctx, ll, MIPS_R_T9, off, dst); + emit(ctx, bne, MIPS_R_T9, res, 12); + emit(ctx, move, MIPS_R_T8, src); /* Delay slot */ + emit(ctx, sc, MIPS_R_T8, off, dst); +- emit(ctx, beqz, MIPS_R_T8, -20); ++ emit(ctx, LLSC_beqz, MIPS_R_T8, -20 - LLSC_offset); + emit(ctx, move, res, MIPS_R_T9); /* Delay slot */ + clobber_reg(ctx, res); + } +--- a/arch/mips/net/bpf_jit_comp.h ++++ b/arch/mips/net/bpf_jit_comp.h +@@ -87,7 +87,7 @@ struct jit_context { + }; + + /* Emit the instruction if the JIT memory space has been allocated */ +-#define emit(ctx, func, ...) \ ++#define __emit(ctx, func, ...) \ + do { \ + if ((ctx)->target != NULL) { \ + u32 *p = &(ctx)->target[ctx->jit_index]; \ +@@ -95,6 +95,30 @@ do { \ + } \ + (ctx)->jit_index++; \ + } while (0) ++#define emit(...) 
__emit(__VA_ARGS__) ++ ++/* Workaround for R10000 ll/sc errata */ ++#ifdef CONFIG_WAR_R10000 ++#define LLSC_beqz beqzl ++#else ++#define LLSC_beqz beqz ++#endif ++ ++/* Workaround for Loongson-3 ll/sc errata */ ++#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS ++#define LLSC_sync(ctx) emit(ctx, sync, 0) ++#define LLSC_offset 4 ++#else ++#define LLSC_sync(ctx) ++#define LLSC_offset 0 ++#endif ++ ++/* Workaround for Loongson-2F jump errata */ ++#ifdef CONFIG_CPU_JUMP_WORKAROUNDS ++#define JALR_MASK 0xffffffffcfffffffULL ++#else ++#define JALR_MASK (~0ULL) ++#endif + + /* + * Mark a BPF register as accessed, it needs to be +--- a/arch/mips/net/bpf_jit_comp64.c ++++ b/arch/mips/net/bpf_jit_comp64.c +@@ -375,6 +375,7 @@ static void emit_atomic_r64(struct jit_c + u8 t1 = MIPS_R_T6; + u8 t2 = MIPS_R_T7; + ++ LLSC_sync(ctx); + emit(ctx, lld, t1, off, dst); + switch (code) { + case BPF_ADD: +@@ -391,7 +392,7 @@ static void emit_atomic_r64(struct jit_c + break; + } + emit(ctx, scd, t2, off, dst); +- emit(ctx, beqz, t2, -16); ++ emit(ctx, LLSC_beqz, t2, -16 - LLSC_offset); + emit(ctx, nop); /* Delay slot */ + } + +@@ -414,7 +415,7 @@ static int emit_call(struct jit_context + push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0); + + /* Emit function call */ +- emit_mov_i64(ctx, tmp, addr); ++ emit_mov_i64(ctx, tmp, addr & JALR_MASK); + emit(ctx, jalr, MIPS_R_RA, tmp); + emit(ctx, nop); /* Delay slot */ + diff --git a/target/linux/generic/backport-5.15/050-v5.16-05-mips-bpf-Enable-eBPF-JITs.patch b/target/linux/generic/backport-5.15/050-v5.16-05-mips-bpf-Enable-eBPF-JITs.patch new file mode 100644 index 0000000000..10685c5f3c --- /dev/null +++ b/target/linux/generic/backport-5.15/050-v5.16-05-mips-bpf-Enable-eBPF-JITs.patch @@ -0,0 +1,61 @@ +From: Johan Almbladh <johan.almbladh@anyfinetworks.com> +Date: Tue, 5 Oct 2021 18:54:07 +0200 +Subject: [PATCH] mips: bpf: Enable eBPF JITs + +This patch enables the new eBPF JITs for 32-bit and 64-bit MIPS. It also +disables the old cBPF JIT to so cBPF programs are converted to use the +new JIT. + +Workarounds for R4000 CPU errata are not implemented by the JIT, so the +JIT is disabled if any of those workarounds are configured. 
+ +Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com> +--- + +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -3294,6 +3294,7 @@ S: Supported + F: arch/arm64/net/ + + BPF JIT for MIPS (32-BIT AND 64-BIT) ++M: Johan Almbladh <johan.almbladh@anyfinetworks.com> + M: Paul Burton <paulburton@kernel.org> + L: netdev@vger.kernel.org + L: bpf@vger.kernel.org +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -49,7 +49,6 @@ config MIPS + select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES + select HAVE_ASM_MODVERSIONS +- select HAVE_CBPF_JIT if !64BIT && !CPU_MICROMIPS + select HAVE_CONTEXT_TRACKING + select HAVE_TIF_NOHZ + select HAVE_C_RECORDMCOUNT +@@ -57,7 +56,10 @@ config MIPS + select HAVE_DEBUG_STACKOVERFLOW + select HAVE_DMA_CONTIGUOUS + select HAVE_DYNAMIC_FTRACE +- select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2 ++ select HAVE_EBPF_JIT if !CPU_MICROMIPS && \ ++ !CPU_DADDI_WORKAROUNDS && \ ++ !CPU_R4000_WORKAROUNDS && \ ++ !CPU_R4400_WORKAROUNDS + select HAVE_EXIT_THREAD + select HAVE_FAST_GUP + select HAVE_FTRACE_MCOUNT_RECORD +--- a/arch/mips/net/Makefile ++++ b/arch/mips/net/Makefile +@@ -2,9 +2,10 @@ + # MIPS networking code + + obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o ++obj-$(CONFIG_MIPS_EBPF_JIT) += bpf_jit_comp.o + + ifeq ($(CONFIG_32BIT),y) +- obj-$(CONFIG_MIPS_EBPF_JIT) += bpf_jit_comp.o bpf_jit_comp32.o ++ obj-$(CONFIG_MIPS_EBPF_JIT) += bpf_jit_comp32.o + else +- obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o ++ obj-$(CONFIG_MIPS_EBPF_JIT) += bpf_jit_comp64.o + endif diff --git a/target/linux/generic/backport-5.15/050-v5.16-06-mips-bpf-Remove-old-BPF-JIT-implementations.patch b/target/linux/generic/backport-5.15/050-v5.16-06-mips-bpf-Remove-old-BPF-JIT-implementations.patch new file mode 100644 index 0000000000..e25c336831 --- /dev/null +++ b/target/linux/generic/backport-5.15/050-v5.16-06-mips-bpf-Remove-old-BPF-JIT-implementations.patch @@ -0,0 +1,387 @@ +From: Johan Almbladh <johan.almbladh@anyfinetworks.com> +Date: Tue, 5 Oct 2021 18:54:08 +0200 +Subject: [PATCH] mips: bpf: Remove old BPF JIT implementations + +This patch removes the old 32-bit cBPF and 64-bit eBPF JIT implementations. +They are replaced by a new eBPF implementation that supports both 32-bit +and 64-bit MIPS CPUs. + +Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com> +--- + delete mode 100644 arch/mips/net/bpf_jit.c + delete mode 100644 arch/mips/net/bpf_jit.h + delete mode 100644 arch/mips/net/bpf_jit_asm.S + delete mode 100644 arch/mips/net/ebpf_jit.c + +--- a/arch/mips/net/bpf_jit.h ++++ /dev/null +@@ -1,81 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0-only */ +-/* +- * Just-In-Time compiler for BPF filters on MIPS +- * +- * Copyright (c) 2014 Imagination Technologies Ltd. 
+- * Author: Markos Chandras <markos.chandras@imgtec.com> +- */ +- +-#ifndef BPF_JIT_MIPS_OP_H +-#define BPF_JIT_MIPS_OP_H +- +-/* Registers used by JIT */ +-#define MIPS_R_ZERO 0 +-#define MIPS_R_V0 2 +-#define MIPS_R_A0 4 +-#define MIPS_R_A1 5 +-#define MIPS_R_T4 12 +-#define MIPS_R_T5 13 +-#define MIPS_R_T6 14 +-#define MIPS_R_T7 15 +-#define MIPS_R_S0 16 +-#define MIPS_R_S1 17 +-#define MIPS_R_S2 18 +-#define MIPS_R_S3 19 +-#define MIPS_R_S4 20 +-#define MIPS_R_S5 21 +-#define MIPS_R_S6 22 +-#define MIPS_R_S7 23 +-#define MIPS_R_SP 29 +-#define MIPS_R_RA 31 +- +-/* Conditional codes */ +-#define MIPS_COND_EQ 0x1 +-#define MIPS_COND_GE (0x1 << 1) +-#define MIPS_COND_GT (0x1 << 2) +-#define MIPS_COND_NE (0x1 << 3) +-#define MIPS_COND_ALL (0x1 << 4) +-/* Conditionals on X register or K immediate */ +-#define MIPS_COND_X (0x1 << 5) +-#define MIPS_COND_K (0x1 << 6) +- +-#define r_ret MIPS_R_V0 +- +-/* +- * Use 2 scratch registers to avoid pipeline interlocks. +- * There is no overhead during epilogue and prologue since +- * any of the $s0-$s6 registers will only be preserved if +- * they are going to actually be used. +- */ +-#define r_skb_hl MIPS_R_S0 /* skb header length */ +-#define r_skb_data MIPS_R_S1 /* skb actual data */ +-#define r_off MIPS_R_S2 +-#define r_A MIPS_R_S3 +-#define r_X MIPS_R_S4 +-#define r_skb MIPS_R_S5 +-#define r_M MIPS_R_S6 +-#define r_skb_len MIPS_R_S7 +-#define r_s0 MIPS_R_T4 /* scratch reg 1 */ +-#define r_s1 MIPS_R_T5 /* scratch reg 2 */ +-#define r_tmp_imm MIPS_R_T6 /* No need to preserve this */ +-#define r_tmp MIPS_R_T7 /* No need to preserve this */ +-#define r_zero MIPS_R_ZERO +-#define r_sp MIPS_R_SP +-#define r_ra MIPS_R_RA +- +-#ifndef __ASSEMBLY__ +- +-/* Declare ASM helpers */ +- +-#define DECLARE_LOAD_FUNC(func) \ +- extern u8 func(unsigned long *skb, int offset); \ +- extern u8 func##_negative(unsigned long *skb, int offset); \ +- extern u8 func##_positive(unsigned long *skb, int offset) +- +-DECLARE_LOAD_FUNC(sk_load_word); +-DECLARE_LOAD_FUNC(sk_load_half); +-DECLARE_LOAD_FUNC(sk_load_byte); +- +-#endif +- +-#endif /* BPF_JIT_MIPS_OP_H */ +--- a/arch/mips/net/bpf_jit_asm.S ++++ /dev/null +@@ -1,285 +0,0 @@ +-/* +- * bpf_jib_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF +- * compiler. +- * +- * Copyright (C) 2015 Imagination Technologies Ltd. +- * Author: Markos Chandras <markos.chandras@imgtec.com> +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License as published by the +- * Free Software Foundation; version 2 of the License. +- */ +- +-#include <asm/asm.h> +-#include <asm/isa-rev.h> +-#include <asm/regdef.h> +-#include "bpf_jit.h" +- +-/* ABI +- * +- * r_skb_hl skb header length +- * r_skb_data skb data +- * r_off(a1) offset register +- * r_A BPF register A +- * r_X PF register X +- * r_skb(a0) *skb +- * r_M *scratch memory +- * r_skb_le skb length +- * r_s0 Scratch register 0 +- * r_s1 Scratch register 1 +- * +- * On entry: +- * a0: *skb +- * a1: offset (imm or imm + X) +- * +- * All non-BPF-ABI registers are free for use. On return, we only +- * care about r_ret. The BPF-ABI registers are assumed to remain +- * unmodified during the entire filter operation. 
+- */ +- +-#define skb a0 +-#define offset a1 +-#define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */ +- +- /* We know better :) so prevent assembler reordering etc */ +- .set noreorder +- +-#define is_offset_negative(TYPE) \ +- /* If offset is negative we have more work to do */ \ +- slti t0, offset, 0; \ +- bgtz t0, bpf_slow_path_##TYPE##_neg; \ +- /* Be careful what follows in DS. */ +- +-#define is_offset_in_header(SIZE, TYPE) \ +- /* Reading from header? */ \ +- addiu $r_s0, $r_skb_hl, -SIZE; \ +- slt t0, $r_s0, offset; \ +- bgtz t0, bpf_slow_path_##TYPE; \ +- +-LEAF(sk_load_word) +- is_offset_negative(word) +-FEXPORT(sk_load_word_positive) +- is_offset_in_header(4, word) +- /* Offset within header boundaries */ +- PTR_ADDU t1, $r_skb_data, offset +- .set reorder +- lw $r_A, 0(t1) +- .set noreorder +-#ifdef CONFIG_CPU_LITTLE_ENDIAN +-# if MIPS_ISA_REV >= 2 +- wsbh t0, $r_A +- rotr $r_A, t0, 16 +-# else +- sll t0, $r_A, 24 +- srl t1, $r_A, 24 +- srl t2, $r_A, 8 +- or t0, t0, t1 +- andi t2, t2, 0xff00 +- andi t1, $r_A, 0xff00 +- or t0, t0, t2 +- sll t1, t1, 8 +- or $r_A, t0, t1 +-# endif +-#endif +- jr $r_ra +- move $r_ret, zero +- END(sk_load_word) +- +-LEAF(sk_load_half) +- is_offset_negative(half) +-FEXPORT(sk_load_half_positive) +- is_offset_in_header(2, half) +- /* Offset within header boundaries */ +- PTR_ADDU t1, $r_skb_data, offset +- lhu $r_A, 0(t1) +-#ifdef CONFIG_CPU_LITTLE_ENDIAN +-# if MIPS_ISA_REV >= 2 +- wsbh $r_A, $r_A +-# else +- sll t0, $r_A, 8 +- srl t1, $r_A, 8 +- andi t0, t0, 0xff00 +- or $r_A, t0, t1 +-# endif +-#endif +- jr $r_ra +- move $r_ret, zero +- END(sk_load_half) +- +-LEAF(sk_load_byte) +- is_offset_negative(byte) +-FEXPORT(sk_load_byte_positive) +- is_offset_in_header(1, byte) +- /* Offset within header boundaries */ +- PTR_ADDU t1, $r_skb_data, offset +- lbu $r_A, 0(t1) +- jr $r_ra +- move $r_ret, zero +- END(sk_load_byte) +- +-/* +- * call skb_copy_bits: +- * (prototype in linux/skbuff.h) +- * +- * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len) +- * +- * o32 mandates we leave 4 spaces for argument registers in case +- * the callee needs to use them. Even though we don't care about +- * the argument registers ourselves, we need to allocate that space +- * to remain ABI compliant since the callee may want to use that space. +- * We also allocate 2 more spaces for $r_ra and our return register (*to). +- * +- * n64 is a bit different. The *caller* will allocate the space to preserve +- * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no +- * good reason but it does not matter that much really. +- * +- * (void *to) is returned in r_s0 +- * +- */ +-#ifdef CONFIG_CPU_LITTLE_ENDIAN +-#define DS_OFFSET(SIZE) (4 * SZREG) +-#else +-#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE)) +-#endif +-#define bpf_slow_path_common(SIZE) \ +- /* Quick check. Are we within reasonable boundaries? */ \ +- LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \ +- sltu $r_s0, offset, $r_s1; \ +- beqz $r_s0, fault; \ +- /* Load 4th argument in DS */ \ +- LONG_ADDIU a3, zero, SIZE; \ +- PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \ +- PTR_LA t0, skb_copy_bits; \ +- PTR_S $r_ra, (5 * SZREG)($r_sp); \ +- /* Assign low slot to a2 */ \ +- PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \ +- jalr t0; \ +- /* Reset our destination slot (DS but it's ok) */ \ +- INT_S zero, (4 * SZREG)($r_sp); \ +- /* \ +- * skb_copy_bits returns 0 on success and -EFAULT \ +- * on error. Our data live in a2. Do not bother with \ +- * our data if an error has been returned. 
\ +- */ \ +- /* Restore our frame */ \ +- PTR_L $r_ra, (5 * SZREG)($r_sp); \ +- INT_L $r_s0, (4 * SZREG)($r_sp); \ +- bltz v0, fault; \ +- PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \ +- move $r_ret, zero; \ +- +-NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp) +- bpf_slow_path_common(4) +-#ifdef CONFIG_CPU_LITTLE_ENDIAN +-# if MIPS_ISA_REV >= 2 +- wsbh t0, $r_s0 +- jr $r_ra +- rotr $r_A, t0, 16 +-# else +- sll t0, $r_s0, 24 +- srl t1, $r_s0, 24 +- srl t2, $r_s0, 8 +- or t0, t0, t1 +- andi t2, t2, 0xff00 +- andi t1, $r_s0, 0xff00 +- or t0, t0, t2 +- sll t1, t1, 8 +- jr $r_ra +- or $r_A, t0, t1 +-# endif +-#else +- jr $r_ra +- move $r_A, $r_s0 +-#endif +- +- END(bpf_slow_path_word) +- +-NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp) +- bpf_slow_path_common(2) +-#ifdef CONFIG_CPU_LITTLE_ENDIAN +-# if MIPS_ISA_REV >= 2 +- jr $r_ra +- wsbh $r_A, $r_s0 +-# else +- sll t0, $r_s0, 8 +- andi t1, $r_s0, 0xff00 +- andi t0, t0, 0xff00 +- srl t1, t1, 8 +- jr $r_ra +- or $r_A, t0, t1 +-# endif +-#else +- jr $r_ra +- move $r_A, $r_s0 +-#endif +- +- END(bpf_slow_path_half) +- +-NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp) +- bpf_slow_path_common(1) +- jr $r_ra +- move $r_A, $r_s0 +- +- END(bpf_slow_path_byte) +- +-/* +- * Negative entry points +- */ +- .macro bpf_is_end_of_data +- li t0, SKF_LL_OFF +- /* Reading link layer data? */ +- slt t1, offset, t0 +- bgtz t1, fault +- /* Be careful what follows in DS. */ +- .endm +-/* +- * call skb_copy_bits: +- * (prototype in linux/filter.h) +- * +- * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, +- * int k, unsigned int size) +- * +- * see above (bpf_slow_path_common) for ABI restrictions +- */ +-#define bpf_negative_common(SIZE) \ +- PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \ +- PTR_LA t0, bpf_internal_load_pointer_neg_helper; \ +- PTR_S $r_ra, (5 * SZREG)($r_sp); \ +- jalr t0; \ +- li a2, SIZE; \ +- PTR_L $r_ra, (5 * SZREG)($r_sp); \ +- /* Check return pointer */ \ +- beqz v0, fault; \ +- PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \ +- /* Preserve our pointer */ \ +- move $r_s0, v0; \ +- /* Set return value */ \ +- move $r_ret, zero; \ +- +-bpf_slow_path_word_neg: +- bpf_is_end_of_data +-NESTED(sk_load_word_negative, (6 * SZREG), $r_sp) +- bpf_negative_common(4) +- jr $r_ra +- lw $r_A, 0($r_s0) +- END(sk_load_word_negative) +- +-bpf_slow_path_half_neg: +- bpf_is_end_of_data +-NESTED(sk_load_half_negative, (6 * SZREG), $r_sp) +- bpf_negative_common(2) +- jr $r_ra +- lhu $r_A, 0($r_s0) +- END(sk_load_half_negative) +- +-bpf_slow_path_byte_neg: +- bpf_is_end_of_data +-NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp) +- bpf_negative_common(1) +- jr $r_ra +- lbu $r_A, 0($r_s0) +- END(sk_load_byte_negative) +- +-fault: +- jr $r_ra +- addiu $r_ret, zero, 1 diff --git a/target/linux/generic/backport-5.15/071-crypto-arm-chacha-neon-optimize-for-non-block-size-m.patch b/target/linux/generic/backport-5.15/071-crypto-arm-chacha-neon-optimize-for-non-block-size-m.patch new file mode 100644 index 0000000000..b1f46e9af8 --- /dev/null +++ b/target/linux/generic/backport-5.15/071-crypto-arm-chacha-neon-optimize-for-non-block-size-m.patch @@ -0,0 +1,272 @@ +From 03662fcd41f4b764857f17b95f9a2a63c24bddd4 Mon Sep 17 00:00:00 2001 +From: Ard Biesheuvel <ardb@kernel.org> +Date: Tue, 3 Nov 2020 17:28:09 +0100 +Subject: [PATCH 1/2] crypto: arm/chacha-neon - optimize for non-block size + multiples + +commit 86cd97ec4b943af35562a74688bc4e909b32c3d1 upstream. + +The current NEON based ChaCha implementation for ARM is optimized for +multiples of 4x the ChaCha block size (64 bytes). 
This makes sense for +block encryption, but given that ChaCha is also often used in the +context of networking, it makes sense to consider arbitrary length +inputs as well. + +For example, WireGuard typically uses 1420 byte packets, and performing +ChaCha encryption involves 5 invocations of chacha_4block_xor_neon() +and 3 invocations of chacha_block_xor_neon(), where the last one also +involves a memcpy() using a buffer on the stack to process the final +chunk of 1420 % 64 == 12 bytes. + +Let's optimize for this case as well, by letting chacha_4block_xor_neon() +deal with any input size between 64 and 256 bytes, using NEON permutation +instructions and overlapping loads and stores. This way, the 140 byte +tail of a 1420 byte input buffer can simply be processed in one go. + +This results in the following performance improvements for 1420 byte +blocks, without significant impact on power-of-2 input sizes. (Note +that Raspberry Pi is widely used in combination with a 32-bit kernel, +even though the core is 64-bit capable) + + Cortex-A8 (BeagleBone) : 7% + Cortex-A15 (Calxeda Midway) : 21% + Cortex-A53 (Raspberry Pi 3) : 3% + Cortex-A72 (Raspberry Pi 4) : 19% + +Cc: Eric Biggers <ebiggers@google.com> +Cc: "Jason A . Donenfeld" <Jason@zx2c4.com> +Signed-off-by: Ard Biesheuvel <ardb@kernel.org> +Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> +Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com> +--- + arch/arm/crypto/chacha-glue.c | 34 +++++------ + arch/arm/crypto/chacha-neon-core.S | 97 +++++++++++++++++++++++++++--- + 2 files changed, 107 insertions(+), 24 deletions(-) + +--- a/arch/arm/crypto/chacha-glue.c ++++ b/arch/arm/crypto/chacha-glue.c +@@ -23,7 +23,7 @@ + asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src, + int nrounds); + asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src, +- int nrounds); ++ int nrounds, unsigned int nbytes); + asmlinkage void hchacha_block_arm(const u32 *state, u32 *out, int nrounds); + asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds); + +@@ -42,24 +42,24 @@ static void chacha_doneon(u32 *state, u8 + { + u8 buf[CHACHA_BLOCK_SIZE]; + +- while (bytes >= CHACHA_BLOCK_SIZE * 4) { +- chacha_4block_xor_neon(state, dst, src, nrounds); +- bytes -= CHACHA_BLOCK_SIZE * 4; +- src += CHACHA_BLOCK_SIZE * 4; +- dst += CHACHA_BLOCK_SIZE * 4; +- state[12] += 4; +- } +- while (bytes >= CHACHA_BLOCK_SIZE) { +- chacha_block_xor_neon(state, dst, src, nrounds); +- bytes -= CHACHA_BLOCK_SIZE; +- src += CHACHA_BLOCK_SIZE; +- dst += CHACHA_BLOCK_SIZE; +- state[12]++; ++ while (bytes > CHACHA_BLOCK_SIZE) { ++ unsigned int l = min(bytes, CHACHA_BLOCK_SIZE * 4U); ++ ++ chacha_4block_xor_neon(state, dst, src, nrounds, l); ++ bytes -= l; ++ src += l; ++ dst += l; ++ state[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE); + } + if (bytes) { +- memcpy(buf, src, bytes); +- chacha_block_xor_neon(state, buf, buf, nrounds); +- memcpy(dst, buf, bytes); ++ const u8 *s = src; ++ u8 *d = dst; ++ ++ if (bytes != CHACHA_BLOCK_SIZE) ++ s = d = memcpy(buf, src, bytes); ++ chacha_block_xor_neon(state, d, s, nrounds); ++ if (d != dst) ++ memcpy(dst, buf, bytes); + } + } + +--- a/arch/arm/crypto/chacha-neon-core.S ++++ b/arch/arm/crypto/chacha-neon-core.S +@@ -47,6 +47,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/cache.h> + + .text + .fpu neon +@@ -205,7 +206,7 @@ ENDPROC(hchacha_block_neon) + + .align 5 + ENTRY(chacha_4block_xor_neon) +- push {r4-r5} ++ push {r4, lr} + mov r4, sp // preserve the stack pointer 
+ sub ip, sp, #0x20 // allocate a 32 byte buffer + bic ip, ip, #0x1f // aligned to 32 bytes +@@ -229,10 +230,10 @@ ENTRY(chacha_4block_xor_neon) + vld1.32 {q0-q1}, [r0] + vld1.32 {q2-q3}, [ip] + +- adr r5, .Lctrinc ++ adr lr, .Lctrinc + vdup.32 q15, d7[1] + vdup.32 q14, d7[0] +- vld1.32 {q4}, [r5, :128] ++ vld1.32 {q4}, [lr, :128] + vdup.32 q13, d6[1] + vdup.32 q12, d6[0] + vdup.32 q11, d5[1] +@@ -455,7 +456,7 @@ ENTRY(chacha_4block_xor_neon) + + // Re-interleave the words in the first two rows of each block (x0..7). + // Also add the counter values 0-3 to x12[0-3]. +- vld1.32 {q8}, [r5, :128] // load counter values 0-3 ++ vld1.32 {q8}, [lr, :128] // load counter values 0-3 + vzip.32 q0, q1 // => (0 1 0 1) (0 1 0 1) + vzip.32 q2, q3 // => (2 3 2 3) (2 3 2 3) + vzip.32 q4, q5 // => (4 5 4 5) (4 5 4 5) +@@ -493,6 +494,8 @@ ENTRY(chacha_4block_xor_neon) + + // Re-interleave the words in the last two rows of each block (x8..15). + vld1.32 {q8-q9}, [sp, :256] ++ mov sp, r4 // restore original stack pointer ++ ldr r4, [r4, #8] // load number of bytes + vzip.32 q12, q13 // => (12 13 12 13) (12 13 12 13) + vzip.32 q14, q15 // => (14 15 14 15) (14 15 14 15) + vzip.32 q8, q9 // => (8 9 8 9) (8 9 8 9) +@@ -520,41 +523,121 @@ ENTRY(chacha_4block_xor_neon) + // XOR the rest of the data with the keystream + + vld1.8 {q0-q1}, [r2]! ++ subs r4, r4, #96 + veor q0, q0, q8 + veor q1, q1, q12 ++ ble .Lle96 + vst1.8 {q0-q1}, [r1]! + + vld1.8 {q0-q1}, [r2]! ++ subs r4, r4, #32 + veor q0, q0, q2 + veor q1, q1, q6 ++ ble .Lle128 + vst1.8 {q0-q1}, [r1]! + + vld1.8 {q0-q1}, [r2]! ++ subs r4, r4, #32 + veor q0, q0, q10 + veor q1, q1, q14 ++ ble .Lle160 + vst1.8 {q0-q1}, [r1]! + + vld1.8 {q0-q1}, [r2]! ++ subs r4, r4, #32 + veor q0, q0, q4 + veor q1, q1, q5 ++ ble .Lle192 + vst1.8 {q0-q1}, [r1]! + + vld1.8 {q0-q1}, [r2]! ++ subs r4, r4, #32 + veor q0, q0, q9 + veor q1, q1, q13 ++ ble .Lle224 + vst1.8 {q0-q1}, [r1]! + + vld1.8 {q0-q1}, [r2]! ++ subs r4, r4, #32 + veor q0, q0, q3 + veor q1, q1, q7 ++ blt .Llt256 ++.Lout: + vst1.8 {q0-q1}, [r1]! + + vld1.8 {q0-q1}, [r2] +- mov sp, r4 // restore original stack pointer + veor q0, q0, q11 + veor q1, q1, q15 + vst1.8 {q0-q1}, [r1] + +- pop {r4-r5} +- bx lr ++ pop {r4, pc} ++ ++.Lle192: ++ vmov q4, q9 ++ vmov q5, q13 ++ ++.Lle160: ++ // nothing to do ++ ++.Lfinalblock: ++ // Process the final block if processing less than 4 full blocks. ++ // Entered with 32 bytes of ChaCha cipher stream in q4-q5, and the ++ // previous 32 byte output block that still needs to be written at ++ // [r1] in q0-q1. 
++ beq .Lfullblock ++ ++.Lpartialblock: ++ adr lr, .Lpermute + 32 ++ add r2, r2, r4 ++ add lr, lr, r4 ++ add r4, r4, r1 ++ ++ vld1.8 {q2-q3}, [lr] ++ vld1.8 {q6-q7}, [r2] ++ ++ add r4, r4, #32 ++ ++ vtbl.8 d4, {q4-q5}, d4 ++ vtbl.8 d5, {q4-q5}, d5 ++ vtbl.8 d6, {q4-q5}, d6 ++ vtbl.8 d7, {q4-q5}, d7 ++ ++ veor q6, q6, q2 ++ veor q7, q7, q3 ++ ++ vst1.8 {q6-q7}, [r4] // overlapping stores ++ vst1.8 {q0-q1}, [r1] ++ pop {r4, pc} ++ ++.Lfullblock: ++ vmov q11, q4 ++ vmov q15, q5 ++ b .Lout ++.Lle96: ++ vmov q4, q2 ++ vmov q5, q6 ++ b .Lfinalblock ++.Lle128: ++ vmov q4, q10 ++ vmov q5, q14 ++ b .Lfinalblock ++.Lle224: ++ vmov q4, q3 ++ vmov q5, q7 ++ b .Lfinalblock ++.Llt256: ++ vmov q4, q11 ++ vmov q5, q15 ++ b .Lpartialblock + ENDPROC(chacha_4block_xor_neon) ++ ++ .align L1_CACHE_SHIFT ++.Lpermute: ++ .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 ++ .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f ++ .byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 ++ .byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f ++ .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 ++ .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f ++ .byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 ++ .byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f diff --git a/target/linux/generic/backport-5.15/072-crypto-arm-chacha-neon-add-missing-counter-increment.patch b/target/linux/generic/backport-5.15/072-crypto-arm-chacha-neon-add-missing-counter-increment.patch new file mode 100644 index 0000000000..1e4d2041e5 --- /dev/null +++ b/target/linux/generic/backport-5.15/072-crypto-arm-chacha-neon-add-missing-counter-increment.patch @@ -0,0 +1,38 @@ +From 7f63462faf9eab69132bea9abd48c2c05a93145b Mon Sep 17 00:00:00 2001 +From: Ard Biesheuvel <ardb@kernel.org> +Date: Sun, 13 Dec 2020 15:39:29 +0100 +Subject: [PATCH 2/2] crypto: arm/chacha-neon - add missing counter increment + +commit fd16931a2f518a32753920ff20895e5cf04c8ff1 upstream. + +Commit 86cd97ec4b943af3 ("crypto: arm/chacha-neon - optimize for non-block +size multiples") refactored the chacha block handling in the glue code in +a way that may result in the counter increment to be omitted when calling +chacha_block_xor_neon() to process a full block. This violates the skcipher +API, which requires that the output IV is suitable for handling more input +as long as the preceding input has been presented in round multiples of the +block size. Also, the same code is exposed via the chacha library interface +whose callers may actually rely on this increment to occur even for final +blocks that are smaller than the chacha block size. + +So increment the counter after calling chacha_block_xor_neon(). + +Fixes: 86cd97ec4b943af3 ("crypto: arm/chacha-neon - optimize for non-block size multiples") +Reported-by: Eric Biggers <ebiggers@kernel.org> +Signed-off-by: Ard Biesheuvel <ardb@kernel.org> +Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> +Signed-off-by: Jason A. 
Donenfeld <Jason@zx2c4.com> +--- + arch/arm/crypto/chacha-glue.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/arch/arm/crypto/chacha-glue.c ++++ b/arch/arm/crypto/chacha-glue.c +@@ -60,6 +60,7 @@ static void chacha_doneon(u32 *state, u8 + chacha_block_xor_neon(state, d, s, nrounds); + if (d != dst) + memcpy(dst, buf, bytes); ++ state[12]++; + } + } + diff --git a/target/linux/generic/backport-5.15/080-wireguard-peer-put-frequently-used-members-above-cac.patch b/target/linux/generic/backport-5.15/080-wireguard-peer-put-frequently-used-members-above-cac.patch new file mode 100644 index 0000000000..444fd677b4 --- /dev/null +++ b/target/linux/generic/backport-5.15/080-wireguard-peer-put-frequently-used-members-above-cac.patch @@ -0,0 +1,42 @@ +From a13827e9091c07e25cdeec9a402d74a27e2a1111 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" <Jason@zx2c4.com> +Date: Mon, 22 Feb 2021 17:25:46 +0100 +Subject: [PATCH] wireguard: peer: put frequently used members above cache + lines + +commit 5a0598695634a6bb4126818902dd9140cd9df8b6 upstream. + +The is_dead boolean is checked for every single packet, while the +internal_id member is used basically only for pr_debug messages. So it +makes sense to hoist up is_dead into some space formerly unused by a +struct hole, while demoting internal_api to below the lowest struct +cache line. + +Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com> +--- + drivers/net/wireguard/peer.h | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/net/wireguard/peer.h ++++ b/drivers/net/wireguard/peer.h +@@ -39,6 +39,7 @@ struct wg_peer { + struct prev_queue tx_queue, rx_queue; + struct sk_buff_head staged_packet_queue; + int serial_work_cpu; ++ bool is_dead; + struct noise_keypairs keypairs; + struct endpoint endpoint; + struct dst_cache endpoint_cache; +@@ -61,9 +62,8 @@ struct wg_peer { + struct rcu_head rcu; + struct list_head peer_list; + struct list_head allowedips_list; +- u64 internal_id; + struct napi_struct napi; +- bool is_dead; ++ u64 internal_id; + }; + + struct wg_peer *wg_peer_create(struct wg_device *wg, diff --git a/target/linux/generic/backport-5.15/081-net-next-regmap-allow-to-define-reg_update_bits-for-no-bus.patch b/target/linux/generic/backport-5.15/081-net-next-regmap-allow-to-define-reg_update_bits-for-no-bus.patch new file mode 100644 index 0000000000..6e274acb1f --- /dev/null +++ b/target/linux/generic/backport-5.15/081-net-next-regmap-allow-to-define-reg_update_bits-for-no-bus.patch @@ -0,0 +1,52 @@ +From 02d6fdecb9c38de19065f6bed8d5214556fd061d Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Thu, 4 Nov 2021 16:00:40 +0100 +Subject: regmap: allow to define reg_update_bits for no bus configuration + +Some device requires a special handling for reg_update_bits and can't use +the normal regmap read write logic. An example is when locking is +handled by the device and rmw operations requires to do atomic operations. +Allow to declare a dedicated function in regmap_config for +reg_update_bits in no bus configuration. 
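As a rough usage sketch (not taken from this patch), a driver whose hardware needs its own locking around read-modify-write could wire the new optional callback into a no-bus regmap roughly as below; every my_* name is a hypothetical placeholder, and only the regmap_config fields and devm_regmap_init() come from the API touched here.

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/regmap.h>

struct my_chip {
	struct mutex lock;	/* stands in for the device-side rmw locking */
	u32 regs[32];		/* fake register file, for illustration only */
};

static int my_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	struct my_chip *chip = context;

	*val = chip->regs[reg];
	return 0;
}

static int my_reg_write(void *context, unsigned int reg, unsigned int val)
{
	struct my_chip *chip = context;

	chip->regs[reg] = val;
	return 0;
}

/* The whole read-modify-write runs under one lock, which the generic
 * read-then-write path could not guarantee for such a device. */
static int my_reg_update_bits(void *context, unsigned int reg,
			      unsigned int mask, unsigned int val)
{
	struct my_chip *chip = context;

	mutex_lock(&chip->lock);
	chip->regs[reg] = (chip->regs[reg] & ~mask) | (val & mask);
	mutex_unlock(&chip->lock);
	return 0;
}

static const struct regmap_config my_regmap_config = {
	.max_register	 = 31,
	.reg_read	 = my_reg_read,
	.reg_write	 = my_reg_write,
	.reg_update_bits = my_reg_update_bits,	/* new optional callback */
};

/* In probe(): mutex_init(&chip->lock);
 * map = devm_regmap_init(dev, NULL, chip, &my_regmap_config);
 * With a NULL bus, regmap_update_bits() on this map is routed to
 * my_reg_update_bits() instead of a separate read and write. */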
+ +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Link: https://lore.kernel.org/r/20211104150040.1260-1-ansuelsmth@gmail.com +Signed-off-by: Mark Brown <broonie@kernel.org> +--- + drivers/base/regmap/regmap.c | 1 + + include/linux/regmap.h | 7 +++++++ + 2 files changed, 8 insertions(+) + +--- a/drivers/base/regmap/regmap.c ++++ b/drivers/base/regmap/regmap.c +@@ -842,6 +842,7 @@ struct regmap *__regmap_init(struct devi + if (!bus) { + map->reg_read = config->reg_read; + map->reg_write = config->reg_write; ++ map->reg_update_bits = config->reg_update_bits; + + map->defer_caching = false; + goto skip_format_initialization; +--- a/include/linux/regmap.h ++++ b/include/linux/regmap.h +@@ -289,6 +289,11 @@ typedef void (*regmap_unlock)(void *); + * read operation on a bus such as SPI, I2C, etc. Most of the + * devices do not need this. + * @reg_write: Same as above for writing. ++ * @reg_update_bits: Optional callback that if filled will be used to perform ++ * all the update_bits(rmw) operation. Should only be provided ++ * if the function require special handling with lock and reg ++ * handling and the operation cannot be represented as a simple ++ * update_bits operation on a bus such as SPI, I2C, etc. + * @fast_io: Register IO is fast. Use a spinlock instead of a mutex + * to perform locking. This field is ignored if custom lock/unlock + * functions are used (see fields lock/unlock of struct regmap_config). +@@ -366,6 +371,8 @@ struct regmap_config { + + int (*reg_read)(void *context, unsigned int reg, unsigned int *val); + int (*reg_write)(void *context, unsigned int reg, unsigned int val); ++ int (*reg_update_bits)(void *context, unsigned int reg, ++ unsigned int mask, unsigned int val); + + bool fast_io; + diff --git a/target/linux/generic/backport-5.15/103-v5.13-MIPS-select-CPU_MIPS64-for-remaining-MIPS64-CPUs.patch b/target/linux/generic/backport-5.15/103-v5.13-MIPS-select-CPU_MIPS64-for-remaining-MIPS64-CPUs.patch new file mode 100644 index 0000000000..fafe530ac5 --- /dev/null +++ b/target/linux/generic/backport-5.15/103-v5.13-MIPS-select-CPU_MIPS64-for-remaining-MIPS64-CPUs.patch @@ -0,0 +1,36 @@ +From 6523061868212473f63812a0c477a161742bed42 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" <Jason@zx2c4.com> +Date: Sat, 27 Feb 2021 13:20:24 +0100 +Subject: [PATCH] MIPS: select CPU_MIPS64 for remaining MIPS64 CPUs + +The CPU_MIPS64 and CPU_MIPS32 variables are supposed to be able to +distinguish broadly between 64-bit and 32-bit MIPS CPUs. However, they +weren't selected by the specialty CPUs, Octeon and Loongson, which meant +it was possible to hit a weird state of: + + MIPS=y, CONFIG_64BIT=y, CPU_MIPS64=n + +This commit rectifies the issue by having CPU_MIPS64 be selected when +the missing Octeon or Loongson models are selected. + +Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de> +Cc: Ralf Baechle <ralf@linux-mips.org> +Cc: George Cherian <gcherian@marvell.com> +Cc: Huacai Chen <chenhuacai@kernel.org> +Cc: Jiaxun Yang <jiaxun.yang@flygoat.com> +Signed-off-by: Jason A. 
Donenfeld <Jason@zx2c4.com> +--- + arch/mips/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -2088,7 +2088,7 @@ config CPU_MIPS32 + config CPU_MIPS64 + bool + default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R5 || \ +- CPU_MIPS64_R6 ++ CPU_MIPS64_R6 || CPU_LOONGSON64 || CPU_CAVIUM_OCTEON + + # + # These indicate the revision of the architecture diff --git a/target/linux/generic/backport-5.15/311-v5.11-MIPS-zboot-put-appended-dtb-into-a-section.patch b/target/linux/generic/backport-5.15/311-v5.11-MIPS-zboot-put-appended-dtb-into-a-section.patch new file mode 100644 index 0000000000..3f8808f702 --- /dev/null +++ b/target/linux/generic/backport-5.15/311-v5.11-MIPS-zboot-put-appended-dtb-into-a-section.patch @@ -0,0 +1,36 @@ +From 7d1531c81c0fb4c93bea8dc316043ad0e4d0c270 Mon Sep 17 00:00:00 2001 +From: Chuanhong Guo <gch981213@gmail.com> +Date: Sun, 25 Oct 2020 23:19:40 +0800 +Subject: [PATCH] MIPS: zboot: put appended dtb into a section + +This will make a separated section for dtb appear in ELF, and we can +then use objcopy to patch a dtb into vmlinuz when RAW_APPENDED_DTB +is set in kernel config. + +command to patch a dtb: +objcopy --set-section-flags=.appended_dtb=alloc,contents \ + --update-section=.appended_dtb=<target>.dtb vmlinuz vmlinuz-dtb + +Signed-off-by: Chuanhong Guo <gch981213@gmail.com> +--- + arch/mips/boot/compressed/ld.script | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +--- a/arch/mips/boot/compressed/ld.script ++++ b/arch/mips/boot/compressed/ld.script +@@ -31,9 +31,12 @@ SECTIONS + CONSTRUCTORS + . = ALIGN(16); + } +- __appended_dtb = .; +- /* leave space for appended DTB */ +- . += 0x100000; ++ ++ .appended_dtb : { ++ __appended_dtb = .; ++ /* leave space for appended DTB */ ++ . += 0x100000; ++ } + + _edata = .; + /* End of data section */ diff --git a/target/linux/generic/backport-5.15/343-netfilter-nft_flow_offload-handle-netdevice-events-f.patch b/target/linux/generic/backport-5.15/343-netfilter-nft_flow_offload-handle-netdevice-events-f.patch new file mode 100644 index 0000000000..d300af3342 --- /dev/null +++ b/target/linux/generic/backport-5.15/343-netfilter-nft_flow_offload-handle-netdevice-events-f.patch @@ -0,0 +1,106 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Thu, 25 Jan 2018 12:58:55 +0100 +Subject: [PATCH] netfilter: nft_flow_offload: handle netdevice events from + nf_flow_table + +Move the code that deals with device events to the core. 
+ +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/net/netfilter/nf_flow_table_core.c ++++ b/net/netfilter/nf_flow_table_core.c +@@ -576,13 +576,41 @@ void nf_flow_table_free(struct nf_flowta + } + EXPORT_SYMBOL_GPL(nf_flow_table_free); + ++static int nf_flow_table_netdev_event(struct notifier_block *this, ++ unsigned long event, void *ptr) ++{ ++ struct net_device *dev = netdev_notifier_info_to_dev(ptr); ++ ++ if (event != NETDEV_DOWN) ++ return NOTIFY_DONE; ++ ++ nf_flow_table_cleanup(dev); ++ ++ return NOTIFY_DONE; ++} ++ ++static struct notifier_block flow_offload_netdev_notifier = { ++ .notifier_call = nf_flow_table_netdev_event, ++}; ++ + static int __init nf_flow_table_module_init(void) + { +- return nf_flow_table_offload_init(); ++ int ret; ++ ++ ret = nf_flow_table_offload_init(); ++ if (ret) ++ return ret; ++ ++ ret = register_netdevice_notifier(&flow_offload_netdev_notifier); ++ if (ret) ++ nf_flow_table_offload_exit(); ++ ++ return ret; + } + + static void __exit nf_flow_table_module_exit(void) + { ++ unregister_netdevice_notifier(&flow_offload_netdev_notifier); + nf_flow_table_offload_exit(); + } + +--- a/net/netfilter/nft_flow_offload.c ++++ b/net/netfilter/nft_flow_offload.c +@@ -237,47 +237,14 @@ static struct nft_expr_type nft_flow_off + .owner = THIS_MODULE, + }; + +-static int flow_offload_netdev_event(struct notifier_block *this, +- unsigned long event, void *ptr) +-{ +- struct net_device *dev = netdev_notifier_info_to_dev(ptr); +- +- if (event != NETDEV_DOWN) +- return NOTIFY_DONE; +- +- nf_flow_table_cleanup(dev); +- +- return NOTIFY_DONE; +-} +- +-static struct notifier_block flow_offload_netdev_notifier = { +- .notifier_call = flow_offload_netdev_event, +-}; +- + static int __init nft_flow_offload_module_init(void) + { +- int err; +- +- err = register_netdevice_notifier(&flow_offload_netdev_notifier); +- if (err) +- goto err; +- +- err = nft_register_expr(&nft_flow_offload_type); +- if (err < 0) +- goto register_expr; +- +- return 0; +- +-register_expr: +- unregister_netdevice_notifier(&flow_offload_netdev_notifier); +-err: +- return err; ++ return nft_register_expr(&nft_flow_offload_type); + } + + static void __exit nft_flow_offload_module_exit(void) + { + nft_unregister_expr(&nft_flow_offload_type); +- unregister_netdevice_notifier(&flow_offload_netdev_notifier); + } + + module_init(nft_flow_offload_module_init); diff --git a/target/linux/generic/backport-5.15/401-v5.11-dt-bindings-mtd-convert-fixed-partitions-to-the-json.patch b/target/linux/generic/backport-5.15/401-v5.11-dt-bindings-mtd-convert-fixed-partitions-to-the-json.patch new file mode 100644 index 0000000000..8aded43526 --- /dev/null +++ b/target/linux/generic/backport-5.15/401-v5.11-dt-bindings-mtd-convert-fixed-partitions-to-the-json.patch @@ -0,0 +1,324 @@ +From 04e9ab75267489224364fa510a88ada83e11c325 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Thu, 10 Dec 2020 18:23:52 +0100 +Subject: [PATCH] dt-bindings: mtd: convert "fixed-partitions" to the + json-schema +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This standardizes its documentation, allows validating with Makefile +checks and helps writing DTS files. + +Noticeable changes: +1. Dropped "Partitions can be represented by sub-nodes of a flash + device." as we also support subpartitions (don't have to be part of + flash device node) +2. Dropped "to Linux" as bindings are meant to be os agnostic. 
+ +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +Link: https://lore.kernel.org/r/20201210172352.31632-1-zajec5@gmail.com +Signed-off-by: Rob Herring <robh@kernel.org> +--- + .../devicetree/bindings/mtd/partition.txt | 131 +-------------- + .../mtd/partitions/fixed-partitions.yaml | 152 ++++++++++++++++++ + 2 files changed, 154 insertions(+), 129 deletions(-) + create mode 100644 Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml + +--- a/Documentation/devicetree/bindings/mtd/partition.txt ++++ b/Documentation/devicetree/bindings/mtd/partition.txt +@@ -24,137 +24,10 @@ another partitioning method. + Available bindings are listed in the "partitions" subdirectory. + + +-Fixed Partitions +-================ +- +-Partitions can be represented by sub-nodes of a flash device. This can be used +-on platforms which have strong conventions about which portions of a flash are +-used for what purposes, but which don't use an on-flash partition table such +-as RedBoot. +- +-The partition table should be a subnode of the flash node and should be named +-'partitions'. This node should have the following property: +-- compatible : (required) must be "fixed-partitions" +-Partitions are then defined in subnodes of the partitions node. ++Deprecated: partitions defined in flash node ++============================================ + + For backwards compatibility partitions as direct subnodes of the flash device are + supported. This use is discouraged. + NOTE: also for backwards compatibility, direct subnodes that have a compatible + string are not considered partitions, as they may be used for other bindings. +- +-#address-cells & #size-cells must both be present in the partitions subnode of the +-flash device. There are two valid values for both: +-<1>: for partitions that require a single 32-bit cell to represent their +- size/address (aka the value is below 4 GiB) +-<2>: for partitions that require two 32-bit cells to represent their +- size/address (aka the value is 4 GiB or greater). +- +-Required properties: +-- reg : The partition's offset and size within the flash +- +-Optional properties: +-- label : The label / name for this partition. If omitted, the label is taken +- from the node name (excluding the unit address). +-- read-only : This parameter, if present, is a hint to Linux that this +- partition should only be mounted read-only. This is usually used for flash +- partitions containing early-boot firmware images or data which should not be +- clobbered. 
+-- lock : Do not unlock the partition at initialization time (not supported on +- all devices) +-- slc-mode: This parameter, if present, allows one to emulate SLC mode on a +- partition attached to an MLC NAND thus making this partition immune to +- paired-pages corruptions +- +-Examples: +- +- +-flash@0 { +- partitions { +- compatible = "fixed-partitions"; +- #address-cells = <1>; +- #size-cells = <1>; +- +- partition@0 { +- label = "u-boot"; +- reg = <0x0000000 0x100000>; +- read-only; +- }; +- +- uimage@100000 { +- reg = <0x0100000 0x200000>; +- }; +- }; +-}; +- +-flash@1 { +- partitions { +- compatible = "fixed-partitions"; +- #address-cells = <1>; +- #size-cells = <2>; +- +- /* a 4 GiB partition */ +- partition@0 { +- label = "filesystem"; +- reg = <0x00000000 0x1 0x00000000>; +- }; +- }; +-}; +- +-flash@2 { +- partitions { +- compatible = "fixed-partitions"; +- #address-cells = <2>; +- #size-cells = <2>; +- +- /* an 8 GiB partition */ +- partition@0 { +- label = "filesystem #1"; +- reg = <0x0 0x00000000 0x2 0x00000000>; +- }; +- +- /* a 4 GiB partition */ +- partition@200000000 { +- label = "filesystem #2"; +- reg = <0x2 0x00000000 0x1 0x00000000>; +- }; +- }; +-}; +- +-flash@3 { +- partitions { +- compatible = "fixed-partitions"; +- #address-cells = <1>; +- #size-cells = <1>; +- +- partition@0 { +- label = "bootloader"; +- reg = <0x000000 0x100000>; +- read-only; +- }; +- +- firmware@100000 { +- label = "firmware"; +- reg = <0x100000 0xe00000>; +- compatible = "brcm,trx"; +- }; +- +- calibration@f00000 { +- label = "calibration"; +- reg = <0xf00000 0x100000>; +- compatible = "fixed-partitions"; +- ranges = <0 0xf00000 0x100000>; +- #address-cells = <1>; +- #size-cells = <1>; +- +- partition@0 { +- label = "wifi0"; +- reg = <0x000000 0x080000>; +- }; +- +- partition@80000 { +- label = "wifi1"; +- reg = <0x080000 0x080000>; +- }; +- }; +- }; +-}; +--- /dev/null ++++ b/Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml +@@ -0,0 +1,152 @@ ++# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/mtd/partitions/fixed-partitions.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: Fixed partitions ++ ++description: | ++ This binding can be used on platforms which have strong conventions about ++ which portions of a flash are used for what purposes, but which don't use an ++ on-flash partition table such as RedBoot. ++ ++ The partition table should be a node named "partitions". Partitions are then ++ defined as subnodes. ++ ++maintainers: ++ - Rafał Miłecki <rafal@milecki.pl> ++ ++properties: ++ compatible: ++ const: fixed-partitions ++ ++ "#address-cells": true ++ ++ "#size-cells": true ++ ++patternProperties: ++ "@[0-9a-f]+$": ++ description: node describing a single flash partition ++ type: object ++ ++ properties: ++ reg: ++ description: partition's offset and size within the flash ++ maxItems: 1 ++ ++ label: ++ description: The label / name for this partition. If omitted, the label ++ is taken from the node name (excluding the unit address). ++ ++ read-only: ++ description: This parameter, if present, is a hint that this partition ++ should only be mounted read-only. This is usually used for flash ++ partitions containing early-boot firmware images or data which should ++ not be clobbered. 
++ type: boolean ++ ++ lock: ++ description: Do not unlock the partition at initialization time (not ++ supported on all devices) ++ type: boolean ++ ++ slc-mode: ++ description: This parameter, if present, allows one to emulate SLC mode ++ on a partition attached to an MLC NAND thus making this partition ++ immune to paired-pages corruptions ++ type: boolean ++ ++ required: ++ - reg ++ ++required: ++ - "#address-cells" ++ - "#size-cells" ++ ++additionalProperties: true ++ ++examples: ++ - | ++ partitions { ++ compatible = "fixed-partitions"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ partition@0 { ++ label = "u-boot"; ++ reg = <0x0000000 0x100000>; ++ read-only; ++ }; ++ ++ uimage@100000 { ++ reg = <0x0100000 0x200000>; ++ }; ++ }; ++ - | ++ partitions { ++ compatible = "fixed-partitions"; ++ #address-cells = <1>; ++ #size-cells = <2>; ++ ++ /* a 4 GiB partition */ ++ partition@0 { ++ label = "filesystem"; ++ reg = <0x00000000 0x1 0x00000000>; ++ }; ++ }; ++ - | ++ partitions { ++ compatible = "fixed-partitions"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ /* an 8 GiB partition */ ++ partition@0 { ++ label = "filesystem #1"; ++ reg = <0x0 0x00000000 0x2 0x00000000>; ++ }; ++ ++ /* a 4 GiB partition */ ++ partition@200000000 { ++ label = "filesystem #2"; ++ reg = <0x2 0x00000000 0x1 0x00000000>; ++ }; ++ }; ++ - | ++ partitions { ++ compatible = "fixed-partitions"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ partition@0 { ++ label = "bootloader"; ++ reg = <0x000000 0x100000>; ++ read-only; ++ }; ++ ++ firmware@100000 { ++ compatible = "brcm,trx"; ++ label = "firmware"; ++ reg = <0x100000 0xe00000>; ++ }; ++ ++ calibration@f00000 { ++ compatible = "fixed-partitions"; ++ label = "calibration"; ++ reg = <0xf00000 0x100000>; ++ ranges = <0 0xf00000 0x100000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ partition@0 { ++ label = "wifi0"; ++ reg = <0x000000 0x080000>; ++ }; ++ ++ partition@80000 { ++ label = "wifi1"; ++ reg = <0x080000 0x080000>; ++ }; ++ }; ++ }; diff --git a/target/linux/generic/backport-5.15/402-v5.12-0001-dt-bindings-mtd-move-partition-binding-to-its-own-fi.patch b/target/linux/generic/backport-5.15/402-v5.12-0001-dt-bindings-mtd-move-partition-binding-to-its-own-fi.patch new file mode 100644 index 0000000000..f3b1179ecd --- /dev/null +++ b/target/linux/generic/backport-5.15/402-v5.12-0001-dt-bindings-mtd-move-partition-binding-to-its-own-fi.patch @@ -0,0 +1,115 @@ +From 6418522022c706fd867b00b2571edba48b8fa8c7 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Thu, 11 Feb 2021 23:04:25 +0100 +Subject: [PATCH] dt-bindings: mtd: move partition binding to its own file +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Single partition binding is quite common and may be: +1. Used by multiple parsers +2. Extended for more specific cases + +Move it to separated file to avoid code duplication. 
+ +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +Reviewed-by: Rob Herring <robh@kernel.org> +Signed-off-by: Richard Weinberger <richard@nod.at> +--- + .../mtd/partitions/fixed-partitions.yaml | 33 +------------ + .../bindings/mtd/partitions/partition.yaml | 47 +++++++++++++++++++ + 2 files changed, 48 insertions(+), 32 deletions(-) + create mode 100644 Documentation/devicetree/bindings/mtd/partitions/partition.yaml + +--- a/Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml ++++ b/Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml +@@ -27,38 +27,7 @@ properties: + + patternProperties: + "@[0-9a-f]+$": +- description: node describing a single flash partition +- type: object +- +- properties: +- reg: +- description: partition's offset and size within the flash +- maxItems: 1 +- +- label: +- description: The label / name for this partition. If omitted, the label +- is taken from the node name (excluding the unit address). +- +- read-only: +- description: This parameter, if present, is a hint that this partition +- should only be mounted read-only. This is usually used for flash +- partitions containing early-boot firmware images or data which should +- not be clobbered. +- type: boolean +- +- lock: +- description: Do not unlock the partition at initialization time (not +- supported on all devices) +- type: boolean +- +- slc-mode: +- description: This parameter, if present, allows one to emulate SLC mode +- on a partition attached to an MLC NAND thus making this partition +- immune to paired-pages corruptions +- type: boolean +- +- required: +- - reg ++ $ref: "partition.yaml#" + + required: + - "#address-cells" +--- /dev/null ++++ b/Documentation/devicetree/bindings/mtd/partitions/partition.yaml +@@ -0,0 +1,47 @@ ++# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/mtd/partitions/partition.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: Partition ++ ++description: | ++ This binding describes a single flash partition. Each partition must have its ++ relative offset and size specified. Depending on partition function extra ++ properties can be used. ++ ++maintainers: ++ - Rafał Miłecki <rafal@milecki.pl> ++ ++properties: ++ reg: ++ description: partition's offset and size within the flash ++ maxItems: 1 ++ ++ label: ++ description: The label / name for this partition. If omitted, the label ++ is taken from the node name (excluding the unit address). ++ ++ read-only: ++ description: This parameter, if present, is a hint that this partition ++ should only be mounted read-only. This is usually used for flash ++ partitions containing early-boot firmware images or data which should ++ not be clobbered. 
++ type: boolean ++ ++ lock: ++ description: Do not unlock the partition at initialization time (not ++ supported on all devices) ++ type: boolean ++ ++ slc-mode: ++ description: This parameter, if present, allows one to emulate SLC mode ++ on a partition attached to an MLC NAND thus making this partition ++ immune to paired-pages corruptions ++ type: boolean ++ ++required: ++ - reg ++ ++additionalProperties: true diff --git a/target/linux/generic/backport-5.15/402-v5.12-0002-dt-bindings-mtd-add-binding-for-BCM4908-partitions.patch b/target/linux/generic/backport-5.15/402-v5.12-0002-dt-bindings-mtd-add-binding-for-BCM4908-partitions.patch new file mode 100644 index 0000000000..8576c7d78d --- /dev/null +++ b/target/linux/generic/backport-5.15/402-v5.12-0002-dt-bindings-mtd-add-binding-for-BCM4908-partitions.patch @@ -0,0 +1,92 @@ +From 6e9dff6fe3fbc452f16566e4a7e293b0decefdba Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Thu, 11 Feb 2021 23:04:26 +0100 +Subject: [PATCH] dt-bindings: mtd: add binding for BCM4908 partitions +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +BCM4908 uses fixed partitions layout but function of some partitions may +vary. Some devices use multiple firmware partitions and those partitions +should be marked to let system discover their purpose. + +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +Signed-off-by: Richard Weinberger <richard@nod.at> +--- + .../partitions/brcm,bcm4908-partitions.yaml | 70 +++++++++++++++++++ + 1 file changed, 70 insertions(+) + create mode 100644 Documentation/devicetree/bindings/mtd/partitions/brcm,bcm4908-partitions.yaml + +--- /dev/null ++++ b/Documentation/devicetree/bindings/mtd/partitions/brcm,bcm4908-partitions.yaml +@@ -0,0 +1,70 @@ ++# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/mtd/partitions/brcm,bcm4908-partitions.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: Broadcom BCM4908 partitioning ++ ++description: | ++ Broadcom BCM4908 CFE bootloader supports two firmware partitions. One is used ++ for regular booting, the other is treated as fallback. ++ ++ This binding allows defining all fixed partitions and marking those containing ++ firmware. System can use that information e.g. for booting or flashing ++ purposes. 
++ ++maintainers: ++ - Rafał Miłecki <rafal@milecki.pl> ++ ++properties: ++ compatible: ++ const: brcm,bcm4908-partitions ++ ++ "#address-cells": ++ enum: [ 1, 2 ] ++ ++ "#size-cells": ++ enum: [ 1, 2 ] ++ ++patternProperties: ++ "^partition@[0-9a-f]+$": ++ $ref: "partition.yaml#" ++ properties: ++ compatible: ++ const: brcm,bcm4908-firmware ++ unevaluatedProperties: false ++ ++required: ++ - "#address-cells" ++ - "#size-cells" ++ ++additionalProperties: false ++ ++examples: ++ - | ++ partitions { ++ compatible = "brcm,bcm4908-partitions"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ partition@0 { ++ label = "cferom"; ++ reg = <0x0 0x100000>; ++ }; ++ ++ partition@100000 { ++ compatible = "brcm,bcm4908-firmware"; ++ reg = <0x100000 0xf00000>; ++ }; ++ ++ partition@1000000 { ++ compatible = "brcm,bcm4908-firmware"; ++ reg = <0x1000000 0xf00000>; ++ }; ++ ++ partition@1f00000 { ++ label = "calibration"; ++ reg = <0x1f00000 0x100000>; ++ }; ++ }; diff --git a/target/linux/generic/backport-5.15/403-v5.13-mtd-parsers-ofpart-support-BCM4908-fixed-partitions.patch b/target/linux/generic/backport-5.15/403-v5.13-mtd-parsers-ofpart-support-BCM4908-fixed-partitions.patch new file mode 100644 index 0000000000..d3891228e2 --- /dev/null +++ b/target/linux/generic/backport-5.15/403-v5.13-mtd-parsers-ofpart-support-BCM4908-fixed-partitions.patch @@ -0,0 +1,654 @@ +From afbef8efb591792579c633a7c545f914c6165f82 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Thu, 11 Feb 2021 23:04:27 +0100 +Subject: [PATCH] mtd: parsers: ofpart: support BCM4908 fixed partitions +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Some devices use fixed partitioning with some partitions requiring some +extra logic. E.g. BCM4908 may have multiple firmware partitions but +detecting currently used one requires checking bootloader parameters. + +To support such cases without duplicating a lot of code (without copying +most of the ofpart.c code) support for post-parsing callback was added. + +BCM4908 support in ofpart can be enabled using config option and results +in compiling & executing a specific callback. It simply reads offset of +currently used firmware partition from the DT. Bootloader specifies it +using the "brcm_blparms" property. + +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +--- + drivers/mtd/parsers/Kconfig | 9 +++ + drivers/mtd/parsers/Makefile | 2 + + drivers/mtd/parsers/ofpart_bcm4908.c | 64 +++++++++++++++++++ + drivers/mtd/parsers/ofpart_bcm4908.h | 15 +++++ + .../mtd/parsers/{ofpart.c => ofpart_core.c} | 28 +++++++- + 5 files changed, 116 insertions(+), 2 deletions(-) + create mode 100644 drivers/mtd/parsers/ofpart_bcm4908.c + create mode 100644 drivers/mtd/parsers/ofpart_bcm4908.h + rename drivers/mtd/parsers/{ofpart.c => ofpart_core.c} (88%) + +--- a/drivers/mtd/parsers/Kconfig ++++ b/drivers/mtd/parsers/Kconfig +@@ -67,6 +67,15 @@ config MTD_OF_PARTS + flash memory node, as described in + Documentation/devicetree/bindings/mtd/partition.txt. + ++config MTD_OF_PARTS_BCM4908 ++ bool "BCM4908 partitioning support" ++ depends on MTD_OF_PARTS && (ARCH_BCM4908 || COMPILE_TEST) ++ default ARCH_BCM4908 ++ help ++ This provides partitions parser for BCM4908 family devices ++ that can have multiple "firmware" partitions. It takes care of ++ finding currently used one and backup ones. 
++ + config MTD_PARSER_IMAGETAG + tristate "Parser for BCM963XX Image Tag format partitions" + depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST +--- a/drivers/mtd/parsers/Makefile ++++ b/drivers/mtd/parsers/Makefile +@@ -4,6 +4,8 @@ obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm4 + obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o + obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o + obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o ++ofpart-y += ofpart_core.o ++ofpart-$(CONFIG_MTD_OF_PARTS_BCM4908) += ofpart_bcm4908.o + obj-$(CONFIG_MTD_PARSER_IMAGETAG) += parser_imagetag.o + obj-$(CONFIG_MTD_AFS_PARTS) += afs.o + obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o +--- /dev/null ++++ b/drivers/mtd/parsers/ofpart_bcm4908.c +@@ -0,0 +1,64 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl> ++ */ ++ ++#include <linux/module.h> ++#include <linux/init.h> ++#include <linux/of.h> ++#include <linux/mtd/mtd.h> ++#include <linux/slab.h> ++#include <linux/mtd/partitions.h> ++ ++#include "ofpart_bcm4908.h" ++ ++#define BLPARAMS_FW_OFFSET "NAND_RFS_OFS" ++ ++static long long bcm4908_partitions_fw_offset(void) ++{ ++ struct device_node *root; ++ struct property *prop; ++ const char *s; ++ ++ root = of_find_node_by_path("/"); ++ if (!root) ++ return -ENOENT; ++ ++ of_property_for_each_string(root, "brcm_blparms", prop, s) { ++ size_t len = strlen(BLPARAMS_FW_OFFSET); ++ unsigned long offset; ++ int err; ++ ++ if (strncmp(s, BLPARAMS_FW_OFFSET, len) || s[len] != '=') ++ continue; ++ ++ err = kstrtoul(s + len + 1, 0, &offset); ++ if (err) { ++ pr_err("failed to parse %s\n", s + len + 1); ++ return err; ++ } ++ ++ return offset << 10; ++ } ++ ++ return -ENOENT; ++} ++ ++int bcm4908_partitions_post_parse(struct mtd_info *mtd, struct mtd_partition *parts, int nr_parts) ++{ ++ long long fw_offset; ++ int i; ++ ++ fw_offset = bcm4908_partitions_fw_offset(); ++ ++ for (i = 0; i < nr_parts; i++) { ++ if (of_device_is_compatible(parts[i].of_node, "brcm,bcm4908-firmware")) { ++ if (fw_offset < 0 || parts[i].offset == fw_offset) ++ parts[i].name = "firmware"; ++ else ++ parts[i].name = "backup"; ++ } ++ } ++ ++ return 0; ++} +--- /dev/null ++++ b/drivers/mtd/parsers/ofpart_bcm4908.h +@@ -0,0 +1,15 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef __BCM4908_PARTITIONS_H ++#define __BCM4908_PARTITIONS_H ++ ++#ifdef CONFIG_MTD_OF_PARTS_BCM4908 ++int bcm4908_partitions_post_parse(struct mtd_info *mtd, struct mtd_partition *parts, int nr_parts); ++#else ++static inline int bcm4908_partitions_post_parse(struct mtd_info *mtd, struct mtd_partition *parts, ++ int nr_parts) ++{ ++ return -EOPNOTSUPP; ++} ++#endif ++ ++#endif +--- a/drivers/mtd/parsers/ofpart.c ++++ /dev/null +@@ -1,239 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0-or-later +-/* +- * Flash partitions described by the OF (or flattened) device tree +- * +- * Copyright © 2006 MontaVista Software Inc. +- * Author: Vitaly Wool <vwool@ru.mvista.com> +- * +- * Revised to handle newer style flash binding by: +- * Copyright © 2007 David Gibson, IBM Corporation. 
+- */ +- +-#include <linux/module.h> +-#include <linux/init.h> +-#include <linux/of.h> +-#include <linux/mtd/mtd.h> +-#include <linux/slab.h> +-#include <linux/mtd/partitions.h> +- +-static bool node_has_compatible(struct device_node *pp) +-{ +- return of_get_property(pp, "compatible", NULL); +-} +- +-static int parse_fixed_partitions(struct mtd_info *master, +- const struct mtd_partition **pparts, +- struct mtd_part_parser_data *data) +-{ +- struct mtd_partition *parts; +- struct device_node *mtd_node; +- struct device_node *ofpart_node; +- const char *partname; +- struct device_node *pp; +- int nr_parts, i, ret = 0; +- bool dedicated = true; +- +- +- /* Pull of_node from the master device node */ +- mtd_node = mtd_get_of_node(master); +- if (!mtd_node) +- return 0; +- +- ofpart_node = of_get_child_by_name(mtd_node, "partitions"); +- if (!ofpart_node) { +- /* +- * We might get here even when ofpart isn't used at all (e.g., +- * when using another parser), so don't be louder than +- * KERN_DEBUG +- */ +- pr_debug("%s: 'partitions' subnode not found on %pOF. Trying to parse direct subnodes as partitions.\n", +- master->name, mtd_node); +- ofpart_node = mtd_node; +- dedicated = false; +- } else if (!of_device_is_compatible(ofpart_node, "fixed-partitions")) { +- /* The 'partitions' subnode might be used by another parser */ +- return 0; +- } +- +- /* First count the subnodes */ +- nr_parts = 0; +- for_each_child_of_node(ofpart_node, pp) { +- if (!dedicated && node_has_compatible(pp)) +- continue; +- +- nr_parts++; +- } +- +- if (nr_parts == 0) +- return 0; +- +- parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL); +- if (!parts) +- return -ENOMEM; +- +- i = 0; +- for_each_child_of_node(ofpart_node, pp) { +- const __be32 *reg; +- int len; +- int a_cells, s_cells; +- +- if (!dedicated && node_has_compatible(pp)) +- continue; +- +- reg = of_get_property(pp, "reg", &len); +- if (!reg) { +- if (dedicated) { +- pr_debug("%s: ofpart partition %pOF (%pOF) missing reg property.\n", +- master->name, pp, +- mtd_node); +- goto ofpart_fail; +- } else { +- nr_parts--; +- continue; +- } +- } +- +- a_cells = of_n_addr_cells(pp); +- s_cells = of_n_size_cells(pp); +- if (len / 4 != a_cells + s_cells) { +- pr_debug("%s: ofpart partition %pOF (%pOF) error parsing reg property.\n", +- master->name, pp, +- mtd_node); +- goto ofpart_fail; +- } +- +- parts[i].offset = of_read_number(reg, a_cells); +- parts[i].size = of_read_number(reg + a_cells, s_cells); +- parts[i].of_node = pp; +- +- partname = of_get_property(pp, "label", &len); +- if (!partname) +- partname = of_get_property(pp, "name", &len); +- parts[i].name = partname; +- +- if (of_get_property(pp, "read-only", &len)) +- parts[i].mask_flags |= MTD_WRITEABLE; +- +- if (of_get_property(pp, "lock", &len)) +- parts[i].mask_flags |= MTD_POWERUP_LOCK; +- +- if (of_property_read_bool(pp, "slc-mode")) +- parts[i].add_flags |= MTD_SLC_ON_MLC_EMULATION; +- +- i++; +- } +- +- if (!nr_parts) +- goto ofpart_none; +- +- *pparts = parts; +- return nr_parts; +- +-ofpart_fail: +- pr_err("%s: error parsing ofpart partition %pOF (%pOF)\n", +- master->name, pp, mtd_node); +- ret = -EINVAL; +-ofpart_none: +- of_node_put(pp); +- kfree(parts); +- return ret; +-} +- +-static const struct of_device_id parse_ofpart_match_table[] = { +- { .compatible = "fixed-partitions" }, +- {}, +-}; +-MODULE_DEVICE_TABLE(of, parse_ofpart_match_table); +- +-static struct mtd_part_parser ofpart_parser = { +- .parse_fn = parse_fixed_partitions, +- .name = "fixed-partitions", +- .of_match_table = 
parse_ofpart_match_table, +-}; +- +-static int parse_ofoldpart_partitions(struct mtd_info *master, +- const struct mtd_partition **pparts, +- struct mtd_part_parser_data *data) +-{ +- struct mtd_partition *parts; +- struct device_node *dp; +- int i, plen, nr_parts; +- const struct { +- __be32 offset, len; +- } *part; +- const char *names; +- +- /* Pull of_node from the master device node */ +- dp = mtd_get_of_node(master); +- if (!dp) +- return 0; +- +- part = of_get_property(dp, "partitions", &plen); +- if (!part) +- return 0; /* No partitions found */ +- +- pr_warn("Device tree uses obsolete partition map binding: %pOF\n", dp); +- +- nr_parts = plen / sizeof(part[0]); +- +- parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL); +- if (!parts) +- return -ENOMEM; +- +- names = of_get_property(dp, "partition-names", &plen); +- +- for (i = 0; i < nr_parts; i++) { +- parts[i].offset = be32_to_cpu(part->offset); +- parts[i].size = be32_to_cpu(part->len) & ~1; +- /* bit 0 set signifies read only partition */ +- if (be32_to_cpu(part->len) & 1) +- parts[i].mask_flags = MTD_WRITEABLE; +- +- if (names && (plen > 0)) { +- int len = strlen(names) + 1; +- +- parts[i].name = names; +- plen -= len; +- names += len; +- } else { +- parts[i].name = "unnamed"; +- } +- +- part++; +- } +- +- *pparts = parts; +- return nr_parts; +-} +- +-static struct mtd_part_parser ofoldpart_parser = { +- .parse_fn = parse_ofoldpart_partitions, +- .name = "ofoldpart", +-}; +- +-static int __init ofpart_parser_init(void) +-{ +- register_mtd_parser(&ofpart_parser); +- register_mtd_parser(&ofoldpart_parser); +- return 0; +-} +- +-static void __exit ofpart_parser_exit(void) +-{ +- deregister_mtd_parser(&ofpart_parser); +- deregister_mtd_parser(&ofoldpart_parser); +-} +- +-module_init(ofpart_parser_init); +-module_exit(ofpart_parser_exit); +- +-MODULE_LICENSE("GPL"); +-MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree"); +-MODULE_AUTHOR("Vitaly Wool, David Gibson"); +-/* +- * When MTD core cannot find the requested parser, it tries to load the module +- * with the same name. Since we provide the ofoldpart parser, we should have +- * the corresponding alias. +- */ +-MODULE_ALIAS("fixed-partitions"); +-MODULE_ALIAS("ofoldpart"); +--- /dev/null ++++ b/drivers/mtd/parsers/ofpart_core.c +@@ -0,0 +1,263 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Flash partitions described by the OF (or flattened) device tree ++ * ++ * Copyright © 2006 MontaVista Software Inc. ++ * Author: Vitaly Wool <vwool@ru.mvista.com> ++ * ++ * Revised to handle newer style flash binding by: ++ * Copyright © 2007 David Gibson, IBM Corporation. 
++ */ ++ ++#include <linux/module.h> ++#include <linux/init.h> ++#include <linux/of.h> ++#include <linux/mtd/mtd.h> ++#include <linux/slab.h> ++#include <linux/mtd/partitions.h> ++ ++#include "ofpart_bcm4908.h" ++ ++struct fixed_partitions_quirks { ++ int (*post_parse)(struct mtd_info *mtd, struct mtd_partition *parts, int nr_parts); ++}; ++ ++struct fixed_partitions_quirks bcm4908_partitions_quirks = { ++ .post_parse = bcm4908_partitions_post_parse, ++}; ++ ++static const struct of_device_id parse_ofpart_match_table[]; ++ ++static bool node_has_compatible(struct device_node *pp) ++{ ++ return of_get_property(pp, "compatible", NULL); ++} ++ ++static int parse_fixed_partitions(struct mtd_info *master, ++ const struct mtd_partition **pparts, ++ struct mtd_part_parser_data *data) ++{ ++ const struct fixed_partitions_quirks *quirks; ++ const struct of_device_id *of_id; ++ struct mtd_partition *parts; ++ struct device_node *mtd_node; ++ struct device_node *ofpart_node; ++ const char *partname; ++ struct device_node *pp; ++ int nr_parts, i, ret = 0; ++ bool dedicated = true; ++ ++ /* Pull of_node from the master device node */ ++ mtd_node = mtd_get_of_node(master); ++ if (!mtd_node) ++ return 0; ++ ++ ofpart_node = of_get_child_by_name(mtd_node, "partitions"); ++ if (!ofpart_node) { ++ /* ++ * We might get here even when ofpart isn't used at all (e.g., ++ * when using another parser), so don't be louder than ++ * KERN_DEBUG ++ */ ++ pr_debug("%s: 'partitions' subnode not found on %pOF. Trying to parse direct subnodes as partitions.\n", ++ master->name, mtd_node); ++ ofpart_node = mtd_node; ++ dedicated = false; ++ } ++ ++ of_id = of_match_node(parse_ofpart_match_table, ofpart_node); ++ if (dedicated && !of_id) { ++ /* The 'partitions' subnode might be used by another parser */ ++ return 0; ++ } ++ ++ quirks = of_id ? 
of_id->data : NULL; ++ ++ /* First count the subnodes */ ++ nr_parts = 0; ++ for_each_child_of_node(ofpart_node, pp) { ++ if (!dedicated && node_has_compatible(pp)) ++ continue; ++ ++ nr_parts++; ++ } ++ ++ if (nr_parts == 0) ++ return 0; ++ ++ parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL); ++ if (!parts) ++ return -ENOMEM; ++ ++ i = 0; ++ for_each_child_of_node(ofpart_node, pp) { ++ const __be32 *reg; ++ int len; ++ int a_cells, s_cells; ++ ++ if (!dedicated && node_has_compatible(pp)) ++ continue; ++ ++ reg = of_get_property(pp, "reg", &len); ++ if (!reg) { ++ if (dedicated) { ++ pr_debug("%s: ofpart partition %pOF (%pOF) missing reg property.\n", ++ master->name, pp, ++ mtd_node); ++ goto ofpart_fail; ++ } else { ++ nr_parts--; ++ continue; ++ } ++ } ++ ++ a_cells = of_n_addr_cells(pp); ++ s_cells = of_n_size_cells(pp); ++ if (len / 4 != a_cells + s_cells) { ++ pr_debug("%s: ofpart partition %pOF (%pOF) error parsing reg property.\n", ++ master->name, pp, ++ mtd_node); ++ goto ofpart_fail; ++ } ++ ++ parts[i].offset = of_read_number(reg, a_cells); ++ parts[i].size = of_read_number(reg + a_cells, s_cells); ++ parts[i].of_node = pp; ++ ++ partname = of_get_property(pp, "label", &len); ++ if (!partname) ++ partname = of_get_property(pp, "name", &len); ++ parts[i].name = partname; ++ ++ if (of_get_property(pp, "read-only", &len)) ++ parts[i].mask_flags |= MTD_WRITEABLE; ++ ++ if (of_get_property(pp, "lock", &len)) ++ parts[i].mask_flags |= MTD_POWERUP_LOCK; ++ ++ if (of_property_read_bool(pp, "slc-mode")) ++ parts[i].add_flags |= MTD_SLC_ON_MLC_EMULATION; ++ ++ i++; ++ } ++ ++ if (!nr_parts) ++ goto ofpart_none; ++ ++ if (quirks && quirks->post_parse) ++ quirks->post_parse(master, parts, nr_parts); ++ ++ *pparts = parts; ++ return nr_parts; ++ ++ofpart_fail: ++ pr_err("%s: error parsing ofpart partition %pOF (%pOF)\n", ++ master->name, pp, mtd_node); ++ ret = -EINVAL; ++ofpart_none: ++ of_node_put(pp); ++ kfree(parts); ++ return ret; ++} ++ ++static const struct of_device_id parse_ofpart_match_table[] = { ++ /* Generic */ ++ { .compatible = "fixed-partitions" }, ++ /* Customized */ ++ { .compatible = "brcm,bcm4908-partitions", .data = &bcm4908_partitions_quirks, }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, parse_ofpart_match_table); ++ ++static struct mtd_part_parser ofpart_parser = { ++ .parse_fn = parse_fixed_partitions, ++ .name = "fixed-partitions", ++ .of_match_table = parse_ofpart_match_table, ++}; ++ ++static int parse_ofoldpart_partitions(struct mtd_info *master, ++ const struct mtd_partition **pparts, ++ struct mtd_part_parser_data *data) ++{ ++ struct mtd_partition *parts; ++ struct device_node *dp; ++ int i, plen, nr_parts; ++ const struct { ++ __be32 offset, len; ++ } *part; ++ const char *names; ++ ++ /* Pull of_node from the master device node */ ++ dp = mtd_get_of_node(master); ++ if (!dp) ++ return 0; ++ ++ part = of_get_property(dp, "partitions", &plen); ++ if (!part) ++ return 0; /* No partitions found */ ++ ++ pr_warn("Device tree uses obsolete partition map binding: %pOF\n", dp); ++ ++ nr_parts = plen / sizeof(part[0]); ++ ++ parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL); ++ if (!parts) ++ return -ENOMEM; ++ ++ names = of_get_property(dp, "partition-names", &plen); ++ ++ for (i = 0; i < nr_parts; i++) { ++ parts[i].offset = be32_to_cpu(part->offset); ++ parts[i].size = be32_to_cpu(part->len) & ~1; ++ /* bit 0 set signifies read only partition */ ++ if (be32_to_cpu(part->len) & 1) ++ parts[i].mask_flags = MTD_WRITEABLE; ++ ++ if (names && (plen > 0)) { ++ int 
len = strlen(names) + 1; ++ ++ parts[i].name = names; ++ plen -= len; ++ names += len; ++ } else { ++ parts[i].name = "unnamed"; ++ } ++ ++ part++; ++ } ++ ++ *pparts = parts; ++ return nr_parts; ++} ++ ++static struct mtd_part_parser ofoldpart_parser = { ++ .parse_fn = parse_ofoldpart_partitions, ++ .name = "ofoldpart", ++}; ++ ++static int __init ofpart_parser_init(void) ++{ ++ register_mtd_parser(&ofpart_parser); ++ register_mtd_parser(&ofoldpart_parser); ++ return 0; ++} ++ ++static void __exit ofpart_parser_exit(void) ++{ ++ deregister_mtd_parser(&ofpart_parser); ++ deregister_mtd_parser(&ofoldpart_parser); ++} ++ ++module_init(ofpart_parser_init); ++module_exit(ofpart_parser_exit); ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree"); ++MODULE_AUTHOR("Vitaly Wool, David Gibson"); ++/* ++ * When MTD core cannot find the requested parser, it tries to load the module ++ * with the same name. Since we provide the ofoldpart parser, we should have ++ * the corresponding alias. ++ */ ++MODULE_ALIAS("fixed-partitions"); ++MODULE_ALIAS("ofoldpart"); diff --git a/target/linux/generic/backport-5.15/404-v5.13-mtd-parsers-ofpart-limit-parsing-of-deprecated-DT-sy.patch b/target/linux/generic/backport-5.15/404-v5.13-mtd-parsers-ofpart-limit-parsing-of-deprecated-DT-sy.patch new file mode 100644 index 0000000000..55a91d7680 --- /dev/null +++ b/target/linux/generic/backport-5.15/404-v5.13-mtd-parsers-ofpart-limit-parsing-of-deprecated-DT-sy.patch @@ -0,0 +1,69 @@ +From 2d751203aacf86a1b301a188d8551c7da91043ab Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Tue, 2 Mar 2021 20:00:12 +0100 +Subject: [PATCH] mtd: parsers: ofpart: limit parsing of deprecated DT syntax +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +For backward compatibility ofpart still supports the old syntax like: +spi-flash@0 { + compatible = "jedec,spi-nor"; + reg = <0x0>; + + partition@0 { + label = "bootloader"; + reg = <0x0 0x100000>; + }; +}; +(without "partitions" subnode). + +There is no reason however to support nested partitions without a clear +"compatible" string like: +partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "bootloader"; + reg = <0x0 0x100000>; + + partition@0 { + label = "config"; + reg = <0x80000 0x80000>; + }; + }; +}; +(we never officially supported or documented that). + +Make sure ofpart doesn't attempt to parse above. 
+ +Cc: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com> +Link: https://lore.kernel.org/linux-mtd/20210302190012.1255-1-zajec5@gmail.com +--- + drivers/mtd/parsers/ofpart_core.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/drivers/mtd/parsers/ofpart_core.c ++++ b/drivers/mtd/parsers/ofpart_core.c +@@ -53,7 +53,7 @@ static int parse_fixed_partitions(struct + return 0; + + ofpart_node = of_get_child_by_name(mtd_node, "partitions"); +- if (!ofpart_node) { ++ if (!ofpart_node && !master->parent) { + /* + * We might get here even when ofpart isn't used at all (e.g., + * when using another parser), so don't be louder than +@@ -64,6 +64,8 @@ static int parse_fixed_partitions(struct + ofpart_node = mtd_node; + dedicated = false; + } ++ if (!ofpart_node) ++ return 0; + + of_id = of_match_node(parse_ofpart_match_table, ofpart_node); + if (dedicated && !of_id) { diff --git a/target/linux/generic/backport-5.15/405-v5.13-mtd-parsers-ofpart-make-symbol-bcm4908_partitions_qu.patch b/target/linux/generic/backport-5.15/405-v5.13-mtd-parsers-ofpart-make-symbol-bcm4908_partitions_qu.patch new file mode 100644 index 0000000000..f1b778a6e1 --- /dev/null +++ b/target/linux/generic/backport-5.15/405-v5.13-mtd-parsers-ofpart-make-symbol-bcm4908_partitions_qu.patch @@ -0,0 +1,34 @@ +From b87b6d2d6f540e29c3f98e1572d64e560d73d6c1 Mon Sep 17 00:00:00 2001 +From: Wei Yongjun <weiyongjun1@huawei.com> +Date: Thu, 4 Mar 2021 06:46:00 +0000 +Subject: [PATCH] mtd: parsers: ofpart: make symbol 'bcm4908_partitions_quirks' + static + +The sparse tool complains as follows: + +drivers/mtd/parsers/ofpart_core.c:25:32: warning: + symbol 'bcm4908_partitions_quirks' was not declared. Should it be static? + +This symbol is not used outside of ofpart_core.c, so this +commit marks it static. 
+ +Fixes: 457da931b608 ("mtd: parsers: ofpart: support BCM4908 fixed partitions") +Reported-by: Hulk Robot <hulkci@huawei.com> +Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com> +Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com> +Link: https://lore.kernel.org/linux-mtd/20210304064600.3279138-1-weiyongjun1@huawei.com +--- + drivers/mtd/parsers/ofpart_core.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/mtd/parsers/ofpart_core.c ++++ b/drivers/mtd/parsers/ofpart_core.c +@@ -22,7 +22,7 @@ struct fixed_partitions_quirks { + int (*post_parse)(struct mtd_info *mtd, struct mtd_partition *parts, int nr_parts); + }; + +-struct fixed_partitions_quirks bcm4908_partitions_quirks = { ++static struct fixed_partitions_quirks bcm4908_partitions_quirks = { + .post_parse = bcm4908_partitions_post_parse, + }; + diff --git a/target/linux/generic/backport-5.15/406-v5.13-0001-mtd-core-add-nvmem-cells-compatible-to-parse-mtd-as-.patch b/target/linux/generic/backport-5.15/406-v5.13-0001-mtd-core-add-nvmem-cells-compatible-to-parse-mtd-as-.patch new file mode 100644 index 0000000000..28335cb71f --- /dev/null +++ b/target/linux/generic/backport-5.15/406-v5.13-0001-mtd-core-add-nvmem-cells-compatible-to-parse-mtd-as-.patch @@ -0,0 +1,38 @@ +From a5d83d6e2bc747b13f347962d4b335d70b23559b Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 12 Mar 2021 07:28:19 +0100 +Subject: [PATCH] mtd: core: add nvmem-cells compatible to parse mtd as nvmem + cells +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Partitions that contains the nvmem-cells compatible will register +their direct subonodes as nvmem cells and the node will be treated as a +nvmem provider. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Tested-by: Rafał Miłecki <rafal@milecki.pl> +--- + drivers/mtd/mtdcore.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/drivers/mtd/mtdcore.c ++++ b/drivers/mtd/mtdcore.c +@@ -531,6 +531,7 @@ static int mtd_nvmem_reg_read(void *priv + + static int mtd_nvmem_add(struct mtd_info *mtd) + { ++ struct device_node *node = mtd_get_of_node(mtd); + struct nvmem_config config = {}; + + config.id = -1; +@@ -543,7 +544,7 @@ static int mtd_nvmem_add(struct mtd_info + config.stride = 1; + config.read_only = true; + config.root_only = true; +- config.no_of_node = true; ++ config.no_of_node = !of_device_is_compatible(node, "nvmem-cells"); + config.priv = mtd; + + mtd->nvmem = nvmem_register(&config); diff --git a/target/linux/generic/backport-5.15/406-v5.13-0002-dt-bindings-nvmem-drop-nodename-restriction.patch b/target/linux/generic/backport-5.15/406-v5.13-0002-dt-bindings-nvmem-drop-nodename-restriction.patch new file mode 100644 index 0000000000..14ea3f6b8c --- /dev/null +++ b/target/linux/generic/backport-5.15/406-v5.13-0002-dt-bindings-nvmem-drop-nodename-restriction.patch @@ -0,0 +1,25 @@ +From 42645976c3289b03a12f1bd2bc131fd98fc27170 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 12 Mar 2021 07:28:20 +0100 +Subject: [PATCH] devicetree: nvmem: nvmem: drop $nodename restriction + +Drop $nodename restriction as now mtd partition can also be used as +nvmem provider. 
+ +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +--- + Documentation/devicetree/bindings/nvmem/nvmem.yaml | 3 --- + 1 file changed, 3 deletions(-) + +--- a/Documentation/devicetree/bindings/nvmem/nvmem.yaml ++++ b/Documentation/devicetree/bindings/nvmem/nvmem.yaml +@@ -20,9 +20,6 @@ description: | + storage device. + + properties: +- $nodename: +- pattern: "^(eeprom|efuse|nvram)(@.*|-[0-9a-f])*$" +- + "#address-cells": + const: 1 + diff --git a/target/linux/generic/backport-5.15/406-v5.13-0003-dt-bindings-mtd-Document-use-of-nvmem-cells-compatib.patch b/target/linux/generic/backport-5.15/406-v5.13-0003-dt-bindings-mtd-Document-use-of-nvmem-cells-compatib.patch new file mode 100644 index 0000000000..0eb4c637cf --- /dev/null +++ b/target/linux/generic/backport-5.15/406-v5.13-0003-dt-bindings-mtd-Document-use-of-nvmem-cells-compatib.patch @@ -0,0 +1,117 @@ +From 377aa0135dc8489312edd3184d143ce3a89ff7ee Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 12 Mar 2021 07:28:21 +0100 +Subject: [PATCH] dt-bindings: mtd: Document use of nvmem-cells compatible + +Document nvmem-cells compatible used to treat mtd partitions as a +nvmem provider. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Rob Herring <robh@kernel.org> +--- + .../bindings/mtd/partitions/nvmem-cells.yaml | 99 +++++++++++++++++++ + 1 file changed, 99 insertions(+) + create mode 100644 Documentation/devicetree/bindings/mtd/partitions/nvmem-cells.yaml + +--- /dev/null ++++ b/Documentation/devicetree/bindings/mtd/partitions/nvmem-cells.yaml +@@ -0,0 +1,99 @@ ++# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/mtd/partitions/nvmem-cells.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: Nvmem cells ++ ++description: | ++ Any partition containing the compatible "nvmem-cells" will register as a ++ nvmem provider. ++ Each direct subnodes represents a nvmem cell following the nvmem binding. ++ Nvmem binding to declare nvmem-cells can be found in: ++ Documentation/devicetree/bindings/nvmem/nvmem.yaml ++ ++maintainers: ++ - Ansuel Smith <ansuelsmth@gmail.com> ++ ++allOf: ++ - $ref: /schemas/nvmem/nvmem.yaml# ++ ++properties: ++ compatible: ++ const: nvmem-cells ++ ++required: ++ - compatible ++ ++additionalProperties: true ++ ++examples: ++ - | ++ partitions { ++ compatible = "fixed-partitions"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ /* ... 
*/ ++ ++ }; ++ art: art@1200000 { ++ compatible = "nvmem-cells"; ++ reg = <0x1200000 0x0140000>; ++ label = "art"; ++ read-only; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ macaddr_gmac1: macaddr_gmac1@0 { ++ reg = <0x0 0x6>; ++ }; ++ ++ macaddr_gmac2: macaddr_gmac2@6 { ++ reg = <0x6 0x6>; ++ }; ++ ++ pre_cal_24g: pre_cal_24g@1000 { ++ reg = <0x1000 0x2f20>; ++ }; ++ ++ pre_cal_5g: pre_cal_5g@5000{ ++ reg = <0x5000 0x2f20>; ++ }; ++ }; ++ - | ++ partitions { ++ compatible = "fixed-partitions"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ partition@0 { ++ label = "bootloader"; ++ reg = <0x000000 0x100000>; ++ read-only; ++ }; ++ ++ firmware@100000 { ++ compatible = "brcm,trx"; ++ label = "firmware"; ++ reg = <0x100000 0xe00000>; ++ }; ++ ++ calibration@f00000 { ++ compatible = "nvmem-cells"; ++ label = "calibration"; ++ reg = <0xf00000 0x100000>; ++ ranges = <0 0xf00000 0x100000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ wifi0@0 { ++ reg = <0x000000 0x080000>; ++ }; ++ ++ wifi1@80000 { ++ reg = <0x080000 0x080000>; ++ }; ++ }; ++ }; diff --git a/target/linux/generic/backport-5.15/407-v5.13-0001-dt-bindings-mtd-add-binding-for-Linksys-Northstar-pa.patch b/target/linux/generic/backport-5.15/407-v5.13-0001-dt-bindings-mtd-add-binding-for-Linksys-Northstar-pa.patch new file mode 100644 index 0000000000..35a4afd67b --- /dev/null +++ b/target/linux/generic/backport-5.15/407-v5.13-0001-dt-bindings-mtd-add-binding-for-Linksys-Northstar-pa.patch @@ -0,0 +1,98 @@ +From 2fa7294175c76e1ec568aa75c1891fd908728c8d Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Fri, 12 Mar 2021 14:49:18 +0100 +Subject: [PATCH] dt-bindings: mtd: add binding for Linksys Northstar + partitions +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Linksys on Broadcom Northstar devices uses fixed flash layout with +multiple firmware partitions. + +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +Reviewed-by: Rob Herring <robh@kernel.org> +Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com> +Link: https://lore.kernel.org/linux-mtd/20210312134919.7767-1-zajec5@gmail.com +--- + .../mtd/partitions/linksys,ns-partitions.yaml | 74 +++++++++++++++++++ + 1 file changed, 74 insertions(+) + create mode 100644 Documentation/devicetree/bindings/mtd/partitions/linksys,ns-partitions.yaml + +--- /dev/null ++++ b/Documentation/devicetree/bindings/mtd/partitions/linksys,ns-partitions.yaml +@@ -0,0 +1,74 @@ ++# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/mtd/partitions/linksys,ns-partitions.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: Linksys Northstar partitioning ++ ++description: | ++ Linksys devices based on Broadcom Northstar architecture often use two ++ firmware partitions. One is used for regular booting, the other is treated as ++ fallback. ++ ++ This binding allows defining all fixed partitions and marking those containing ++ firmware. System can use that information e.g. for booting or flashing ++ purposes. 
++ ++maintainers: ++ - Rafał Miłecki <rafal@milecki.pl> ++ ++properties: ++ compatible: ++ const: linksys,ns-partitions ++ ++ "#address-cells": ++ enum: [ 1, 2 ] ++ ++ "#size-cells": ++ enum: [ 1, 2 ] ++ ++patternProperties: ++ "^partition@[0-9a-f]+$": ++ $ref: "partition.yaml#" ++ properties: ++ compatible: ++ items: ++ - const: linksys,ns-firmware ++ - const: brcm,trx ++ unevaluatedProperties: false ++ ++required: ++ - "#address-cells" ++ - "#size-cells" ++ ++additionalProperties: false ++ ++examples: ++ - | ++ partitions { ++ compatible = "linksys,ns-partitions"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ partition@0 { ++ label = "boot"; ++ reg = <0x0 0x100000>; ++ read-only; ++ }; ++ ++ partition@100000 { ++ label = "nvram"; ++ reg = <0x100000 0x100000>; ++ }; ++ ++ partition@200000 { ++ compatible = "linksys,ns-firmware", "brcm,trx"; ++ reg = <0x200000 0xf00000>; ++ }; ++ ++ partition@1100000 { ++ compatible = "linksys,ns-firmware", "brcm,trx"; ++ reg = <0x1100000 0xf00000>; ++ }; ++ }; diff --git a/target/linux/generic/backport-5.15/407-v5.13-0002-mtd-parsers-ofpart-support-Linksys-Northstar-partiti.patch b/target/linux/generic/backport-5.15/407-v5.13-0002-mtd-parsers-ofpart-support-Linksys-Northstar-partiti.patch new file mode 100644 index 0000000000..f317889785 --- /dev/null +++ b/target/linux/generic/backport-5.15/407-v5.13-0002-mtd-parsers-ofpart-support-Linksys-Northstar-partiti.patch @@ -0,0 +1,156 @@ +From 7134a2d026d942210b4d26d6059c9d979ca7866e Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Fri, 12 Mar 2021 14:49:19 +0100 +Subject: [PATCH] mtd: parsers: ofpart: support Linksys Northstar partitions +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This allows extending ofpart parser with support for Linksys Northstar +devices. That support uses recently added quirks mechanism. + +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com> +Link: https://lore.kernel.org/linux-mtd/20210312134919.7767-2-zajec5@gmail.com +--- + drivers/mtd/parsers/Kconfig | 10 +++++ + drivers/mtd/parsers/Makefile | 1 + + drivers/mtd/parsers/ofpart_core.c | 6 +++ + drivers/mtd/parsers/ofpart_linksys_ns.c | 50 +++++++++++++++++++++++++ + drivers/mtd/parsers/ofpart_linksys_ns.h | 18 +++++++++ + 5 files changed, 85 insertions(+) + create mode 100644 drivers/mtd/parsers/ofpart_linksys_ns.c + create mode 100644 drivers/mtd/parsers/ofpart_linksys_ns.h + +--- a/drivers/mtd/parsers/Kconfig ++++ b/drivers/mtd/parsers/Kconfig +@@ -76,6 +76,16 @@ config MTD_OF_PARTS_BCM4908 + that can have multiple "firmware" partitions. It takes care of + finding currently used one and backup ones. + ++config MTD_OF_PARTS_LINKSYS_NS ++ bool "Linksys Northstar partitioning support" ++ depends on MTD_OF_PARTS && (ARCH_BCM_5301X || ARCH_BCM4908 || COMPILE_TEST) ++ default ARCH_BCM_5301X ++ help ++ This provides partitions parser for Linksys devices based on Broadcom ++ Northstar architecture. Linksys commonly uses fixed flash layout with ++ two "firmware" partitions. Currently used firmware has to be detected ++ using CFE environment variable. 
++ + config MTD_PARSER_IMAGETAG + tristate "Parser for BCM963XX Image Tag format partitions" + depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST +--- a/drivers/mtd/parsers/Makefile ++++ b/drivers/mtd/parsers/Makefile +@@ -6,6 +6,7 @@ obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdl + obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o + ofpart-y += ofpart_core.o + ofpart-$(CONFIG_MTD_OF_PARTS_BCM4908) += ofpart_bcm4908.o ++ofpart-$(CONFIG_MTD_OF_PARTS_LINKSYS_NS)+= ofpart_linksys_ns.o + obj-$(CONFIG_MTD_PARSER_IMAGETAG) += parser_imagetag.o + obj-$(CONFIG_MTD_AFS_PARTS) += afs.o + obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o +--- a/drivers/mtd/parsers/ofpart_core.c ++++ b/drivers/mtd/parsers/ofpart_core.c +@@ -17,6 +17,7 @@ + #include <linux/mtd/partitions.h> + + #include "ofpart_bcm4908.h" ++#include "ofpart_linksys_ns.h" + + struct fixed_partitions_quirks { + int (*post_parse)(struct mtd_info *mtd, struct mtd_partition *parts, int nr_parts); +@@ -26,6 +27,10 @@ static struct fixed_partitions_quirks bc + .post_parse = bcm4908_partitions_post_parse, + }; + ++static struct fixed_partitions_quirks linksys_ns_partitions_quirks = { ++ .post_parse = linksys_ns_partitions_post_parse, ++}; ++ + static const struct of_device_id parse_ofpart_match_table[]; + + static bool node_has_compatible(struct device_node *pp) +@@ -167,6 +172,7 @@ static const struct of_device_id parse_o + { .compatible = "fixed-partitions" }, + /* Customized */ + { .compatible = "brcm,bcm4908-partitions", .data = &bcm4908_partitions_quirks, }, ++ { .compatible = "linksys,ns-partitions", .data = &linksys_ns_partitions_quirks, }, + {}, + }; + MODULE_DEVICE_TABLE(of, parse_ofpart_match_table); +--- /dev/null ++++ b/drivers/mtd/parsers/ofpart_linksys_ns.c +@@ -0,0 +1,50 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl> ++ */ ++ ++#include <linux/bcm47xx_nvram.h> ++#include <linux/mtd/mtd.h> ++#include <linux/mtd/partitions.h> ++ ++#include "ofpart_linksys_ns.h" ++ ++#define NVRAM_BOOT_PART "bootpartition" ++ ++static int ofpart_linksys_ns_bootpartition(void) ++{ ++ char buf[4]; ++ int bootpartition; ++ ++ /* Check CFE environment variable */ ++ if (bcm47xx_nvram_getenv(NVRAM_BOOT_PART, buf, sizeof(buf)) > 0) { ++ if (!kstrtoint(buf, 0, &bootpartition)) ++ return bootpartition; ++ pr_warn("Failed to parse %s value \"%s\"\n", NVRAM_BOOT_PART, ++ buf); ++ } else { ++ pr_warn("Failed to get NVRAM \"%s\"\n", NVRAM_BOOT_PART); ++ } ++ ++ return 0; ++} ++ ++int linksys_ns_partitions_post_parse(struct mtd_info *mtd, ++ struct mtd_partition *parts, ++ int nr_parts) ++{ ++ int bootpartition = ofpart_linksys_ns_bootpartition(); ++ int trx_idx = 0; ++ int i; ++ ++ for (i = 0; i < nr_parts; i++) { ++ if (of_device_is_compatible(parts[i].of_node, "linksys,ns-firmware")) { ++ if (trx_idx++ == bootpartition) ++ parts[i].name = "firmware"; ++ else ++ parts[i].name = "backup"; ++ } ++ } ++ ++ return 0; ++} +--- /dev/null ++++ b/drivers/mtd/parsers/ofpart_linksys_ns.h +@@ -0,0 +1,18 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef __OFPART_LINKSYS_NS_H ++#define __OFPART_LINKSYS_NS_H ++ ++#ifdef CONFIG_MTD_OF_PARTS_LINKSYS_NS ++int linksys_ns_partitions_post_parse(struct mtd_info *mtd, ++ struct mtd_partition *parts, ++ int nr_parts); ++#else ++static inline int linksys_ns_partitions_post_parse(struct mtd_info *mtd, ++ struct mtd_partition *parts, ++ int nr_parts) ++{ ++ return -EOPNOTSUPP; ++} ++#endif ++ ++#endif diff --git 
a/target/linux/generic/backport-5.15/408-v5.13-mtd-cfi_cmdset_0002-Disable-buffered-writes-for-AMD.patch b/target/linux/generic/backport-5.15/408-v5.13-mtd-cfi_cmdset_0002-Disable-buffered-writes-for-AMD.patch new file mode 100644 index 0000000000..3af641e62e --- /dev/null +++ b/target/linux/generic/backport-5.15/408-v5.13-mtd-cfi_cmdset_0002-Disable-buffered-writes-for-AMD.patch @@ -0,0 +1,54 @@ +From 7e4404113686868858a34210c28ae122e967aa64 Mon Sep 17 00:00:00 2001 +From: Mauri Sandberg <sandberg@mailfence.com> +Date: Tue, 9 Mar 2021 19:48:59 +0200 +Subject: [PATCH] mtd: cfi_cmdset_0002: Disable buffered writes for AMD chip + 0x2201 + +Buffer writes do not work with AMD chip 0x2201. The chip in question +is a AMD/Spansion/Cypress Semiconductor S29GL256N and datasheet [1] +talks about writing buffers being possible. While waiting for a neater +solution resort to writing word-sized chunks only. + +Without the patch kernel logs will be flooded with entries like below: + +jffs2_scan_eraseblock(): End of filesystem marker found at 0x0 +jffs2_build_filesystem(): unlocking the mtd device... +done. +jffs2_build_filesystem(): erasing all blocks after the end marker... +MTD do_write_buffer_wait(): software timeout, address:0x01ec000a. +jffs2: Write clean marker to block at 0x01920000 failed: -5 +MTD do_write_buffer_wait(): software timeout, address:0x01e2000a. +jffs2: Write clean marker to block at 0x01880000 failed: -5 +MTD do_write_buffer_wait(): software timeout, address:0x01e0000a. +jffs2: Write clean marker to block at 0x01860000 failed: -5 +MTD do_write_buffer_wait(): software timeout, address:0x01dc000a. +jffs2: Write clean marker to block at 0x01820000 failed: -5 +MTD do_write_buffer_wait(): software timeout, address:0x01da000a. +jffs2: Write clean marker to block at 0x01800000 failed: -5 +... + +Tested on a Buffalo wzr-hp-g300nh running kernel 5.10.16. + +[1] https://www.cypress.com/file/219941/download +or https://datasheetspdf.com/pdf-file/565708/SPANSION/S29GL256N/1 + +Signed-off-by: Mauri Sandberg <sandberg@mailfence.com> +Signed-off-by: Vignesh Raghavendra <vigneshr@ti.com> +Link: https://lore.kernel.org/r/20210309174859.362060-1-sandberg@mailfence.com +--- + drivers/mtd/chips/cfi_cmdset_0002.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/drivers/mtd/chips/cfi_cmdset_0002.c ++++ b/drivers/mtd/chips/cfi_cmdset_0002.c +@@ -272,6 +272,10 @@ static void fixup_use_write_buffers(stru + { + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; ++ ++ if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x2201) ++ return; ++ + if (cfi->cfiq->BufWriteTimeoutTyp) { + pr_debug("Using buffer write method\n"); + mtd->_write = cfi_amdstd_write_buffers; diff --git a/target/linux/generic/backport-5.15/409-v5.14-0001-dt-bindings-mtd-brcm-trx-Add-brcm-trx-magic.patch b/target/linux/generic/backport-5.15/409-v5.14-0001-dt-bindings-mtd-brcm-trx-Add-brcm-trx-magic.patch new file mode 100644 index 0000000000..1f34652141 --- /dev/null +++ b/target/linux/generic/backport-5.15/409-v5.14-0001-dt-bindings-mtd-brcm-trx-Add-brcm-trx-magic.patch @@ -0,0 +1,32 @@ +From a4d82940ff85a7e307953dfa715f65d5ab487e10 Mon Sep 17 00:00:00 2001 +From: Hauke Mehrtens <hauke@hauke-m.de> +Date: Sun, 18 Apr 2021 23:46:14 +0200 +Subject: dt-bindings: mtd: brcm,trx: Add brcm,trx-magic + +This adds the description of an additional property which allows to +specify a custom partition parser magic to detect a trx partition. 
+Buffalo has multiple device which are using the trx format, but with +different magic values. + +Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de> +Acked-by: Rob Herring <robh@kernel.org> +Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com> +Link: https://lore.kernel.org/linux-mtd/20210418214616.239574-2-hauke@hauke-m.de +--- + .../devicetree/bindings/mtd/partitions/brcm,trx.txt | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt ++++ b/Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt +@@ -28,6 +28,11 @@ detected by a software parsing TRX heade + Required properties: + - compatible : (required) must be "brcm,trx" + ++Optional properties: ++ ++- brcm,trx-magic: TRX magic, if it is different from the default magic ++ 0x30524448 as a u32. ++ + Example: + + flash@0 { diff --git a/target/linux/generic/backport-5.15/409-v5.14-0002-mtd-parsers-trx-Allow-to-specify-brcm-trx-magic-in-D.patch b/target/linux/generic/backport-5.15/409-v5.14-0002-mtd-parsers-trx-Allow-to-specify-brcm-trx-magic-in-D.patch new file mode 100644 index 0000000000..de2d914852 --- /dev/null +++ b/target/linux/generic/backport-5.15/409-v5.14-0002-mtd-parsers-trx-Allow-to-specify-brcm-trx-magic-in-D.patch @@ -0,0 +1,50 @@ +From d7f7e04f8b67571a4bf5a0dcd4f9da4214f5262c Mon Sep 17 00:00:00 2001 +From: Hauke Mehrtens <hauke@hauke-m.de> +Date: Sun, 18 Apr 2021 23:46:15 +0200 +Subject: mtd: parsers: trx: Allow to specify brcm, trx-magic in DT + +Buffalo uses a different TRX magic for every device, to be able to use +this trx parser, make it possible to specify the TRX magic in device +tree. If no TRX magic is specified in device tree, the standard value +will be used. This value should only be specified if a vendor chooses to +use a non standard TRX magic. 
+ +Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de> +Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com> +Link: https://lore.kernel.org/linux-mtd/20210418214616.239574-3-hauke@hauke-m.de +--- + drivers/mtd/parsers/parser_trx.c | 9 ++++++++- + 1 file changed, 8 insertions(+), 1 deletion(-) + +--- a/drivers/mtd/parsers/parser_trx.c ++++ b/drivers/mtd/parsers/parser_trx.c +@@ -51,13 +51,20 @@ static int parser_trx_parse(struct mtd_i + const struct mtd_partition **pparts, + struct mtd_part_parser_data *data) + { ++ struct device_node *np = mtd_get_of_node(mtd); + struct mtd_partition *parts; + struct mtd_partition *part; + struct trx_header trx; + size_t bytes_read; + uint8_t curr_part = 0, i = 0; ++ uint32_t trx_magic = TRX_MAGIC; + int err; + ++ /* Get different magic from device tree if specified */ ++ err = of_property_read_u32(np, "brcm,trx-magic", &trx_magic); ++ if (err != 0 && err != -EINVAL) ++ pr_err("failed to parse \"brcm,trx-magic\" DT attribute, using default: %d\n", err); ++ + parts = kcalloc(TRX_PARSER_MAX_PARTS, sizeof(struct mtd_partition), + GFP_KERNEL); + if (!parts) +@@ -70,7 +77,7 @@ static int parser_trx_parse(struct mtd_i + return err; + } + +- if (trx.magic != TRX_MAGIC) { ++ if (trx.magic != trx_magic) { + kfree(parts); + return -ENOENT; + } diff --git a/target/linux/generic/backport-5.15/409-v5.14-0003-mtd-parsers-trx-Allow-to-use-TRX-parser-on-Mediatek-.patch b/target/linux/generic/backport-5.15/409-v5.14-0003-mtd-parsers-trx-Allow-to-use-TRX-parser-on-Mediatek-.patch new file mode 100644 index 0000000000..faac535270 --- /dev/null +++ b/target/linux/generic/backport-5.15/409-v5.14-0003-mtd-parsers-trx-Allow-to-use-TRX-parser-on-Mediatek-.patch @@ -0,0 +1,25 @@ +From 81bb218c829246962a6327c64eec18ddcc049936 Mon Sep 17 00:00:00 2001 +From: Hauke Mehrtens <hauke@hauke-m.de> +Date: Sun, 18 Apr 2021 23:46:16 +0200 +Subject: mtd: parsers: trx: Allow to use TRX parser on Mediatek SoCs + +Buffalo uses the TRX partition format also on Mediatek MT7622 SoCs. + +Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de> +Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com> +Link: https://lore.kernel.org/linux-mtd/20210418214616.239574-4-hauke@hauke-m.de +--- + drivers/mtd/parsers/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/mtd/parsers/Kconfig ++++ b/drivers/mtd/parsers/Kconfig +@@ -115,7 +115,7 @@ config MTD_AFS_PARTS + + config MTD_PARSER_TRX + tristate "Parser for TRX format partitions" +- depends on MTD && (BCM47XX || ARCH_BCM_5301X || COMPILE_TEST) ++ depends on MTD && (BCM47XX || ARCH_BCM_5301X || ARCH_MEDIATEK || COMPILE_TEST) + help + TRX is a firmware format used by Broadcom on their devices. It + may contain up to 3/4 partitions (depending on the version). diff --git a/target/linux/generic/backport-5.15/410-mtd-next-mtd-parsers-trx-allow-to-use-on-MediaTek-MIPS-SoCs.patch b/target/linux/generic/backport-5.15/410-mtd-next-mtd-parsers-trx-allow-to-use-on-MediaTek-MIPS-SoCs.patch new file mode 100644 index 0000000000..5c49841760 --- /dev/null +++ b/target/linux/generic/backport-5.15/410-mtd-next-mtd-parsers-trx-allow-to-use-on-MediaTek-MIPS-SoCs.patch @@ -0,0 +1,33 @@ +From 2365f91c861cbfeef7141c69842848c7b2d3c2db Mon Sep 17 00:00:00 2001 +From: INAGAKI Hiroshi <musashino.open@gmail.com> +Date: Sun, 13 Feb 2022 15:40:44 +0900 +Subject: [PATCH] mtd: parsers: trx: allow to use on MediaTek MIPS SoCs + +Buffalo sells some router devices which have trx-formatted firmware, +based on MediaTek MIPS SoCs. 
To use parser_trx on those devices, add +"RALINK" to dependency and allow to compile for MediaTek MIPS SoCs. + +examples: + +- WCR-1166DS (MT7628) +- WSR-1166DHP (MT7621) +- WSR-2533DHP (MT7621) + +Signed-off-by: INAGAKI Hiroshi <musashino.open@gmail.com> +Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com> +Link: https://lore.kernel.org/linux-mtd/20220213064045.1781-1-musashino.open@gmail.com +--- + drivers/mtd/parsers/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/mtd/parsers/Kconfig ++++ b/drivers/mtd/parsers/Kconfig +@@ -115,7 +115,7 @@ config MTD_AFS_PARTS + + config MTD_PARSER_TRX + tristate "Parser for TRX format partitions" +- depends on MTD && (BCM47XX || ARCH_BCM_5301X || ARCH_MEDIATEK || COMPILE_TEST) ++ depends on MTD && (BCM47XX || ARCH_BCM_5301X || ARCH_MEDIATEK || RALINK || COMPILE_TEST) + help + TRX is a firmware format used by Broadcom on their devices. It + may contain up to 3/4 partitions (depending on the version). diff --git a/target/linux/generic/backport-5.15/500-v5.13-ubifs-default-to-zstd-compression.patch b/target/linux/generic/backport-5.15/500-v5.13-ubifs-default-to-zstd-compression.patch new file mode 100644 index 0000000000..dd50c19c27 --- /dev/null +++ b/target/linux/generic/backport-5.15/500-v5.13-ubifs-default-to-zstd-compression.patch @@ -0,0 +1,25 @@ +From dcdf415b740923530dc71d89fecc8361078473f5 Mon Sep 17 00:00:00 2001 +From: Rui Salvaterra <rsalvaterra@gmail.com> +Date: Mon, 5 Apr 2021 16:11:55 +0100 +Subject: [PATCH] ubifs: default to zstd compression + +Compared to lzo and zlib, zstd is the best all-around performer, both in terms +of speed and compression ratio. Set it as the default, if available. + +Signed-off-by: Rui Salvaterra <rsalvaterra@gmail.com> +--- + fs/ubifs/sb.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/fs/ubifs/sb.c ++++ b/fs/ubifs/sb.c +@@ -53,6 +53,9 @@ + + static int get_default_compressor(struct ubifs_info *c) + { ++ if (ubifs_compr_present(c, UBIFS_COMPR_ZSTD)) ++ return UBIFS_COMPR_ZSTD; ++ + if (ubifs_compr_present(c, UBIFS_COMPR_LZO)) + return UBIFS_COMPR_LZO; + diff --git a/target/linux/generic/backport-5.15/600-v5.12-net-extract-napi-poll-functionality-to-__napi_poll.patch b/target/linux/generic/backport-5.15/600-v5.12-net-extract-napi-poll-functionality-to-__napi_poll.patch new file mode 100644 index 0000000000..1fca4a5e74 --- /dev/null +++ b/target/linux/generic/backport-5.15/600-v5.12-net-extract-napi-poll-functionality-to-__napi_poll.patch @@ -0,0 +1,88 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Mon, 8 Feb 2021 11:34:08 -0800 +Subject: [PATCH] net: extract napi poll functionality to __napi_poll() + +This commit introduces a new function __napi_poll() which does the main +logic of the existing napi_poll() function, and will be called by other +functions in later commits. +This idea and implementation is done by Felix Fietkau <nbd@nbd.name> and +is proposed as part of the patch to move napi work to work_queue +context. +This commit by itself is a code restructure. + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Wei Wang <weiwan@google.com> +Reviewed-by: Alexander Duyck <alexanderduyck@fb.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -6805,15 +6805,10 @@ void __netif_napi_del(struct napi_struct + } + EXPORT_SYMBOL(__netif_napi_del); + +-static int napi_poll(struct napi_struct *n, struct list_head *repoll) ++static int __napi_poll(struct napi_struct *n, bool *repoll) + { +- void *have; + int work, weight; + +- list_del_init(&n->poll_list); +- +- have = netpoll_poll_lock(n); +- + weight = n->weight; + + /* This NAPI_STATE_SCHED test is for avoiding a race +@@ -6833,7 +6828,7 @@ static int napi_poll(struct napi_struct + n->poll, work, weight); + + if (likely(work < weight)) +- goto out_unlock; ++ return work; + + /* Drivers must not modify the NAPI state if they + * consume the entire weight. In such cases this code +@@ -6842,7 +6837,7 @@ static int napi_poll(struct napi_struct + */ + if (unlikely(napi_disable_pending(n))) { + napi_complete(n); +- goto out_unlock; ++ return work; + } + + if (n->gro_bitmask) { +@@ -6860,12 +6855,29 @@ static int napi_poll(struct napi_struct + if (unlikely(!list_empty(&n->poll_list))) { + pr_warn_once("%s: Budget exhausted after napi rescheduled\n", + n->dev ? n->dev->name : "backlog"); +- goto out_unlock; ++ return work; + } + +- list_add_tail(&n->poll_list, repoll); ++ *repoll = true; ++ ++ return work; ++} ++ ++static int napi_poll(struct napi_struct *n, struct list_head *repoll) ++{ ++ bool do_repoll = false; ++ void *have; ++ int work; ++ ++ list_del_init(&n->poll_list); ++ ++ have = netpoll_poll_lock(n); ++ ++ work = __napi_poll(n, &do_repoll); ++ ++ if (do_repoll) ++ list_add_tail(&n->poll_list, repoll); + +-out_unlock: + netpoll_poll_unlock(have); + + return work; diff --git a/target/linux/generic/backport-5.15/601-v5.12-net-implement-threaded-able-napi-poll-loop-support.patch b/target/linux/generic/backport-5.15/601-v5.12-net-implement-threaded-able-napi-poll-loop-support.patch new file mode 100644 index 0000000000..38e276ef72 --- /dev/null +++ b/target/linux/generic/backport-5.15/601-v5.12-net-implement-threaded-able-napi-poll-loop-support.patch @@ -0,0 +1,261 @@ +From: Wei Wang <weiwan@google.com> +Date: Mon, 8 Feb 2021 11:34:09 -0800 +Subject: [PATCH] net: implement threaded-able napi poll loop support + +This patch allows running each napi poll loop inside its own +kernel thread. +The kthread is created during netif_napi_add() if dev->threaded +is set. And threaded mode is enabled in napi_enable(). We will +provide a way to set dev->threaded and enable threaded mode +without a device up/down in the following patch. + +Once that threaded mode is enabled and the kthread is +started, napi_schedule() will wake-up such thread instead +of scheduling the softirq. + +The threaded poll loop behaves quite likely the net_rx_action, +but it does not have to manipulate local irqs and uses +an explicit scheduling point based on netdev_budget. + +Co-developed-by: Paolo Abeni <pabeni@redhat.com> +Signed-off-by: Paolo Abeni <pabeni@redhat.com> +Co-developed-by: Hannes Frederic Sowa <hannes@stressinduktion.org> +Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org> +Co-developed-by: Jakub Kicinski <kuba@kernel.org> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +Signed-off-by: Wei Wang <weiwan@google.com> +Reviewed-by: Alexander Duyck <alexanderduyck@fb.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -347,6 +347,7 @@ struct napi_struct { + struct list_head dev_list; + struct hlist_node napi_hash_node; + unsigned int napi_id; ++ struct task_struct *thread; + }; + + enum { +@@ -357,6 +358,7 @@ enum { + NAPI_STATE_LISTED, /* NAPI added to system lists */ + NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ + NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */ ++ NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/ + }; + + enum { +@@ -367,6 +369,7 @@ enum { + NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED), + NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), + NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), ++ NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED), + }; + + enum gro_result { +@@ -497,20 +500,7 @@ static inline bool napi_complete(struct + */ + void napi_disable(struct napi_struct *n); + +-/** +- * napi_enable - enable NAPI scheduling +- * @n: NAPI context +- * +- * Resume NAPI from being scheduled on this context. +- * Must be paired with napi_disable. +- */ +-static inline void napi_enable(struct napi_struct *n) +-{ +- BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); +- smp_mb__before_atomic(); +- clear_bit(NAPI_STATE_SCHED, &n->state); +- clear_bit(NAPI_STATE_NPSVC, &n->state); +-} ++void napi_enable(struct napi_struct *n); + + /** + * napi_synchronize - wait until NAPI is not running +@@ -1842,6 +1832,8 @@ enum netdev_ml_priv_type { + * + * @wol_enabled: Wake-on-LAN is enabled + * ++ * @threaded: napi threaded mode is enabled ++ * + * @net_notifier_list: List of per-net netdev notifier block + * that follow this device when it is moved + * to another network namespace. +@@ -2161,6 +2153,7 @@ struct net_device { + struct lock_class_key *qdisc_running_key; + bool proto_down; + unsigned wol_enabled:1; ++ unsigned threaded:1; + + struct list_head net_notifier_list; + +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -91,6 +91,7 @@ + #include <linux/etherdevice.h> + #include <linux/ethtool.h> + #include <linux/skbuff.h> ++#include <linux/kthread.h> + #include <linux/bpf.h> + #include <linux/bpf_trace.h> + #include <net/net_namespace.h> +@@ -1500,6 +1501,27 @@ void netdev_notify_peers(struct net_devi + } + EXPORT_SYMBOL(netdev_notify_peers); + ++static int napi_threaded_poll(void *data); ++ ++static int napi_kthread_create(struct napi_struct *n) ++{ ++ int err = 0; ++ ++ /* Create and wake up the kthread once to put it in ++ * TASK_INTERRUPTIBLE mode to avoid the blocked task ++ * warning and work with loadavg. ++ */ ++ n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d", ++ n->dev->name, n->napi_id); ++ if (IS_ERR(n->thread)) { ++ err = PTR_ERR(n->thread); ++ pr_err("kthread_run failed with err %d\n", err); ++ n->thread = NULL; ++ } ++ ++ return err; ++} ++ + static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack) + { + const struct net_device_ops *ops = dev->netdev_ops; +@@ -4267,6 +4289,21 @@ int gro_normal_batch __read_mostly = 8; + static inline void ____napi_schedule(struct softnet_data *sd, + struct napi_struct *napi) + { ++ struct task_struct *thread; ++ ++ if (test_bit(NAPI_STATE_THREADED, &napi->state)) { ++ /* Paired with smp_mb__before_atomic() in ++ * napi_enable(). Use READ_ONCE() to guarantee ++ * a complete read on napi->thread. Only call ++ * wake_up_process() when it's not NULL. 
++ */ ++ thread = READ_ONCE(napi->thread); ++ if (thread) { ++ wake_up_process(thread); ++ return; ++ } ++ } ++ + list_add_tail(&napi->poll_list, &sd->poll_list); + __raise_softirq_irqoff(NET_RX_SOFTIRQ); + } +@@ -6758,6 +6795,12 @@ void netif_napi_add(struct net_device *d + set_bit(NAPI_STATE_NPSVC, &napi->state); + list_add_rcu(&napi->dev_list, &dev->napi_list); + napi_hash_add(napi); ++ /* Create kthread for this napi if dev->threaded is set. ++ * Clear dev->threaded if kthread creation failed so that ++ * threaded mode will not be enabled in napi_enable(). ++ */ ++ if (dev->threaded && napi_kthread_create(napi)) ++ dev->threaded = 0; + } + EXPORT_SYMBOL(netif_napi_add); + +@@ -6774,9 +6817,28 @@ void napi_disable(struct napi_struct *n) + hrtimer_cancel(&n->timer); + + clear_bit(NAPI_STATE_DISABLE, &n->state); ++ clear_bit(NAPI_STATE_THREADED, &n->state); + } + EXPORT_SYMBOL(napi_disable); + ++/** ++ * napi_enable - enable NAPI scheduling ++ * @n: NAPI context ++ * ++ * Resume NAPI from being scheduled on this context. ++ * Must be paired with napi_disable. ++ */ ++void napi_enable(struct napi_struct *n) ++{ ++ BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); ++ smp_mb__before_atomic(); ++ clear_bit(NAPI_STATE_SCHED, &n->state); ++ clear_bit(NAPI_STATE_NPSVC, &n->state); ++ if (n->dev->threaded && n->thread) ++ set_bit(NAPI_STATE_THREADED, &n->state); ++} ++EXPORT_SYMBOL(napi_enable); ++ + static void flush_gro_hash(struct napi_struct *napi) + { + int i; +@@ -6802,6 +6864,11 @@ void __netif_napi_del(struct napi_struct + + flush_gro_hash(napi); + napi->gro_bitmask = 0; ++ ++ if (napi->thread) { ++ kthread_stop(napi->thread); ++ napi->thread = NULL; ++ } + } + EXPORT_SYMBOL(__netif_napi_del); + +@@ -6883,6 +6950,51 @@ static int napi_poll(struct napi_struct + return work; + } + ++static int napi_thread_wait(struct napi_struct *napi) ++{ ++ set_current_state(TASK_INTERRUPTIBLE); ++ ++ while (!kthread_should_stop() && !napi_disable_pending(napi)) { ++ if (test_bit(NAPI_STATE_SCHED, &napi->state)) { ++ WARN_ON(!list_empty(&napi->poll_list)); ++ __set_current_state(TASK_RUNNING); ++ return 0; ++ } ++ ++ schedule(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ } ++ __set_current_state(TASK_RUNNING); ++ return -1; ++} ++ ++static int napi_threaded_poll(void *data) ++{ ++ struct napi_struct *napi = data; ++ void *have; ++ ++ while (!napi_thread_wait(napi)) { ++ for (;;) { ++ bool repoll = false; ++ ++ local_bh_disable(); ++ ++ have = netpoll_poll_lock(napi); ++ __napi_poll(napi, &repoll); ++ netpoll_poll_unlock(have); ++ ++ __kfree_skb_flush(); ++ local_bh_enable(); ++ ++ if (!repoll) ++ break; ++ ++ cond_resched(); ++ } ++ } ++ return 0; ++} ++ + static __latent_entropy void net_rx_action(struct softirq_action *h) + { + struct softnet_data *sd = this_cpu_ptr(&softnet_data); diff --git a/target/linux/generic/backport-5.15/602-v5.12-net-add-sysfs-attribute-to-control-napi-threaded-mod.patch b/target/linux/generic/backport-5.15/602-v5.12-net-add-sysfs-attribute-to-control-napi-threaded-mod.patch new file mode 100644 index 0000000000..c3119a6e9b --- /dev/null +++ b/target/linux/generic/backport-5.15/602-v5.12-net-add-sysfs-attribute-to-control-napi-threaded-mod.patch @@ -0,0 +1,177 @@ +From: Wei Wang <weiwan@google.com> +Date: Mon, 8 Feb 2021 11:34:10 -0800 +Subject: [PATCH] net: add sysfs attribute to control napi threaded mode + +This patch adds a new sysfs attribute to the network device class. 
+Said attribute provides a per-device control to enable/disable the +threaded mode for all the napi instances of the given network device, +without the need for a device up/down. +User sets it to 1 or 0 to enable or disable threaded mode. +Note: when switching between threaded and the current softirq based mode +for a napi instance, it will not immediately take effect if the napi is +currently being polled. The mode switch will happen for the next time +napi_schedule() is called. + +Co-developed-by: Paolo Abeni <pabeni@redhat.com> +Signed-off-by: Paolo Abeni <pabeni@redhat.com> +Co-developed-by: Hannes Frederic Sowa <hannes@stressinduktion.org> +Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org> +Co-developed-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Wei Wang <weiwan@google.com> +Reviewed-by: Alexander Duyck <alexanderduyck@fb.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + +--- a/Documentation/ABI/testing/sysfs-class-net ++++ b/Documentation/ABI/testing/sysfs-class-net +@@ -337,3 +337,18 @@ Contact: netdev@vger.kernel.org + Description: + 32-bit unsigned integer counting the number of times the link has + been down ++ ++What: /sys/class/net/<iface>/threaded ++Date: Jan 2021 ++KernelVersion: 5.12 ++Contact: netdev@vger.kernel.org ++Description: ++ Boolean value to control the threaded mode per device. User could ++ set this value to enable/disable threaded mode for all napi ++ belonging to this device, without the need to do device up/down. ++ ++ Possible values: ++ == ================================== ++ 0 threaded mode disabled for this dev ++ 1 threaded mode enabled for this dev ++ == ================================== +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -491,6 +491,8 @@ static inline bool napi_complete(struct + return napi_complete_done(n, 0); + } + ++int dev_set_threaded(struct net_device *dev, bool threaded); ++ + /** + * napi_disable - prevent NAPI from scheduling + * @n: NAPI context +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -4293,8 +4293,9 @@ static inline void ____napi_schedule(str + + if (test_bit(NAPI_STATE_THREADED, &napi->state)) { + /* Paired with smp_mb__before_atomic() in +- * napi_enable(). Use READ_ONCE() to guarantee +- * a complete read on napi->thread. Only call ++ * napi_enable()/dev_set_threaded(). ++ * Use READ_ONCE() to guarantee a complete ++ * read on napi->thread. Only call + * wake_up_process() when it's not NULL. + */ + thread = READ_ONCE(napi->thread); +@@ -6768,6 +6769,49 @@ static void init_gro_hash(struct napi_st + napi->gro_bitmask = 0; + } + ++int dev_set_threaded(struct net_device *dev, bool threaded) ++{ ++ struct napi_struct *napi; ++ int err = 0; ++ ++ if (dev->threaded == threaded) ++ return 0; ++ ++ if (threaded) { ++ list_for_each_entry(napi, &dev->napi_list, dev_list) { ++ if (!napi->thread) { ++ err = napi_kthread_create(napi); ++ if (err) { ++ threaded = false; ++ break; ++ } ++ } ++ } ++ } ++ ++ dev->threaded = threaded; ++ ++ /* Make sure kthread is created before THREADED bit ++ * is set. ++ */ ++ smp_mb__before_atomic(); ++ ++ /* Setting/unsetting threaded mode on a napi might not immediately ++ * take effect, if the current napi instance is actively being ++ * polled. In this case, the switch between threaded mode and ++ * softirq mode will happen in the next round of napi_schedule(). ++ * This should not cause hiccups/stalls to the live traffic. 
++ */ ++ list_for_each_entry(napi, &dev->napi_list, dev_list) { ++ if (threaded) ++ set_bit(NAPI_STATE_THREADED, &napi->state); ++ else ++ clear_bit(NAPI_STATE_THREADED, &napi->state); ++ } ++ ++ return err; ++} ++ + void netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) + { +--- a/net/core/net-sysfs.c ++++ b/net/core/net-sysfs.c +@@ -587,6 +587,45 @@ static ssize_t phys_switch_id_show(struc + } + static DEVICE_ATTR_RO(phys_switch_id); + ++static ssize_t threaded_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct net_device *netdev = to_net_dev(dev); ++ ssize_t ret = -EINVAL; ++ ++ if (!rtnl_trylock()) ++ return restart_syscall(); ++ ++ if (dev_isalive(netdev)) ++ ret = sprintf(buf, fmt_dec, netdev->threaded); ++ ++ rtnl_unlock(); ++ return ret; ++} ++ ++static int modify_napi_threaded(struct net_device *dev, unsigned long val) ++{ ++ int ret; ++ ++ if (list_empty(&dev->napi_list)) ++ return -EOPNOTSUPP; ++ ++ if (val != 0 && val != 1) ++ return -EOPNOTSUPP; ++ ++ ret = dev_set_threaded(dev, val); ++ ++ return ret; ++} ++ ++static ssize_t threaded_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t len) ++{ ++ return netdev_store(dev, attr, buf, len, modify_napi_threaded); ++} ++static DEVICE_ATTR_RW(threaded); ++ + static struct attribute *net_class_attrs[] __ro_after_init = { + &dev_attr_netdev_group.attr, + &dev_attr_type.attr, +@@ -619,6 +658,7 @@ static struct attribute *net_class_attrs + &dev_attr_proto_down.attr, + &dev_attr_carrier_up_count.attr, + &dev_attr_carrier_down_count.attr, ++ &dev_attr_threaded.attr, + NULL, + }; + ATTRIBUTE_GROUPS(net_class); diff --git a/target/linux/generic/backport-5.15/603-v5.12-net-fix-race-between-napi-kthread-mode-and-busy-poll.patch b/target/linux/generic/backport-5.15/603-v5.12-net-fix-race-between-napi-kthread-mode-and-busy-poll.patch new file mode 100644 index 0000000000..febef3e28a --- /dev/null +++ b/target/linux/generic/backport-5.15/603-v5.12-net-fix-race-between-napi-kthread-mode-and-busy-poll.patch @@ -0,0 +1,93 @@ +From: Wei Wang <weiwan@google.com> +Date: Mon, 1 Mar 2021 17:21:13 -0800 +Subject: [PATCH] net: fix race between napi kthread mode and busy poll + +Currently, napi_thread_wait() checks for NAPI_STATE_SCHED bit to +determine if the kthread owns this napi and could call napi->poll() on +it. However, if socket busy poll is enabled, it is possible that the +busy poll thread grabs this SCHED bit (after the previous napi->poll() +invokes napi_complete_done() and clears SCHED bit) and tries to poll +on the same napi. napi_disable() could grab the SCHED bit as well. +This patch tries to fix this race by adding a new bit +NAPI_STATE_SCHED_THREADED in napi->state. This bit gets set in +____napi_schedule() if the threaded mode is enabled, and gets cleared +in napi_complete_done(), and we only poll the napi in kthread if this +bit is set. This helps distinguish the ownership of the napi between +kthread and other scenarios and fixes the race issue. 
+ +Fixes: 29863d41bb6e ("net: implement threaded-able napi poll loop support") +Reported-by: Martin Zaharinov <micron10@gmail.com> +Suggested-by: Jakub Kicinski <kuba@kernel.org> +Signed-off-by: Wei Wang <weiwan@google.com> +Cc: Alexander Duyck <alexanderduyck@fb.com> +Cc: Eric Dumazet <edumazet@google.com> +Cc: Paolo Abeni <pabeni@redhat.com> +Cc: Hannes Frederic Sowa <hannes@stressinduktion.org> +--- + +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -359,6 +359,7 @@ enum { + NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ + NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */ + NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/ ++ NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */ + }; + + enum { +@@ -370,6 +371,7 @@ enum { + NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), + NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), + NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED), ++ NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED), + }; + + enum gro_result { +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -4300,6 +4300,8 @@ static inline void ____napi_schedule(str + */ + thread = READ_ONCE(napi->thread); + if (thread) { ++ if (thread->state != TASK_INTERRUPTIBLE) ++ set_bit(NAPI_STATE_SCHED_THREADED, &napi->state); + wake_up_process(thread); + return; + } +@@ -6560,7 +6562,8 @@ bool napi_complete_done(struct napi_stru + + WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); + +- new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED); ++ new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED | ++ NAPIF_STATE_SCHED_THREADED); + + /* If STATE_MISSED was set, leave STATE_SCHED set, + * because we will call napi->poll() one more time. +@@ -6996,16 +6999,25 @@ static int napi_poll(struct napi_struct + + static int napi_thread_wait(struct napi_struct *napi) + { ++ bool woken = false; ++ + set_current_state(TASK_INTERRUPTIBLE); + + while (!kthread_should_stop() && !napi_disable_pending(napi)) { +- if (test_bit(NAPI_STATE_SCHED, &napi->state)) { ++ /* Testing SCHED_THREADED bit here to make sure the current ++ * kthread owns this napi and could poll on this napi. ++ * Testing SCHED bit is not enough because SCHED bit might be ++ * set by some other busy poll thread or by napi_disable(). ++ */ ++ if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) { + WARN_ON(!list_empty(&napi->poll_list)); + __set_current_state(TASK_RUNNING); + return 0; + } + + schedule(); ++ /* woken being true indicates this thread owns this napi. */ ++ woken = true; + set_current_state(TASK_INTERRUPTIBLE); + } + __set_current_state(TASK_RUNNING); diff --git a/target/linux/generic/backport-5.15/604-v5.12-net-fix-hangup-on-napi_disable-for-threaded-napi.patch b/target/linux/generic/backport-5.15/604-v5.12-net-fix-hangup-on-napi_disable-for-threaded-napi.patch new file mode 100644 index 0000000000..1223b15c33 --- /dev/null +++ b/target/linux/generic/backport-5.15/604-v5.12-net-fix-hangup-on-napi_disable-for-threaded-napi.patch @@ -0,0 +1,53 @@ +From: Paolo Abeni <pabeni@redhat.com> +Date: Fri, 9 Apr 2021 17:24:17 +0200 +Subject: [PATCH] net: fix hangup on napi_disable for threaded napi + +napi_disable() is subject to an hangup, when the threaded +mode is enabled and the napi is under heavy traffic. 
+ +If the relevant napi has been scheduled and the napi_disable() +kicks in before the next napi_threaded_wait() completes - so +that the latter quits due to the napi_disable_pending() condition, +the existing code leaves the NAPI_STATE_SCHED bit set and the +napi_disable() loop waiting for such bit will hang. + +This patch addresses the issue by dropping the NAPI_STATE_DISABLE +bit test in napi_thread_wait(). The later napi_threaded_poll() +iteration will take care of clearing the NAPI_STATE_SCHED. + +This also addresses a related problem reported by Jakub: +before this patch a napi_disable()/napi_enable() pair killed +the napi thread, effectively disabling the threaded mode. +On the patched kernel napi_disable() simply stops scheduling +the relevant thread. + +v1 -> v2: + - let the main napi_thread_poll() loop clear the SCHED bit + +Reported-by: Jakub Kicinski <kuba@kernel.org> +Fixes: 29863d41bb6e ("net: implement threaded-able napi poll loop support") +Signed-off-by: Paolo Abeni <pabeni@redhat.com> +Reviewed-by: Eric Dumazet <edumazet@google.com> +Link: https://lore.kernel.org/r/883923fa22745a9589e8610962b7dc59df09fb1f.1617981844.git.pabeni@redhat.com +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -7003,7 +7003,7 @@ static int napi_thread_wait(struct napi_ + + set_current_state(TASK_INTERRUPTIBLE); + +- while (!kthread_should_stop() && !napi_disable_pending(napi)) { ++ while (!kthread_should_stop()) { + /* Testing SCHED_THREADED bit here to make sure the current + * kthread owns this napi and could poll on this napi. + * Testing SCHED bit is not enough because SCHED bit might be +@@ -7021,6 +7021,7 @@ static int napi_thread_wait(struct napi_ + set_current_state(TASK_INTERRUPTIBLE); + } + __set_current_state(TASK_RUNNING); ++ + return -1; + } + diff --git a/target/linux/generic/backport-5.15/610-v5.13-00-netfilter-flowtable-add-hash-offset-field-to-tuple.patch b/target/linux/generic/backport-5.15/610-v5.13-00-netfilter-flowtable-add-hash-offset-field-to-tuple.patch new file mode 100644 index 0000000000..c881ccfcb0 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-00-netfilter-flowtable-add-hash-offset-field-to-tuple.patch @@ -0,0 +1,52 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Fri, 20 Nov 2020 13:49:13 +0100 +Subject: [PATCH] netfilter: flowtable: add hash offset field to tuple + +Add a placeholder field to calculate hash tuple offset. Similar to +2c407aca6497 ("netfilter: conntrack: avoid gcc-10 zero-length-bounds +warning"). + +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/net/netfilter/nf_flow_table.h ++++ b/include/net/netfilter/nf_flow_table.h +@@ -107,6 +107,10 @@ struct flow_offload_tuple { + + u8 l3proto; + u8 l4proto; ++ ++ /* All members above are keys for lookups, see flow_offload_hash(). 
*/ ++ struct { } __hash; ++ + u8 dir; + + u16 mtu; +--- a/net/netfilter/nf_flow_table_core.c ++++ b/net/netfilter/nf_flow_table_core.c +@@ -191,14 +191,14 @@ static u32 flow_offload_hash(const void + { + const struct flow_offload_tuple *tuple = data; + +- return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed); ++ return jhash(tuple, offsetof(struct flow_offload_tuple, __hash), seed); + } + + static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed) + { + const struct flow_offload_tuple_rhash *tuplehash = data; + +- return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed); ++ return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, __hash), seed); + } + + static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg, +@@ -207,7 +207,7 @@ static int flow_offload_hash_cmp(struct + const struct flow_offload_tuple *tuple = arg->key; + const struct flow_offload_tuple_rhash *x = ptr; + +- if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir))) ++ if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, __hash))) + return 1; + + return 0; diff --git a/target/linux/generic/backport-5.15/610-v5.13-01-netfilter-flowtable-separate-replace-destroy-and-sta.patch b/target/linux/generic/backport-5.15/610-v5.13-01-netfilter-flowtable-separate-replace-destroy-and-sta.patch new file mode 100644 index 0000000000..d98b4bfb7f --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-01-netfilter-flowtable-separate-replace-destroy-and-sta.patch @@ -0,0 +1,98 @@ +From: Oz Shlomo <ozsh@nvidia.com> +Date: Tue, 23 Mar 2021 00:56:19 +0100 +Subject: [PATCH] netfilter: flowtable: separate replace, destroy and + stats to different workqueues + +Currently the flow table offload replace, destroy and stats work items are +executed on a single workqueue. As such, DESTROY and STATS commands may +be backloged after a burst of REPLACE work items. This scenario can bloat +up memory and may cause active connections to age. + +Instatiate add, del and stats workqueues to avoid backlogs of non-dependent +actions. Provide sysfs control over the workqueue attributes, allowing +userspace applications to control the workqueue cpumask. 
+ +Signed-off-by: Oz Shlomo <ozsh@nvidia.com> +Reviewed-by: Paul Blakey <paulb@nvidia.com> +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/net/netfilter/nf_flow_table_offload.c ++++ b/net/netfilter/nf_flow_table_offload.c +@@ -13,7 +13,9 @@ + #include <net/netfilter/nf_conntrack_core.h> + #include <net/netfilter/nf_conntrack_tuple.h> + +-static struct workqueue_struct *nf_flow_offload_wq; ++static struct workqueue_struct *nf_flow_offload_add_wq; ++static struct workqueue_struct *nf_flow_offload_del_wq; ++static struct workqueue_struct *nf_flow_offload_stats_wq; + + struct flow_offload_work { + struct list_head list; +@@ -827,7 +829,12 @@ static void flow_offload_work_handler(st + + static void flow_offload_queue_work(struct flow_offload_work *offload) + { +- queue_work(nf_flow_offload_wq, &offload->work); ++ if (offload->cmd == FLOW_CLS_REPLACE) ++ queue_work(nf_flow_offload_add_wq, &offload->work); ++ else if (offload->cmd == FLOW_CLS_DESTROY) ++ queue_work(nf_flow_offload_del_wq, &offload->work); ++ else ++ queue_work(nf_flow_offload_stats_wq, &offload->work); + } + + static struct flow_offload_work * +@@ -899,8 +906,11 @@ void nf_flow_offload_stats(struct nf_flo + + void nf_flow_table_offload_flush(struct nf_flowtable *flowtable) + { +- if (nf_flowtable_hw_offload(flowtable)) +- flush_workqueue(nf_flow_offload_wq); ++ if (nf_flowtable_hw_offload(flowtable)) { ++ flush_workqueue(nf_flow_offload_add_wq); ++ flush_workqueue(nf_flow_offload_del_wq); ++ flush_workqueue(nf_flow_offload_stats_wq); ++ } + } + + static int nf_flow_table_block_setup(struct nf_flowtable *flowtable, +@@ -1013,15 +1023,33 @@ EXPORT_SYMBOL_GPL(nf_flow_table_offload_ + + int nf_flow_table_offload_init(void) + { +- nf_flow_offload_wq = alloc_workqueue("nf_flow_table_offload", +- WQ_UNBOUND, 0); +- if (!nf_flow_offload_wq) ++ nf_flow_offload_add_wq = alloc_workqueue("nf_ft_offload_add", ++ WQ_UNBOUND | WQ_SYSFS, 0); ++ if (!nf_flow_offload_add_wq) + return -ENOMEM; + ++ nf_flow_offload_del_wq = alloc_workqueue("nf_ft_offload_del", ++ WQ_UNBOUND | WQ_SYSFS, 0); ++ if (!nf_flow_offload_del_wq) ++ goto err_del_wq; ++ ++ nf_flow_offload_stats_wq = alloc_workqueue("nf_ft_offload_stats", ++ WQ_UNBOUND | WQ_SYSFS, 0); ++ if (!nf_flow_offload_stats_wq) ++ goto err_stats_wq; ++ + return 0; ++ ++err_stats_wq: ++ destroy_workqueue(nf_flow_offload_del_wq); ++err_del_wq: ++ destroy_workqueue(nf_flow_offload_add_wq); ++ return -ENOMEM; + } + + void nf_flow_table_offload_exit(void) + { +- destroy_workqueue(nf_flow_offload_wq); ++ destroy_workqueue(nf_flow_offload_add_wq); ++ destroy_workqueue(nf_flow_offload_del_wq); ++ destroy_workqueue(nf_flow_offload_stats_wq); + } diff --git a/target/linux/generic/backport-5.15/610-v5.13-03-netfilter-conntrack-Remove-unused-variable-declarati.patch b/target/linux/generic/backport-5.15/610-v5.13-03-netfilter-conntrack-Remove-unused-variable-declarati.patch new file mode 100644 index 0000000000..37e80d989d --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-03-netfilter-conntrack-Remove-unused-variable-declarati.patch @@ -0,0 +1,22 @@ +From: YueHaibing <yuehaibing@huawei.com> +Date: Tue, 23 Mar 2021 00:56:21 +0100 +Subject: [PATCH] netfilter: conntrack: Remove unused variable + declaration + +commit e97c3e278e95 ("tproxy: split off ipv6 defragmentation to a separate +module") left behind this. 
+ +Signed-off-by: YueHaibing <yuehaibing@huawei.com> +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h ++++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h +@@ -4,7 +4,4 @@ + + extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; + +-#include <linux/sysctl.h> +-extern struct ctl_table nf_ct_ipv6_sysctl_table[]; +- + #endif /* _NF_CONNTRACK_IPV6_H*/ diff --git a/target/linux/generic/backport-5.15/610-v5.13-04-netfilter-flowtable-consolidate-skb_try_make_writabl.patch b/target/linux/generic/backport-5.15/610-v5.13-04-netfilter-flowtable-consolidate-skb_try_make_writabl.patch new file mode 100644 index 0000000000..9fd01b465e --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-04-netfilter-flowtable-consolidate-skb_try_make_writabl.patch @@ -0,0 +1,291 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Tue, 23 Mar 2021 00:56:22 +0100 +Subject: [PATCH] netfilter: flowtable: consolidate + skb_try_make_writable() call + +Fetch the layer 4 header size to be mangled by NAT when building the +tuple, then use it to make writable the network and the transport +headers. After this update, the NAT routines now assumes that the skbuff +area is writable. Do the pointer refetch only after the single +skb_try_make_writable() call. + +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/net/netfilter/nf_flow_table_core.c ++++ b/net/netfilter/nf_flow_table_core.c +@@ -394,9 +394,6 @@ static int nf_flow_nat_port_tcp(struct s + { + struct tcphdr *tcph; + +- if (skb_try_make_writable(skb, thoff + sizeof(*tcph))) +- return -1; +- + tcph = (void *)(skb_network_header(skb) + thoff); + inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false); + +@@ -408,9 +405,6 @@ static int nf_flow_nat_port_udp(struct s + { + struct udphdr *udph; + +- if (skb_try_make_writable(skb, thoff + sizeof(*udph))) +- return -1; +- + udph = (void *)(skb_network_header(skb) + thoff); + if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) { + inet_proto_csum_replace2(&udph->check, skb, port, +@@ -446,9 +440,6 @@ int nf_flow_snat_port(const struct flow_ + struct flow_ports *hdr; + __be16 port, new_port; + +- if (skb_try_make_writable(skb, thoff + sizeof(*hdr))) +- return -1; +- + hdr = (void *)(skb_network_header(skb) + thoff); + + switch (dir) { +@@ -477,9 +468,6 @@ int nf_flow_dnat_port(const struct flow_ + struct flow_ports *hdr; + __be16 port, new_port; + +- if (skb_try_make_writable(skb, thoff + sizeof(*hdr))) +- return -1; +- + hdr = (void *)(skb_network_header(skb) + thoff); + + switch (dir) { +--- a/net/netfilter/nf_flow_table_ip.c ++++ b/net/netfilter/nf_flow_table_ip.c +@@ -39,9 +39,6 @@ static int nf_flow_nat_ip_tcp(struct sk_ + { + struct tcphdr *tcph; + +- if (skb_try_make_writable(skb, thoff + sizeof(*tcph))) +- return -1; +- + tcph = (void *)(skb_network_header(skb) + thoff); + inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true); + +@@ -53,9 +50,6 @@ static int nf_flow_nat_ip_udp(struct sk_ + { + struct udphdr *udph; + +- if (skb_try_make_writable(skb, thoff + sizeof(*udph))) +- return -1; +- + udph = (void *)(skb_network_header(skb) + thoff); + if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) { + inet_proto_csum_replace4(&udph->check, skb, addr, +@@ -136,19 +130,17 @@ static int nf_flow_dnat_ip(const struct + } + + static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb, +- unsigned int thoff, enum flow_offload_tuple_dir dir) ++ unsigned int thoff, enum 
flow_offload_tuple_dir dir, ++ struct iphdr *iph) + { +- struct iphdr *iph = ip_hdr(skb); +- + if (test_bit(NF_FLOW_SNAT, &flow->flags) && + (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 || +- nf_flow_snat_ip(flow, skb, ip_hdr(skb), thoff, dir) < 0)) ++ nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0)) + return -1; + +- iph = ip_hdr(skb); + if (test_bit(NF_FLOW_DNAT, &flow->flags) && + (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 || +- nf_flow_dnat_ip(flow, skb, ip_hdr(skb), thoff, dir) < 0)) ++ nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0)) + return -1; + + return 0; +@@ -160,10 +152,10 @@ static bool ip_has_options(unsigned int + } + + static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev, +- struct flow_offload_tuple *tuple) ++ struct flow_offload_tuple *tuple, u32 *hdrsize) + { +- unsigned int thoff, hdrsize; + struct flow_ports *ports; ++ unsigned int thoff; + struct iphdr *iph; + + if (!pskb_may_pull(skb, sizeof(*iph))) +@@ -178,10 +170,10 @@ static int nf_flow_tuple_ip(struct sk_bu + + switch (iph->protocol) { + case IPPROTO_TCP: +- hdrsize = sizeof(struct tcphdr); ++ *hdrsize = sizeof(struct tcphdr); + break; + case IPPROTO_UDP: +- hdrsize = sizeof(struct udphdr); ++ *hdrsize = sizeof(struct udphdr); + break; + default: + return -1; +@@ -191,7 +183,7 @@ static int nf_flow_tuple_ip(struct sk_bu + return -1; + + thoff = iph->ihl * 4; +- if (!pskb_may_pull(skb, thoff + hdrsize)) ++ if (!pskb_may_pull(skb, thoff + *hdrsize)) + return -1; + + iph = ip_hdr(skb); +@@ -252,11 +244,12 @@ nf_flow_offload_ip_hook(void *priv, stru + unsigned int thoff; + struct iphdr *iph; + __be32 nexthop; ++ u32 hdrsize; + + if (skb->protocol != htons(ETH_P_IP)) + return NF_ACCEPT; + +- if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0) ++ if (nf_flow_tuple_ip(skb, state->in, &tuple, &hdrsize) < 0) + return NF_ACCEPT; + + tuplehash = flow_offload_lookup(flow_table, &tuple); +@@ -271,11 +264,13 @@ nf_flow_offload_ip_hook(void *priv, stru + if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu))) + return NF_ACCEPT; + +- if (skb_try_make_writable(skb, sizeof(*iph))) ++ iph = ip_hdr(skb); ++ thoff = iph->ihl * 4; ++ if (skb_try_make_writable(skb, thoff + hdrsize)) + return NF_DROP; + +- thoff = ip_hdr(skb)->ihl * 4; +- if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff)) ++ iph = ip_hdr(skb); ++ if (nf_flow_state_check(flow, iph->protocol, skb, thoff)) + return NF_ACCEPT; + + flow_offload_refresh(flow_table, flow); +@@ -285,10 +280,9 @@ nf_flow_offload_ip_hook(void *priv, stru + return NF_ACCEPT; + } + +- if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0) ++ if (nf_flow_nat_ip(flow, skb, thoff, dir, iph) < 0) + return NF_DROP; + +- iph = ip_hdr(skb); + ip_decrease_ttl(iph); + skb->tstamp = 0; + +@@ -317,9 +311,6 @@ static int nf_flow_nat_ipv6_tcp(struct s + { + struct tcphdr *tcph; + +- if (skb_try_make_writable(skb, thoff + sizeof(*tcph))) +- return -1; +- + tcph = (void *)(skb_network_header(skb) + thoff); + inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32, + new_addr->s6_addr32, true); +@@ -333,9 +324,6 @@ static int nf_flow_nat_ipv6_udp(struct s + { + struct udphdr *udph; + +- if (skb_try_make_writable(skb, thoff + sizeof(*udph))) +- return -1; +- + udph = (void *)(skb_network_header(skb) + thoff); + if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) { + inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32, +@@ -417,31 +405,30 @@ static int nf_flow_dnat_ipv6(const struc + + static int 
nf_flow_nat_ipv6(const struct flow_offload *flow, + struct sk_buff *skb, +- enum flow_offload_tuple_dir dir) ++ enum flow_offload_tuple_dir dir, ++ struct ipv6hdr *ip6h) + { +- struct ipv6hdr *ip6h = ipv6_hdr(skb); + unsigned int thoff = sizeof(*ip6h); + + if (test_bit(NF_FLOW_SNAT, &flow->flags) && + (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 || +- nf_flow_snat_ipv6(flow, skb, ipv6_hdr(skb), thoff, dir) < 0)) ++ nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0)) + return -1; + +- ip6h = ipv6_hdr(skb); + if (test_bit(NF_FLOW_DNAT, &flow->flags) && + (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 || +- nf_flow_dnat_ipv6(flow, skb, ipv6_hdr(skb), thoff, dir) < 0)) ++ nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0)) + return -1; + + return 0; + } + + static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev, +- struct flow_offload_tuple *tuple) ++ struct flow_offload_tuple *tuple, u32 *hdrsize) + { +- unsigned int thoff, hdrsize; + struct flow_ports *ports; + struct ipv6hdr *ip6h; ++ unsigned int thoff; + + if (!pskb_may_pull(skb, sizeof(*ip6h))) + return -1; +@@ -450,10 +437,10 @@ static int nf_flow_tuple_ipv6(struct sk_ + + switch (ip6h->nexthdr) { + case IPPROTO_TCP: +- hdrsize = sizeof(struct tcphdr); ++ *hdrsize = sizeof(struct tcphdr); + break; + case IPPROTO_UDP: +- hdrsize = sizeof(struct udphdr); ++ *hdrsize = sizeof(struct udphdr); + break; + default: + return -1; +@@ -463,7 +450,7 @@ static int nf_flow_tuple_ipv6(struct sk_ + return -1; + + thoff = sizeof(*ip6h); +- if (!pskb_may_pull(skb, thoff + hdrsize)) ++ if (!pskb_may_pull(skb, thoff + *hdrsize)) + return -1; + + ip6h = ipv6_hdr(skb); +@@ -493,11 +480,12 @@ nf_flow_offload_ipv6_hook(void *priv, st + struct net_device *outdev; + struct ipv6hdr *ip6h; + struct rt6_info *rt; ++ u32 hdrsize; + + if (skb->protocol != htons(ETH_P_IPV6)) + return NF_ACCEPT; + +- if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0) ++ if (nf_flow_tuple_ipv6(skb, state->in, &tuple, &hdrsize) < 0) + return NF_ACCEPT; + + tuplehash = flow_offload_lookup(flow_table, &tuple); +@@ -523,13 +511,13 @@ nf_flow_offload_ipv6_hook(void *priv, st + return NF_ACCEPT; + } + +- if (skb_try_make_writable(skb, sizeof(*ip6h))) ++ if (skb_try_make_writable(skb, sizeof(*ip6h) + hdrsize)) + return NF_DROP; + +- if (nf_flow_nat_ipv6(flow, skb, dir) < 0) ++ ip6h = ipv6_hdr(skb); ++ if (nf_flow_nat_ipv6(flow, skb, dir, ip6h) < 0) + return NF_DROP; + +- ip6h = ipv6_hdr(skb); + ip6h->hop_limit--; + skb->tstamp = 0; + diff --git a/target/linux/generic/backport-5.15/610-v5.13-05-netfilter-flowtable-move-skb_try_make_writable-befor.patch b/target/linux/generic/backport-5.15/610-v5.13-05-netfilter-flowtable-move-skb_try_make_writable-befor.patch new file mode 100644 index 0000000000..84e294de7a --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-05-netfilter-flowtable-move-skb_try_make_writable-befor.patch @@ -0,0 +1,35 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Tue, 23 Mar 2021 00:56:23 +0100 +Subject: [PATCH] netfilter: flowtable: move skb_try_make_writable() + before NAT in IPv4 + +For consistency with the IPv6 flowtable datapath and to make sure the +skbuff is writable right before the NAT header updates. 
+ +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/net/netfilter/nf_flow_table_ip.c ++++ b/net/netfilter/nf_flow_table_ip.c +@@ -266,10 +266,6 @@ nf_flow_offload_ip_hook(void *priv, stru + + iph = ip_hdr(skb); + thoff = iph->ihl * 4; +- if (skb_try_make_writable(skb, thoff + hdrsize)) +- return NF_DROP; +- +- iph = ip_hdr(skb); + if (nf_flow_state_check(flow, iph->protocol, skb, thoff)) + return NF_ACCEPT; + +@@ -280,6 +276,10 @@ nf_flow_offload_ip_hook(void *priv, stru + return NF_ACCEPT; + } + ++ if (skb_try_make_writable(skb, thoff + hdrsize)) ++ return NF_DROP; ++ ++ iph = ip_hdr(skb); + if (nf_flow_nat_ip(flow, skb, thoff, dir, iph) < 0) + return NF_DROP; + diff --git a/target/linux/generic/backport-5.15/610-v5.13-06-netfilter-flowtable-move-FLOW_OFFLOAD_DIR_MAX-away-f.patch b/target/linux/generic/backport-5.15/610-v5.13-06-netfilter-flowtable-move-FLOW_OFFLOAD_DIR_MAX-away-f.patch new file mode 100644 index 0000000000..64a0e42079 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-06-netfilter-flowtable-move-FLOW_OFFLOAD_DIR_MAX-away-f.patch @@ -0,0 +1,82 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Tue, 23 Mar 2021 00:56:24 +0100 +Subject: [PATCH] netfilter: flowtable: move FLOW_OFFLOAD_DIR_MAX away + from enumeration + +This allows to remove the default case which should not ever happen and +that was added to avoid gcc warnings on unhandled FLOW_OFFLOAD_DIR_MAX +enumeration case. + +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/net/netfilter/nf_flow_table.h ++++ b/include/net/netfilter/nf_flow_table.h +@@ -86,8 +86,8 @@ static inline bool nf_flowtable_hw_offlo + enum flow_offload_tuple_dir { + FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL, + FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY, +- FLOW_OFFLOAD_DIR_MAX = IP_CT_DIR_MAX + }; ++#define FLOW_OFFLOAD_DIR_MAX IP_CT_DIR_MAX + + struct flow_offload_tuple { + union { +--- a/net/netfilter/nf_flow_table_core.c ++++ b/net/netfilter/nf_flow_table_core.c +@@ -453,8 +453,6 @@ int nf_flow_snat_port(const struct flow_ + new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port; + hdr->dest = new_port; + break; +- default: +- return -1; + } + + return nf_flow_nat_port(skb, thoff, protocol, port, new_port); +@@ -481,8 +479,6 @@ int nf_flow_dnat_port(const struct flow_ + new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port; + hdr->source = new_port; + break; +- default: +- return -1; + } + + return nf_flow_nat_port(skb, thoff, protocol, port, new_port); +--- a/net/netfilter/nf_flow_table_ip.c ++++ b/net/netfilter/nf_flow_table_ip.c +@@ -96,8 +96,6 @@ static int nf_flow_snat_ip(const struct + new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr; + iph->daddr = new_addr; + break; +- default: +- return -1; + } + csum_replace4(&iph->check, addr, new_addr); + +@@ -121,8 +119,6 @@ static int nf_flow_dnat_ip(const struct + new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr; + iph->saddr = new_addr; + break; +- default: +- return -1; + } + csum_replace4(&iph->check, addr, new_addr); + +@@ -371,8 +367,6 @@ static int nf_flow_snat_ipv6(const struc + new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6; + ip6h->daddr = new_addr; + break; +- default: +- return -1; + } + + return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr); +@@ -396,8 +390,6 @@ static int nf_flow_dnat_ipv6(const struc + new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6; + ip6h->saddr = new_addr; + 
break; +- default: +- return -1; + } + + return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr); diff --git a/target/linux/generic/backport-5.15/610-v5.13-07-netfilter-flowtable-fast-NAT-functions-never-fail.patch b/target/linux/generic/backport-5.15/610-v5.13-07-netfilter-flowtable-fast-NAT-functions-never-fail.patch new file mode 100644 index 0000000000..2224e095c9 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-07-netfilter-flowtable-fast-NAT-functions-never-fail.patch @@ -0,0 +1,394 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Tue, 23 Mar 2021 00:56:25 +0100 +Subject: [PATCH] netfilter: flowtable: fast NAT functions never fail + +Simplify existing fast NAT routines by returning void. After the +skb_try_make_writable() call consolidation, these routines cannot ever +fail. + +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/net/netfilter/nf_flow_table.h ++++ b/include/net/netfilter/nf_flow_table.h +@@ -228,12 +228,12 @@ void nf_flow_table_free(struct nf_flowta + + void flow_offload_teardown(struct flow_offload *flow); + +-int nf_flow_snat_port(const struct flow_offload *flow, +- struct sk_buff *skb, unsigned int thoff, +- u8 protocol, enum flow_offload_tuple_dir dir); +-int nf_flow_dnat_port(const struct flow_offload *flow, +- struct sk_buff *skb, unsigned int thoff, +- u8 protocol, enum flow_offload_tuple_dir dir); ++void nf_flow_snat_port(const struct flow_offload *flow, ++ struct sk_buff *skb, unsigned int thoff, ++ u8 protocol, enum flow_offload_tuple_dir dir); ++void nf_flow_dnat_port(const struct flow_offload *flow, ++ struct sk_buff *skb, unsigned int thoff, ++ u8 protocol, enum flow_offload_tuple_dir dir); + + struct flow_ports { + __be16 source, dest; +--- a/net/netfilter/nf_flow_table_core.c ++++ b/net/netfilter/nf_flow_table_core.c +@@ -388,20 +388,17 @@ static void nf_flow_offload_work_gc(stru + queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ); + } + +- +-static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff, +- __be16 port, __be16 new_port) ++static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff, ++ __be16 port, __be16 new_port) + { + struct tcphdr *tcph; + + tcph = (void *)(skb_network_header(skb) + thoff); + inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false); +- +- return 0; + } + +-static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff, +- __be16 port, __be16 new_port) ++static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff, ++ __be16 port, __be16 new_port) + { + struct udphdr *udph; + +@@ -412,30 +409,24 @@ static int nf_flow_nat_port_udp(struct s + if (!udph->check) + udph->check = CSUM_MANGLED_0; + } +- +- return 0; + } + +-static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff, +- u8 protocol, __be16 port, __be16 new_port) ++static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff, ++ u8 protocol, __be16 port, __be16 new_port) + { + switch (protocol) { + case IPPROTO_TCP: +- if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0) +- return NF_DROP; ++ nf_flow_nat_port_tcp(skb, thoff, port, new_port); + break; + case IPPROTO_UDP: +- if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0) +- return NF_DROP; ++ nf_flow_nat_port_udp(skb, thoff, port, new_port); + break; + } +- +- return 0; + } + +-int nf_flow_snat_port(const struct flow_offload *flow, +- struct sk_buff *skb, unsigned int thoff, +- u8 protocol, enum flow_offload_tuple_dir dir) ++void 
nf_flow_snat_port(const struct flow_offload *flow, ++ struct sk_buff *skb, unsigned int thoff, ++ u8 protocol, enum flow_offload_tuple_dir dir) + { + struct flow_ports *hdr; + __be16 port, new_port; +@@ -455,13 +446,13 @@ int nf_flow_snat_port(const struct flow_ + break; + } + +- return nf_flow_nat_port(skb, thoff, protocol, port, new_port); ++ nf_flow_nat_port(skb, thoff, protocol, port, new_port); + } + EXPORT_SYMBOL_GPL(nf_flow_snat_port); + +-int nf_flow_dnat_port(const struct flow_offload *flow, +- struct sk_buff *skb, unsigned int thoff, +- u8 protocol, enum flow_offload_tuple_dir dir) ++void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb, ++ unsigned int thoff, u8 protocol, ++ enum flow_offload_tuple_dir dir) + { + struct flow_ports *hdr; + __be16 port, new_port; +@@ -481,7 +472,7 @@ int nf_flow_dnat_port(const struct flow_ + break; + } + +- return nf_flow_nat_port(skb, thoff, protocol, port, new_port); ++ nf_flow_nat_port(skb, thoff, protocol, port, new_port); + } + EXPORT_SYMBOL_GPL(nf_flow_dnat_port); + +--- a/net/netfilter/nf_flow_table_ip.c ++++ b/net/netfilter/nf_flow_table_ip.c +@@ -34,19 +34,17 @@ static int nf_flow_state_check(struct fl + return 0; + } + +-static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff, +- __be32 addr, __be32 new_addr) ++static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff, ++ __be32 addr, __be32 new_addr) + { + struct tcphdr *tcph; + + tcph = (void *)(skb_network_header(skb) + thoff); + inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true); +- +- return 0; + } + +-static int nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff, +- __be32 addr, __be32 new_addr) ++static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff, ++ __be32 addr, __be32 new_addr) + { + struct udphdr *udph; + +@@ -57,31 +55,25 @@ static int nf_flow_nat_ip_udp(struct sk_ + if (!udph->check) + udph->check = CSUM_MANGLED_0; + } +- +- return 0; + } + +-static int nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph, +- unsigned int thoff, __be32 addr, +- __be32 new_addr) ++static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph, ++ unsigned int thoff, __be32 addr, ++ __be32 new_addr) + { + switch (iph->protocol) { + case IPPROTO_TCP: +- if (nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr) < 0) +- return NF_DROP; ++ nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr); + break; + case IPPROTO_UDP: +- if (nf_flow_nat_ip_udp(skb, thoff, addr, new_addr) < 0) +- return NF_DROP; ++ nf_flow_nat_ip_udp(skb, thoff, addr, new_addr); + break; + } +- +- return 0; + } + +-static int nf_flow_snat_ip(const struct flow_offload *flow, struct sk_buff *skb, +- struct iphdr *iph, unsigned int thoff, +- enum flow_offload_tuple_dir dir) ++static void nf_flow_snat_ip(const struct flow_offload *flow, ++ struct sk_buff *skb, struct iphdr *iph, ++ unsigned int thoff, enum flow_offload_tuple_dir dir) + { + __be32 addr, new_addr; + +@@ -99,12 +91,12 @@ static int nf_flow_snat_ip(const struct + } + csum_replace4(&iph->check, addr, new_addr); + +- return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr); ++ nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr); + } + +-static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb, +- struct iphdr *iph, unsigned int thoff, +- enum flow_offload_tuple_dir dir) ++static void nf_flow_dnat_ip(const struct flow_offload *flow, ++ struct sk_buff *skb, struct iphdr *iph, ++ unsigned int thoff, enum flow_offload_tuple_dir dir) + { + __be32 
addr, new_addr; + +@@ -122,24 +114,21 @@ static int nf_flow_dnat_ip(const struct + } + csum_replace4(&iph->check, addr, new_addr); + +- return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr); ++ nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr); + } + +-static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb, ++static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb, + unsigned int thoff, enum flow_offload_tuple_dir dir, + struct iphdr *iph) + { +- if (test_bit(NF_FLOW_SNAT, &flow->flags) && +- (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 || +- nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0)) +- return -1; +- +- if (test_bit(NF_FLOW_DNAT, &flow->flags) && +- (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 || +- nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0)) +- return -1; +- +- return 0; ++ if (test_bit(NF_FLOW_SNAT, &flow->flags)) { ++ nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir); ++ nf_flow_snat_ip(flow, skb, iph, thoff, dir); ++ } ++ if (test_bit(NF_FLOW_DNAT, &flow->flags)) { ++ nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir); ++ nf_flow_dnat_ip(flow, skb, iph, thoff, dir); ++ } + } + + static bool ip_has_options(unsigned int thoff) +@@ -276,8 +265,7 @@ nf_flow_offload_ip_hook(void *priv, stru + return NF_DROP; + + iph = ip_hdr(skb); +- if (nf_flow_nat_ip(flow, skb, thoff, dir, iph) < 0) +- return NF_DROP; ++ nf_flow_nat_ip(flow, skb, thoff, dir, iph); + + ip_decrease_ttl(iph); + skb->tstamp = 0; +@@ -301,22 +289,21 @@ nf_flow_offload_ip_hook(void *priv, stru + } + EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook); + +-static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff, +- struct in6_addr *addr, +- struct in6_addr *new_addr) ++static void nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff, ++ struct in6_addr *addr, ++ struct in6_addr *new_addr, ++ struct ipv6hdr *ip6h) + { + struct tcphdr *tcph; + + tcph = (void *)(skb_network_header(skb) + thoff); + inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32, + new_addr->s6_addr32, true); +- +- return 0; + } + +-static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff, +- struct in6_addr *addr, +- struct in6_addr *new_addr) ++static void nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff, ++ struct in6_addr *addr, ++ struct in6_addr *new_addr) + { + struct udphdr *udph; + +@@ -327,32 +314,26 @@ static int nf_flow_nat_ipv6_udp(struct s + if (!udph->check) + udph->check = CSUM_MANGLED_0; + } +- +- return 0; + } + +-static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h, +- unsigned int thoff, struct in6_addr *addr, +- struct in6_addr *new_addr) ++static void nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h, ++ unsigned int thoff, struct in6_addr *addr, ++ struct in6_addr *new_addr) + { + switch (ip6h->nexthdr) { + case IPPROTO_TCP: +- if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0) +- return NF_DROP; ++ nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr, ip6h); + break; + case IPPROTO_UDP: +- if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0) +- return NF_DROP; ++ nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr); + break; + } +- +- return 0; + } + +-static int nf_flow_snat_ipv6(const struct flow_offload *flow, +- struct sk_buff *skb, struct ipv6hdr *ip6h, +- unsigned int thoff, +- enum flow_offload_tuple_dir dir) ++static void nf_flow_snat_ipv6(const struct flow_offload *flow, ++ struct sk_buff *skb, struct ipv6hdr *ip6h, 
++ unsigned int thoff, ++ enum flow_offload_tuple_dir dir) + { + struct in6_addr addr, new_addr; + +@@ -369,13 +350,13 @@ static int nf_flow_snat_ipv6(const struc + break; + } + +- return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr); ++ nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr); + } + +-static int nf_flow_dnat_ipv6(const struct flow_offload *flow, +- struct sk_buff *skb, struct ipv6hdr *ip6h, +- unsigned int thoff, +- enum flow_offload_tuple_dir dir) ++static void nf_flow_dnat_ipv6(const struct flow_offload *flow, ++ struct sk_buff *skb, struct ipv6hdr *ip6h, ++ unsigned int thoff, ++ enum flow_offload_tuple_dir dir) + { + struct in6_addr addr, new_addr; + +@@ -392,27 +373,24 @@ static int nf_flow_dnat_ipv6(const struc + break; + } + +- return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr); ++ nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr); + } + +-static int nf_flow_nat_ipv6(const struct flow_offload *flow, +- struct sk_buff *skb, +- enum flow_offload_tuple_dir dir, +- struct ipv6hdr *ip6h) ++static void nf_flow_nat_ipv6(const struct flow_offload *flow, ++ struct sk_buff *skb, ++ enum flow_offload_tuple_dir dir, ++ struct ipv6hdr *ip6h) + { + unsigned int thoff = sizeof(*ip6h); + +- if (test_bit(NF_FLOW_SNAT, &flow->flags) && +- (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 || +- nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0)) +- return -1; +- +- if (test_bit(NF_FLOW_DNAT, &flow->flags) && +- (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 || +- nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0)) +- return -1; +- +- return 0; ++ if (test_bit(NF_FLOW_SNAT, &flow->flags)) { ++ nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir); ++ nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir); ++ } ++ if (test_bit(NF_FLOW_DNAT, &flow->flags)) { ++ nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir); ++ nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir); ++ } + } + + static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev, +@@ -507,8 +485,7 @@ nf_flow_offload_ipv6_hook(void *priv, st + return NF_DROP; + + ip6h = ipv6_hdr(skb); +- if (nf_flow_nat_ipv6(flow, skb, dir, ip6h) < 0) +- return NF_DROP; ++ nf_flow_nat_ipv6(flow, skb, dir, ip6h); + + ip6h->hop_limit--; + skb->tstamp = 0; diff --git a/target/linux/generic/backport-5.15/610-v5.13-08-netfilter-flowtable-call-dst_check-to-fall-back-to-c.patch b/target/linux/generic/backport-5.15/610-v5.13-08-netfilter-flowtable-call-dst_check-to-fall-back-to-c.patch new file mode 100644 index 0000000000..276785030d --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-08-netfilter-flowtable-call-dst_check-to-fall-back-to-c.patch @@ -0,0 +1,46 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Tue, 23 Mar 2021 00:56:26 +0100 +Subject: [PATCH] netfilter: flowtable: call dst_check() to fall back to + classic forwarding + +In case the route is stale, pass up the packet to the classic forwarding +path for re-evaluation and schedule this flow entry for removal. + +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/net/netfilter/nf_flow_table_ip.c ++++ b/net/netfilter/nf_flow_table_ip.c +@@ -197,14 +197,6 @@ static bool nf_flow_exceeds_mtu(const st + return true; + } + +-static int nf_flow_offload_dst_check(struct dst_entry *dst) +-{ +- if (unlikely(dst_xfrm(dst))) +- return dst_check(dst, 0) ? 
0 : -1; +- +- return 0; +-} +- + static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb, + const struct nf_hook_state *state, + struct dst_entry *dst) +@@ -256,7 +248,7 @@ nf_flow_offload_ip_hook(void *priv, stru + + flow_offload_refresh(flow_table, flow); + +- if (nf_flow_offload_dst_check(&rt->dst)) { ++ if (!dst_check(&rt->dst, 0)) { + flow_offload_teardown(flow); + return NF_ACCEPT; + } +@@ -476,7 +468,7 @@ nf_flow_offload_ipv6_hook(void *priv, st + + flow_offload_refresh(flow_table, flow); + +- if (nf_flow_offload_dst_check(&rt->dst)) { ++ if (!dst_check(&rt->dst, 0)) { + flow_offload_teardown(flow); + return NF_ACCEPT; + } diff --git a/target/linux/generic/backport-5.15/610-v5.13-09-netfilter-flowtable-refresh-timeout-after-dst-and-wr.patch b/target/linux/generic/backport-5.15/610-v5.13-09-netfilter-flowtable-refresh-timeout-after-dst-and-wr.patch new file mode 100644 index 0000000000..14ac2ee295 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-09-netfilter-flowtable-refresh-timeout-after-dst-and-wr.patch @@ -0,0 +1,49 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Tue, 23 Mar 2021 00:56:27 +0100 +Subject: [PATCH] netfilter: flowtable: refresh timeout after dst and + writable checks + +Refresh the timeout (and retry hardware offload) once the skbuff dst +is confirmed to be current and after the skbuff is made writable. + +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/net/netfilter/nf_flow_table_ip.c ++++ b/net/netfilter/nf_flow_table_ip.c +@@ -246,8 +246,6 @@ nf_flow_offload_ip_hook(void *priv, stru + if (nf_flow_state_check(flow, iph->protocol, skb, thoff)) + return NF_ACCEPT; + +- flow_offload_refresh(flow_table, flow); +- + if (!dst_check(&rt->dst, 0)) { + flow_offload_teardown(flow); + return NF_ACCEPT; +@@ -256,6 +254,8 @@ nf_flow_offload_ip_hook(void *priv, stru + if (skb_try_make_writable(skb, thoff + hdrsize)) + return NF_DROP; + ++ flow_offload_refresh(flow_table, flow); ++ + iph = ip_hdr(skb); + nf_flow_nat_ip(flow, skb, thoff, dir, iph); + +@@ -466,8 +466,6 @@ nf_flow_offload_ipv6_hook(void *priv, st + sizeof(*ip6h))) + return NF_ACCEPT; + +- flow_offload_refresh(flow_table, flow); +- + if (!dst_check(&rt->dst, 0)) { + flow_offload_teardown(flow); + return NF_ACCEPT; +@@ -476,6 +474,8 @@ nf_flow_offload_ipv6_hook(void *priv, st + if (skb_try_make_writable(skb, sizeof(*ip6h) + hdrsize)) + return NF_DROP; + ++ flow_offload_refresh(flow_table, flow); ++ + ip6h = ipv6_hdr(skb); + nf_flow_nat_ipv6(flow, skb, dir, ip6h); + diff --git a/target/linux/generic/backport-5.15/610-v5.13-10-netfilter-nftables-update-table-flags-from-the-commi.patch b/target/linux/generic/backport-5.15/610-v5.13-10-netfilter-nftables-update-table-flags-from-the-commi.patch new file mode 100644 index 0000000000..fa6e0c4173 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-10-netfilter-nftables-update-table-flags-from-the-commi.patch @@ -0,0 +1,103 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Tue, 23 Mar 2021 00:56:28 +0100 +Subject: [PATCH] netfilter: nftables: update table flags from the commit + phase + +Do not update table flags from the preparation phase. Store the flags +update into the transaction, then update the flags from the commit +phase. 
+ +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/net/netfilter/nf_tables.h ++++ b/include/net/netfilter/nf_tables.h +@@ -1470,13 +1470,16 @@ struct nft_trans_chain { + + struct nft_trans_table { + bool update; +- bool enable; ++ u8 state; ++ u32 flags; + }; + + #define nft_trans_table_update(trans) \ + (((struct nft_trans_table *)trans->data)->update) +-#define nft_trans_table_enable(trans) \ +- (((struct nft_trans_table *)trans->data)->enable) ++#define nft_trans_table_state(trans) \ ++ (((struct nft_trans_table *)trans->data)->state) ++#define nft_trans_table_flags(trans) \ ++ (((struct nft_trans_table *)trans->data)->flags) + + struct nft_trans_elem { + struct nft_set *set; +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -891,6 +891,12 @@ static void nf_tables_table_disable(stru + nft_table_disable(net, table, 0); + } + ++enum { ++ NFT_TABLE_STATE_UNCHANGED = 0, ++ NFT_TABLE_STATE_DORMANT, ++ NFT_TABLE_STATE_WAKEUP ++}; ++ + static int nf_tables_updtable(struct nft_ctx *ctx) + { + struct nft_trans *trans; +@@ -914,19 +920,17 @@ static int nf_tables_updtable(struct nft + + if ((flags & NFT_TABLE_F_DORMANT) && + !(ctx->table->flags & NFT_TABLE_F_DORMANT)) { +- nft_trans_table_enable(trans) = false; ++ nft_trans_table_state(trans) = NFT_TABLE_STATE_DORMANT; + } else if (!(flags & NFT_TABLE_F_DORMANT) && + ctx->table->flags & NFT_TABLE_F_DORMANT) { +- ctx->table->flags &= ~NFT_TABLE_F_DORMANT; + ret = nf_tables_table_enable(ctx->net, ctx->table); + if (ret >= 0) +- nft_trans_table_enable(trans) = true; +- else +- ctx->table->flags |= NFT_TABLE_F_DORMANT; ++ nft_trans_table_state(trans) = NFT_TABLE_STATE_WAKEUP; + } + if (ret < 0) + goto err; + ++ nft_trans_table_flags(trans) = flags; + nft_trans_table_update(trans) = true; + list_add_tail(&trans->list, &ctx->net->nft.commit_list); + return 0; +@@ -7908,11 +7912,10 @@ static int nf_tables_commit(struct net * + switch (trans->msg_type) { + case NFT_MSG_NEWTABLE: + if (nft_trans_table_update(trans)) { +- if (!nft_trans_table_enable(trans)) { +- nf_tables_table_disable(net, +- trans->ctx.table); +- trans->ctx.table->flags |= NFT_TABLE_F_DORMANT; +- } ++ if (nft_trans_table_state(trans) == NFT_TABLE_STATE_DORMANT) ++ nf_tables_table_disable(net, trans->ctx.table); ++ ++ trans->ctx.table->flags = nft_trans_table_flags(trans); + } else { + nft_clear(net, trans->ctx.table); + } +@@ -8125,11 +8128,9 @@ static int __nf_tables_abort(struct net + switch (trans->msg_type) { + case NFT_MSG_NEWTABLE: + if (nft_trans_table_update(trans)) { +- if (nft_trans_table_enable(trans)) { +- nf_tables_table_disable(net, +- trans->ctx.table); +- trans->ctx.table->flags |= NFT_TABLE_F_DORMANT; +- } ++ if (nft_trans_table_state(trans) == NFT_TABLE_STATE_WAKEUP) ++ nf_tables_table_disable(net, trans->ctx.table); ++ + nft_trans_destroy(trans); + } else { + list_del_rcu(&trans->ctx.table->list); diff --git a/target/linux/generic/backport-5.15/610-v5.13-11-net-resolve-forwarding-path-from-virtual-netdevice-a.patch b/target/linux/generic/backport-5.15/610-v5.13-11-net-resolve-forwarding-path-from-virtual-netdevice-a.patch new file mode 100644 index 0000000000..a8fc1cabe2 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-11-net-resolve-forwarding-path-from-virtual-netdevice-a.patch @@ -0,0 +1,170 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Wed, 24 Mar 2021 02:30:32 +0100 +Subject: [PATCH] net: resolve forwarding path from virtual netdevice and + HW destination address + +This patch adds 
dev_fill_forward_path() which resolves the path to reach +the real netdevice from the IP forwarding side. This function takes as +input the netdevice and the destination hardware address and it walks +down the devices calling .ndo_fill_forward_path() for each device until +the real device is found. + +For instance, assuming the following topology: + + IP forwarding + / \ + br0 eth0 + / \ + eth1 eth2 + . + . + . + ethX + ab:cd:ef:ab:cd:ef + +where eth1 and eth2 are bridge ports and eth0 provides WAN connectivity. +ethX is the interface in another box which is connected to the eth1 +bridge port. + +For packets going through IP forwarding to br0 whose destination MAC +address is ab:cd:ef:ab:cd:ef, dev_fill_forward_path() provides the +following path: + + br0 -> eth1 + +.ndo_fill_forward_path for br0 looks up at the FDB for the bridge port +from the destination MAC address to get the bridge port eth1. + +This information allows to create a fast path that bypasses the classic +bridge and IP forwarding paths, so packets go directly from the bridge +port eth1 to eth0 (wan interface) and vice versa. + + fast path + .------------------------. + / \ + | IP forwarding | + | / \ \/ + | br0 eth0 + . / \ + -> eth1 eth2 + . + . + . + ethX + ab:cd:ef:ab:cd:ef + +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -827,6 +827,27 @@ typedef u16 (*select_queue_fallback_t)(s + struct sk_buff *skb, + struct net_device *sb_dev); + ++enum net_device_path_type { ++ DEV_PATH_ETHERNET = 0, ++}; ++ ++struct net_device_path { ++ enum net_device_path_type type; ++ const struct net_device *dev; ++}; ++ ++#define NET_DEVICE_PATH_STACK_MAX 5 ++ ++struct net_device_path_stack { ++ int num_paths; ++ struct net_device_path path[NET_DEVICE_PATH_STACK_MAX]; ++}; ++ ++struct net_device_path_ctx { ++ const struct net_device *dev; ++ const u8 *daddr; ++}; ++ + enum tc_setup_type { + TC_SETUP_QDISC_MQPRIO, + TC_SETUP_CLSU32, +@@ -1273,6 +1294,8 @@ struct netdev_net_notifier { + * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev); + * If a device is paired with a peer device, return the peer instance. + * The caller must be under RCU read context. 
++ * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path); ++ * Get the forwarding path to reach the real device from the HW destination address + */ + struct net_device_ops { + int (*ndo_init)(struct net_device *dev); +@@ -1481,6 +1504,8 @@ struct net_device_ops { + int (*ndo_tunnel_ctl)(struct net_device *dev, + struct ip_tunnel_parm *p, int cmd); + struct net_device * (*ndo_get_peer_dev)(struct net_device *dev); ++ int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, ++ struct net_device_path *path); + }; + + /** +@@ -2828,6 +2853,8 @@ void dev_remove_offload(struct packet_of + + int dev_get_iflink(const struct net_device *dev); + int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); ++int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, ++ struct net_device_path_stack *stack); + struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, + unsigned short mask); + struct net_device *dev_get_by_name(struct net *net, const char *name); +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -847,6 +847,52 @@ int dev_fill_metadata_dst(struct net_dev + } + EXPORT_SYMBOL_GPL(dev_fill_metadata_dst); + ++static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack) ++{ ++ int k = stack->num_paths++; ++ ++ if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX)) ++ return NULL; ++ ++ return &stack->path[k]; ++} ++ ++int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, ++ struct net_device_path_stack *stack) ++{ ++ const struct net_device *last_dev; ++ struct net_device_path_ctx ctx = { ++ .dev = dev, ++ .daddr = daddr, ++ }; ++ struct net_device_path *path; ++ int ret = 0; ++ ++ stack->num_paths = 0; ++ while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) { ++ last_dev = ctx.dev; ++ path = dev_fwd_path(stack); ++ if (!path) ++ return -1; ++ ++ memset(path, 0, sizeof(struct net_device_path)); ++ ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path); ++ if (ret < 0) ++ return -1; ++ ++ if (WARN_ON_ONCE(last_dev == ctx.dev)) ++ return -1; ++ } ++ path = dev_fwd_path(stack); ++ if (!path) ++ return -1; ++ path->type = DEV_PATH_ETHERNET; ++ path->dev = ctx.dev; ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(dev_fill_forward_path); ++ + /** + * __dev_get_by_name - find a device by its name + * @net: the applicable net namespace diff --git a/target/linux/generic/backport-5.15/610-v5.13-12-net-8021q-resolve-forwarding-path-for-vlan-devices.patch b/target/linux/generic/backport-5.15/610-v5.13-12-net-8021q-resolve-forwarding-path-for-vlan-devices.patch new file mode 100644 index 0000000000..32c8c9bb78 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-12-net-8021q-resolve-forwarding-path-for-vlan-devices.patch @@ -0,0 +1,80 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Wed, 24 Mar 2021 02:30:33 +0100 +Subject: [PATCH] net: 8021q: resolve forwarding path for vlan devices + +Add .ndo_fill_forward_path for vlan devices. + +For instance, assuming the following topology: + + IP forwarding + / \ + eth0.100 eth0 + | + eth0 + . + . + . 
+ ethX + ab:cd:ef:ab:cd:ef + +For packets going through IP forwarding to eth0.100 whose destination +MAC address is ab:cd:ef:ab:cd:ef, dev_fill_forward_path() provides the +following path: + + eth0.100 -> eth0 + +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -829,11 +829,18 @@ typedef u16 (*select_queue_fallback_t)(s + + enum net_device_path_type { + DEV_PATH_ETHERNET = 0, ++ DEV_PATH_VLAN, + }; + + struct net_device_path { + enum net_device_path_type type; + const struct net_device *dev; ++ union { ++ struct { ++ u16 id; ++ __be16 proto; ++ } encap; ++ }; + }; + + #define NET_DEVICE_PATH_STACK_MAX 5 +--- a/net/8021q/vlan_dev.c ++++ b/net/8021q/vlan_dev.c +@@ -770,6 +770,20 @@ static int vlan_dev_get_iflink(const str + return real_dev->ifindex; + } + ++static int vlan_dev_fill_forward_path(struct net_device_path_ctx *ctx, ++ struct net_device_path *path) ++{ ++ struct vlan_dev_priv *vlan = vlan_dev_priv(ctx->dev); ++ ++ path->type = DEV_PATH_VLAN; ++ path->encap.id = vlan->vlan_id; ++ path->encap.proto = vlan->vlan_proto; ++ path->dev = ctx->dev; ++ ctx->dev = vlan->real_dev; ++ ++ return 0; ++} ++ + static const struct ethtool_ops vlan_ethtool_ops = { + .get_link_ksettings = vlan_ethtool_get_link_ksettings, + .get_drvinfo = vlan_ethtool_get_drvinfo, +@@ -808,6 +822,7 @@ static const struct net_device_ops vlan_ + #endif + .ndo_fix_features = vlan_dev_fix_features, + .ndo_get_iflink = vlan_dev_get_iflink, ++ .ndo_fill_forward_path = vlan_dev_fill_forward_path, + }; + + static void vlan_dev_free(struct net_device *dev) diff --git a/target/linux/generic/backport-5.15/610-v5.13-13-net-bridge-resolve-forwarding-path-for-bridge-device.patch b/target/linux/generic/backport-5.15/610-v5.13-13-net-bridge-resolve-forwarding-path-for-bridge-device.patch new file mode 100644 index 0000000000..bdb12b13a6 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-13-net-bridge-resolve-forwarding-path-for-bridge-device.patch @@ -0,0 +1,62 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Wed, 24 Mar 2021 02:30:34 +0100 +Subject: [PATCH] net: bridge: resolve forwarding path for bridge devices + +Add .ndo_fill_forward_path for bridge devices. 
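For orientation, the core of br_fill_forward_path() in the diff below is an FDB lookup keyed on the destination MAC that yields the egress bridge port. The standalone sketch that follows models just that lookup step with made-up types; it is not kernel code and omits RCU, VLAN filtering and the local/flooding cases.

/* Minimal userspace sketch of the idea behind br_fill_forward_path():
 * resolve the destination MAC to a bridge port via an FDB-style lookup,
 * then continue the path walk from that port's lower device.
 * All structures below are illustrative, not kernel code. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct netdev { const char *name; };

struct fdb_entry {
	uint8_t mac[6];
	struct netdev *port_dev;	/* bridge port behind which the MAC lives */
};

static struct netdev eth1 = { "eth1" };
static struct fdb_entry fdb[] = {
	{ { 0xab, 0xcd, 0xef, 0xab, 0xcd, 0xef }, &eth1 },
};

/* Return the egress port device for daddr, or NULL -- the sketch's
 * analogue of the patch returning -1 to fall back to the classic path. */
static struct netdev *fdb_lookup(const uint8_t *daddr)
{
	for (size_t i = 0; i < sizeof(fdb) / sizeof(fdb[0]); i++)
		if (!memcmp(fdb[i].mac, daddr, 6))
			return fdb[i].port_dev;
	return NULL;
}

int main(void)
{
	const uint8_t daddr[6] = { 0xab, 0xcd, 0xef, 0xab, 0xcd, 0xef };
	struct netdev *out = fdb_lookup(daddr);

	printf("br0 -> %s\n", out ? out->name : "(no entry, no fast path)");
	return 0;
}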
+ +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -830,6 +830,7 @@ typedef u16 (*select_queue_fallback_t)(s + enum net_device_path_type { + DEV_PATH_ETHERNET = 0, + DEV_PATH_VLAN, ++ DEV_PATH_BRIDGE, + }; + + struct net_device_path { +--- a/net/bridge/br_device.c ++++ b/net/bridge/br_device.c +@@ -398,6 +398,32 @@ static int br_del_slave(struct net_devic + return br_del_if(br, slave_dev); + } + ++static int br_fill_forward_path(struct net_device_path_ctx *ctx, ++ struct net_device_path *path) ++{ ++ struct net_bridge_fdb_entry *f; ++ struct net_bridge_port *dst; ++ struct net_bridge *br; ++ ++ if (netif_is_bridge_port(ctx->dev)) ++ return -1; ++ ++ br = netdev_priv(ctx->dev); ++ f = br_fdb_find_rcu(br, ctx->daddr, 0); ++ if (!f || !f->dst) ++ return -1; ++ ++ dst = READ_ONCE(f->dst); ++ if (!dst) ++ return -1; ++ ++ path->type = DEV_PATH_BRIDGE; ++ path->dev = dst->br->dev; ++ ctx->dev = dst->dev; ++ ++ return 0; ++} ++ + static const struct ethtool_ops br_ethtool_ops = { + .get_drvinfo = br_getinfo, + .get_link = ethtool_op_get_link, +@@ -432,6 +458,7 @@ static const struct net_device_ops br_ne + .ndo_bridge_setlink = br_setlink, + .ndo_bridge_dellink = br_dellink, + .ndo_features_check = passthru_features_check, ++ .ndo_fill_forward_path = br_fill_forward_path, + }; + + static struct device_type br_type = { diff --git a/target/linux/generic/backport-5.15/610-v5.13-14-net-bridge-resolve-forwarding-path-for-VLAN-tag-acti.patch b/target/linux/generic/backport-5.15/610-v5.13-14-net-bridge-resolve-forwarding-path-for-VLAN-tag-acti.patch new file mode 100644 index 0000000000..ba2b1a2877 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-14-net-bridge-resolve-forwarding-path-for-VLAN-tag-acti.patch @@ -0,0 +1,207 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Wed, 24 Mar 2021 02:30:35 +0100 +Subject: [PATCH] net: bridge: resolve forwarding path for VLAN tag + actions in bridge devices + +Depending on the VLAN settings of the bridge and the port, the bridge can +either add or remove a tag. 
When vlan filtering is enabled, the fdb lookup +also needs to know the VLAN tag/proto for the destination address +To provide this, keep track of the stack of VLAN tags for the path in the +lookup context + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -841,10 +841,20 @@ struct net_device_path { + u16 id; + __be16 proto; + } encap; ++ struct { ++ enum { ++ DEV_PATH_BR_VLAN_KEEP, ++ DEV_PATH_BR_VLAN_TAG, ++ DEV_PATH_BR_VLAN_UNTAG, ++ } vlan_mode; ++ u16 vlan_id; ++ __be16 vlan_proto; ++ } bridge; + }; + }; + + #define NET_DEVICE_PATH_STACK_MAX 5 ++#define NET_DEVICE_PATH_VLAN_MAX 2 + + struct net_device_path_stack { + int num_paths; +@@ -854,6 +864,12 @@ struct net_device_path_stack { + struct net_device_path_ctx { + const struct net_device *dev; + const u8 *daddr; ++ ++ int num_vlans; ++ struct { ++ u16 id; ++ __be16 proto; ++ } vlan[NET_DEVICE_PATH_VLAN_MAX]; + }; + + enum tc_setup_type { +--- a/net/8021q/vlan_dev.c ++++ b/net/8021q/vlan_dev.c +@@ -780,6 +780,12 @@ static int vlan_dev_fill_forward_path(st + path->encap.proto = vlan->vlan_proto; + path->dev = ctx->dev; + ctx->dev = vlan->real_dev; ++ if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan)) ++ return -ENOSPC; ++ ++ ctx->vlan[ctx->num_vlans].id = vlan->vlan_id; ++ ctx->vlan[ctx->num_vlans].proto = vlan->vlan_proto; ++ ctx->num_vlans++; + + return 0; + } +--- a/net/bridge/br_device.c ++++ b/net/bridge/br_device.c +@@ -409,7 +409,10 @@ static int br_fill_forward_path(struct n + return -1; + + br = netdev_priv(ctx->dev); +- f = br_fdb_find_rcu(br, ctx->daddr, 0); ++ ++ br_vlan_fill_forward_path_pvid(br, ctx, path); ++ ++ f = br_fdb_find_rcu(br, ctx->daddr, path->bridge.vlan_id); + if (!f || !f->dst) + return -1; + +@@ -417,10 +420,28 @@ static int br_fill_forward_path(struct n + if (!dst) + return -1; + ++ if (br_vlan_fill_forward_path_mode(br, dst, path)) ++ return -1; ++ + path->type = DEV_PATH_BRIDGE; + path->dev = dst->br->dev; + ctx->dev = dst->dev; + ++ switch (path->bridge.vlan_mode) { ++ case DEV_PATH_BR_VLAN_TAG: ++ if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan)) ++ return -ENOSPC; ++ ctx->vlan[ctx->num_vlans].id = path->bridge.vlan_id; ++ ctx->vlan[ctx->num_vlans].proto = path->bridge.vlan_proto; ++ ctx->num_vlans++; ++ break; ++ case DEV_PATH_BR_VLAN_UNTAG: ++ ctx->num_vlans--; ++ break; ++ case DEV_PATH_BR_VLAN_KEEP: ++ break; ++ } ++ + return 0; + } + +--- a/net/bridge/br_private.h ++++ b/net/bridge/br_private.h +@@ -1093,6 +1093,13 @@ void br_vlan_notify(const struct net_bri + bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr, + const struct net_bridge_vlan *range_end); + ++void br_vlan_fill_forward_path_pvid(struct net_bridge *br, ++ struct net_device_path_ctx *ctx, ++ struct net_device_path *path); ++int br_vlan_fill_forward_path_mode(struct net_bridge *br, ++ struct net_bridge_port *dst, ++ struct net_device_path *path); ++ + static inline struct net_bridge_vlan_group *br_vlan_group( + const struct net_bridge *br) + { +@@ -1250,6 +1257,19 @@ static inline int nbp_get_num_vlan_infos + { + return 0; + } ++ ++static inline void br_vlan_fill_forward_path_pvid(struct net_bridge *br, ++ struct net_device_path_ctx *ctx, ++ struct net_device_path *path) ++{ ++} ++ ++static inline int br_vlan_fill_forward_path_mode(struct net_bridge *br, ++ struct net_bridge_port *dst, ++ struct net_device_path *path) ++{ ++ return 0; ++} + + static inline struct net_bridge_vlan_group *br_vlan_group( + 
const struct net_bridge *br) +--- a/net/bridge/br_vlan.c ++++ b/net/bridge/br_vlan.c +@@ -1327,6 +1327,59 @@ int br_vlan_get_pvid_rcu(const struct ne + } + EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu); + ++void br_vlan_fill_forward_path_pvid(struct net_bridge *br, ++ struct net_device_path_ctx *ctx, ++ struct net_device_path *path) ++{ ++ struct net_bridge_vlan_group *vg; ++ int idx = ctx->num_vlans - 1; ++ u16 vid; ++ ++ path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP; ++ ++ if (!br_opt_get(br, BROPT_VLAN_ENABLED)) ++ return; ++ ++ vg = br_vlan_group(br); ++ ++ if (idx >= 0 && ++ ctx->vlan[idx].proto == br->vlan_proto) { ++ vid = ctx->vlan[idx].id; ++ } else { ++ path->bridge.vlan_mode = DEV_PATH_BR_VLAN_TAG; ++ vid = br_get_pvid(vg); ++ } ++ ++ path->bridge.vlan_id = vid; ++ path->bridge.vlan_proto = br->vlan_proto; ++} ++ ++int br_vlan_fill_forward_path_mode(struct net_bridge *br, ++ struct net_bridge_port *dst, ++ struct net_device_path *path) ++{ ++ struct net_bridge_vlan_group *vg; ++ struct net_bridge_vlan *v; ++ ++ if (!br_opt_get(br, BROPT_VLAN_ENABLED)) ++ return 0; ++ ++ vg = nbp_vlan_group_rcu(dst); ++ v = br_vlan_find(vg, path->bridge.vlan_id); ++ if (!v || !br_vlan_should_use(v)) ++ return -EINVAL; ++ ++ if (!(v->flags & BRIDGE_VLAN_INFO_UNTAGGED)) ++ return 0; ++ ++ if (path->bridge.vlan_mode == DEV_PATH_BR_VLAN_TAG) ++ path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP; ++ else ++ path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG; ++ ++ return 0; ++} ++ + int br_vlan_get_info(const struct net_device *dev, u16 vid, + struct bridge_vlan_info *p_vinfo) + { diff --git a/target/linux/generic/backport-5.15/610-v5.13-15-net-ppp-resolve-forwarding-path-for-bridge-pppoe-dev.patch b/target/linux/generic/backport-5.15/610-v5.13-15-net-ppp-resolve-forwarding-path-for-bridge-pppoe-dev.patch new file mode 100644 index 0000000000..712bb9ec97 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-15-net-ppp-resolve-forwarding-path-for-bridge-pppoe-dev.patch @@ -0,0 +1,113 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Wed, 24 Mar 2021 02:30:36 +0100 +Subject: [PATCH] net: ppp: resolve forwarding path for bridge pppoe + devices + +Pass on the PPPoE session ID, destination hardware address and the real +device. 
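To make the PPPoE leg of the path walk concrete, the sketch below records the same three pieces of information the diff passes along (session id, peer MAC, underlying device) in simplified userspace structures, then switches the walk to the real device. Everything here is an illustrative stand-in for the kernel types and assumes a single, non-multilink channel.

/* Illustrative userspace sketch of what pppoe_fill_forward_path() records:
 * the PPPoE session id, the peer MAC and the real underlying device, so a
 * later transmit step can rebuild the PPPoE/Ethernet headers directly.
 * The types are simplified stand-ins for the kernel structures. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct netdev { const char *name; };

struct path_entry {
	uint16_t encap_proto;		/* e.g. 0x8864, ETH_P_PPP_SES */
	uint16_t encap_id;		/* PPPoE session id */
	uint8_t  h_dest[ETH_ALEN];	/* access concentrator MAC */
	struct netdev *dev;		/* device this path entry describes */
};

struct pppoe_session {
	uint16_t sid;
	uint8_t remote[ETH_ALEN];
	struct netdev *underlying;	/* e.g. the WAN ethernet device */
};

static void fill_pppoe_path(struct path_entry *p, struct netdev **walk_dev,
			    const struct pppoe_session *s)
{
	p->encap_proto = 0x8864;
	p->encap_id = s->sid;
	memcpy(p->h_dest, s->remote, ETH_ALEN);
	p->dev = *walk_dev;
	*walk_dev = s->underlying;	/* keep walking from the real device */
}

int main(void)
{
	struct netdev pppoe_wan = { "pppoe-wan" }, eth0 = { "eth0" };
	struct pppoe_session s = {
		.sid = 0x1234,
		.remote = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
		.underlying = &eth0,
	};
	struct netdev *dev = &pppoe_wan;
	struct path_entry p;

	fill_pppoe_path(&p, &dev, &s);
	printf("%s -> %s (session %#x)\n", pppoe_wan.name, dev->name, p.encap_id);
	return 0;
}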
+ +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/drivers/net/ppp/ppp_generic.c ++++ b/drivers/net/ppp/ppp_generic.c +@@ -1466,12 +1466,34 @@ static void ppp_dev_priv_destructor(stru + ppp_destroy_interface(ppp); + } + ++static int ppp_fill_forward_path(struct net_device_path_ctx *ctx, ++ struct net_device_path *path) ++{ ++ struct ppp *ppp = netdev_priv(ctx->dev); ++ struct ppp_channel *chan; ++ struct channel *pch; ++ ++ if (ppp->flags & SC_MULTILINK) ++ return -EOPNOTSUPP; ++ ++ if (list_empty(&ppp->channels)) ++ return -ENODEV; ++ ++ pch = list_first_entry(&ppp->channels, struct channel, clist); ++ chan = pch->chan; ++ if (!chan->ops->fill_forward_path) ++ return -EOPNOTSUPP; ++ ++ return chan->ops->fill_forward_path(ctx, path, chan); ++} ++ + static const struct net_device_ops ppp_netdev_ops = { + .ndo_init = ppp_dev_init, + .ndo_uninit = ppp_dev_uninit, + .ndo_start_xmit = ppp_start_xmit, + .ndo_do_ioctl = ppp_net_ioctl, + .ndo_get_stats64 = ppp_get_stats64, ++ .ndo_fill_forward_path = ppp_fill_forward_path, + }; + + static struct device_type ppp_type = { +--- a/drivers/net/ppp/pppoe.c ++++ b/drivers/net/ppp/pppoe.c +@@ -972,8 +972,31 @@ static int pppoe_xmit(struct ppp_channel + return __pppoe_xmit(sk, skb); + } + ++static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx, ++ struct net_device_path *path, ++ const struct ppp_channel *chan) ++{ ++ struct sock *sk = (struct sock *)chan->private; ++ struct pppox_sock *po = pppox_sk(sk); ++ struct net_device *dev = po->pppoe_dev; ++ ++ if (sock_flag(sk, SOCK_DEAD) || ++ !(sk->sk_state & PPPOX_CONNECTED) || !dev) ++ return -1; ++ ++ path->type = DEV_PATH_PPPOE; ++ path->encap.proto = htons(ETH_P_PPP_SES); ++ path->encap.id = be16_to_cpu(po->num); ++ memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN); ++ path->dev = ctx->dev; ++ ctx->dev = dev; ++ ++ return 0; ++} ++ + static const struct ppp_channel_ops pppoe_chan_ops = { + .start_xmit = pppoe_xmit, ++ .fill_forward_path = pppoe_fill_forward_path, + }; + + static int pppoe_recvmsg(struct socket *sock, struct msghdr *m, +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -831,6 +831,7 @@ enum net_device_path_type { + DEV_PATH_ETHERNET = 0, + DEV_PATH_VLAN, + DEV_PATH_BRIDGE, ++ DEV_PATH_PPPOE, + }; + + struct net_device_path { +@@ -840,6 +841,7 @@ struct net_device_path { + struct { + u16 id; + __be16 proto; ++ u8 h_dest[ETH_ALEN]; + } encap; + struct { + enum { +--- a/include/linux/ppp_channel.h ++++ b/include/linux/ppp_channel.h +@@ -28,6 +28,9 @@ struct ppp_channel_ops { + int (*start_xmit)(struct ppp_channel *, struct sk_buff *); + /* Handle an ioctl call that has come in via /dev/ppp. 
*/ + int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long); ++ int (*fill_forward_path)(struct net_device_path_ctx *, ++ struct net_device_path *, ++ const struct ppp_channel *); + }; + + struct ppp_channel { diff --git a/target/linux/generic/backport-5.15/610-v5.13-16-net-dsa-resolve-forwarding-path-for-dsa-slave-ports.patch b/target/linux/generic/backport-5.15/610-v5.13-16-net-dsa-resolve-forwarding-path-for-dsa-slave-ports.patch new file mode 100644 index 0000000000..1d3021a575 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-16-net-dsa-resolve-forwarding-path-for-dsa-slave-ports.patch @@ -0,0 +1,63 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Wed, 24 Mar 2021 02:30:37 +0100 +Subject: [PATCH] net: dsa: resolve forwarding path for dsa slave ports + +Add .ndo_fill_forward_path for dsa slave port devices + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -832,6 +832,7 @@ enum net_device_path_type { + DEV_PATH_VLAN, + DEV_PATH_BRIDGE, + DEV_PATH_PPPOE, ++ DEV_PATH_DSA, + }; + + struct net_device_path { +@@ -852,6 +853,10 @@ struct net_device_path { + u16 vlan_id; + __be16 vlan_proto; + } bridge; ++ struct { ++ int port; ++ u16 proto; ++ } dsa; + }; + }; + +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -1619,6 +1619,21 @@ static struct devlink_port *dsa_slave_ge + return dp->ds->devlink ? &dp->devlink_port : NULL; + } + ++static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx, ++ struct net_device_path *path) ++{ ++ struct dsa_port *dp = dsa_slave_to_port(ctx->dev); ++ struct dsa_port *cpu_dp = dp->cpu_dp; ++ ++ path->dev = ctx->dev; ++ path->type = DEV_PATH_DSA; ++ path->dsa.proto = cpu_dp->tag_ops->proto; ++ path->dsa.port = dp->index; ++ ctx->dev = cpu_dp->master; ++ ++ return 0; ++} ++ + static const struct net_device_ops dsa_slave_netdev_ops = { + .ndo_open = dsa_slave_open, + .ndo_stop = dsa_slave_close, +@@ -1644,6 +1659,7 @@ static const struct net_device_ops dsa_s + .ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid, + .ndo_get_devlink_port = dsa_slave_get_devlink_port, + .ndo_change_mtu = dsa_slave_change_mtu, ++ .ndo_fill_forward_path = dsa_slave_fill_forward_path, + }; + + static struct device_type dsa_type = { diff --git a/target/linux/generic/backport-5.15/610-v5.13-17-netfilter-flowtable-add-xmit-path-types.patch b/target/linux/generic/backport-5.15/610-v5.13-17-netfilter-flowtable-add-xmit-path-types.patch new file mode 100644 index 0000000000..6052f67faa --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-17-netfilter-flowtable-add-xmit-path-types.patch @@ -0,0 +1,147 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Wed, 24 Mar 2021 02:30:38 +0100 +Subject: [PATCH] netfilter: flowtable: add xmit path types + +Add the xmit_type field that defines the two supported xmit paths in the +flowtable data plane, which are the neighbour and the xfrm xmit paths. +This patch prepares for new flowtable xmit path types to come. 
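A minimal standalone sketch of the resulting dispatch, with made-up types: the transmit type is classified once when the flow entry is created (the dst_xfrm() test is replaced by a plain boolean here) and the per-packet path only switches on the stored 2-bit field.

/* Standalone sketch of the dispatch this patch prepares for: the transmit
 * path is chosen at flow setup time and stored as a 2-bit xmit_type next
 * to the direction bits, so the per-packet hook only switches on it.
 * The "has_xfrm" flag stands in for the dst_xfrm() check; the patch packs
 * both fields into a single u8, here plain unsigned int bit-fields. */
#include <stdbool.h>
#include <stdio.h>

enum xmit_type { XMIT_NEIGH = 0, XMIT_XFRM };

struct tuple {
	unsigned int dir:6;
	unsigned int xmit_type:2;
};

/* Stand-in for "dst_xfrm(dst) != NULL" at flow setup time. */
static enum xmit_type classify(bool has_xfrm)
{
	return has_xfrm ? XMIT_XFRM : XMIT_NEIGH;
}

static const char *xmit(const struct tuple *t)
{
	switch (t->xmit_type) {
	case XMIT_XFRM:
		return "hand back to the xfrm output path";
	case XMIT_NEIGH:
	default:
		return "neigh_xmit() towards the next hop";
	}
}

int main(void)
{
	struct tuple t = { .dir = 0, .xmit_type = classify(false) };

	printf("%s\n", xmit(&t));
	return 0;
}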
+ +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/net/netfilter/nf_flow_table.h ++++ b/include/net/netfilter/nf_flow_table.h +@@ -89,6 +89,11 @@ enum flow_offload_tuple_dir { + }; + #define FLOW_OFFLOAD_DIR_MAX IP_CT_DIR_MAX + ++enum flow_offload_xmit_type { ++ FLOW_OFFLOAD_XMIT_NEIGH = 0, ++ FLOW_OFFLOAD_XMIT_XFRM, ++}; ++ + struct flow_offload_tuple { + union { + struct in_addr src_v4; +@@ -111,7 +116,8 @@ struct flow_offload_tuple { + /* All members above are keys for lookups, see flow_offload_hash(). */ + struct { } __hash; + +- u8 dir; ++ u8 dir:6, ++ xmit_type:2; + + u16 mtu; + +@@ -157,7 +163,8 @@ static inline __s32 nf_flow_timeout_delt + + struct nf_flow_route { + struct { +- struct dst_entry *dst; ++ struct dst_entry *dst; ++ enum flow_offload_xmit_type xmit_type; + } tuple[FLOW_OFFLOAD_DIR_MAX]; + }; + +--- a/net/netfilter/nf_flow_table_core.c ++++ b/net/netfilter/nf_flow_table_core.c +@@ -95,6 +95,7 @@ static int flow_offload_fill_route(struc + } + + flow_tuple->iifidx = other_dst->dev->ifindex; ++ flow_tuple->xmit_type = route->tuple[dir].xmit_type; + flow_tuple->dst_cache = dst; + + return 0; +--- a/net/netfilter/nf_flow_table_ip.c ++++ b/net/netfilter/nf_flow_table_ip.c +@@ -235,8 +235,6 @@ nf_flow_offload_ip_hook(void *priv, stru + + dir = tuplehash->tuple.dir; + flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]); +- rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache; +- outdev = rt->dst.dev; + + if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu))) + return NF_ACCEPT; +@@ -265,13 +263,16 @@ nf_flow_offload_ip_hook(void *priv, stru + if (flow_table->flags & NF_FLOWTABLE_COUNTER) + nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len); + +- if (unlikely(dst_xfrm(&rt->dst))) { ++ rt = (struct rtable *)tuplehash->tuple.dst_cache; ++ ++ if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) { + memset(skb->cb, 0, sizeof(struct inet_skb_parm)); + IPCB(skb)->iif = skb->dev->ifindex; + IPCB(skb)->flags = IPSKB_FORWARDED; + return nf_flow_xmit_xfrm(skb, state, &rt->dst); + } + ++ outdev = rt->dst.dev; + skb->dev = outdev; + nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr); + skb_dst_set_noref(skb, &rt->dst); +@@ -456,8 +457,6 @@ nf_flow_offload_ipv6_hook(void *priv, st + + dir = tuplehash->tuple.dir; + flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]); +- rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache; +- outdev = rt->dst.dev; + + if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu))) + return NF_ACCEPT; +@@ -485,13 +484,16 @@ nf_flow_offload_ipv6_hook(void *priv, st + if (flow_table->flags & NF_FLOWTABLE_COUNTER) + nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len); + +- if (unlikely(dst_xfrm(&rt->dst))) { ++ rt = (struct rt6_info *)tuplehash->tuple.dst_cache; ++ ++ if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) { + memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); + IP6CB(skb)->iif = skb->dev->ifindex; + IP6CB(skb)->flags = IP6SKB_FORWARDED; + return nf_flow_xmit_xfrm(skb, state, &rt->dst); + } + ++ outdev = rt->dst.dev; + skb->dev = outdev; + nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6); + skb_dst_set_noref(skb, &rt->dst); +--- a/net/netfilter/nft_flow_offload.c ++++ b/net/netfilter/nft_flow_offload.c +@@ -19,6 +19,22 @@ struct nft_flow_offload { + struct nft_flowtable *flowtable; + }; + ++static enum flow_offload_xmit_type nft_xmit_type(struct dst_entry *dst) ++{ ++ if 
(dst_xfrm(dst)) ++ return FLOW_OFFLOAD_XMIT_XFRM; ++ ++ return FLOW_OFFLOAD_XMIT_NEIGH; ++} ++ ++static void nft_default_forward_path(struct nf_flow_route *route, ++ struct dst_entry *dst_cache, ++ enum ip_conntrack_dir dir) ++{ ++ route->tuple[dir].dst = dst_cache; ++ route->tuple[dir].xmit_type = nft_xmit_type(dst_cache); ++} ++ + static int nft_flow_route(const struct nft_pktinfo *pkt, + const struct nf_conn *ct, + struct nf_flow_route *route, +@@ -44,8 +60,8 @@ static int nft_flow_route(const struct n + if (!other_dst) + return -ENOENT; + +- route->tuple[dir].dst = this_dst; +- route->tuple[!dir].dst = other_dst; ++ nft_default_forward_path(route, this_dst, dir); ++ nft_default_forward_path(route, other_dst, !dir); + + return 0; + } diff --git a/target/linux/generic/backport-5.15/610-v5.13-18-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch b/target/linux/generic/backport-5.15/610-v5.13-18-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch new file mode 100644 index 0000000000..9541ce8867 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-18-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch @@ -0,0 +1,191 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Wed, 24 Mar 2021 02:30:39 +0100 +Subject: [PATCH] netfilter: flowtable: use dev_fill_forward_path() to + obtain ingress device + +Obtain the ingress device in the tuple from the route in the reply +direction. Use dev_fill_forward_path() instead to get the real ingress +device for this flow. + +Fall back to use the ingress device that the IP forwarding route +provides if: + +- dev_fill_forward_path() finds no real ingress device. +- the ingress device that is obtained is not part of the flowtable + devices. +- this route has a xfrm policy. + +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/net/netfilter/nf_flow_table.h ++++ b/include/net/netfilter/nf_flow_table.h +@@ -164,6 +164,9 @@ static inline __s32 nf_flow_timeout_delt + struct nf_flow_route { + struct { + struct dst_entry *dst; ++ struct { ++ u32 ifindex; ++ } in; + enum flow_offload_xmit_type xmit_type; + } tuple[FLOW_OFFLOAD_DIR_MAX]; + }; +--- a/net/netfilter/nf_flow_table_core.c ++++ b/net/netfilter/nf_flow_table_core.c +@@ -79,7 +79,6 @@ static int flow_offload_fill_route(struc + enum flow_offload_tuple_dir dir) + { + struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple; +- struct dst_entry *other_dst = route->tuple[!dir].dst; + struct dst_entry *dst = route->tuple[dir].dst; + + if (!dst_hold_safe(route->tuple[dir].dst)) +@@ -94,7 +93,7 @@ static int flow_offload_fill_route(struc + break; + } + +- flow_tuple->iifidx = other_dst->dev->ifindex; ++ flow_tuple->iifidx = route->tuple[dir].in.ifindex; + flow_tuple->xmit_type = route->tuple[dir].xmit_type; + flow_tuple->dst_cache = dst; + +--- a/net/netfilter/nft_flow_offload.c ++++ b/net/netfilter/nft_flow_offload.c +@@ -31,14 +31,104 @@ static void nft_default_forward_path(str + struct dst_entry *dst_cache, + enum ip_conntrack_dir dir) + { ++ route->tuple[!dir].in.ifindex = dst_cache->dev->ifindex; + route->tuple[dir].dst = dst_cache; + route->tuple[dir].xmit_type = nft_xmit_type(dst_cache); + } + ++static int nft_dev_fill_forward_path(const struct nf_flow_route *route, ++ const struct dst_entry *dst_cache, ++ const struct nf_conn *ct, ++ enum ip_conntrack_dir dir, ++ struct net_device_path_stack *stack) ++{ ++ const void *daddr = &ct->tuplehash[!dir].tuple.src.u3; ++ struct net_device *dev = dst_cache->dev; ++ unsigned char ha[ETH_ALEN]; 
++ struct neighbour *n; ++ u8 nud_state; ++ ++ n = dst_neigh_lookup(dst_cache, daddr); ++ if (!n) ++ return -1; ++ ++ read_lock_bh(&n->lock); ++ nud_state = n->nud_state; ++ ether_addr_copy(ha, n->ha); ++ read_unlock_bh(&n->lock); ++ neigh_release(n); ++ ++ if (!(nud_state & NUD_VALID)) ++ return -1; ++ ++ return dev_fill_forward_path(dev, ha, stack); ++} ++ ++struct nft_forward_info { ++ const struct net_device *indev; ++}; ++ ++static void nft_dev_path_info(const struct net_device_path_stack *stack, ++ struct nft_forward_info *info) ++{ ++ const struct net_device_path *path; ++ int i; ++ ++ for (i = 0; i < stack->num_paths; i++) { ++ path = &stack->path[i]; ++ switch (path->type) { ++ case DEV_PATH_ETHERNET: ++ info->indev = path->dev; ++ break; ++ case DEV_PATH_VLAN: ++ case DEV_PATH_BRIDGE: ++ default: ++ info->indev = NULL; ++ break; ++ } ++ } ++} ++ ++static bool nft_flowtable_find_dev(const struct net_device *dev, ++ struct nft_flowtable *ft) ++{ ++ struct nft_hook *hook; ++ bool found = false; ++ ++ list_for_each_entry_rcu(hook, &ft->hook_list, list) { ++ if (hook->ops.dev != dev) ++ continue; ++ ++ found = true; ++ break; ++ } ++ ++ return found; ++} ++ ++static void nft_dev_forward_path(struct nf_flow_route *route, ++ const struct nf_conn *ct, ++ enum ip_conntrack_dir dir, ++ struct nft_flowtable *ft) ++{ ++ const struct dst_entry *dst = route->tuple[dir].dst; ++ struct net_device_path_stack stack; ++ struct nft_forward_info info = {}; ++ ++ if (nft_dev_fill_forward_path(route, dst, ct, dir, &stack) >= 0) ++ nft_dev_path_info(&stack, &info); ++ ++ if (!info.indev || !nft_flowtable_find_dev(info.indev, ft)) ++ return; ++ ++ route->tuple[!dir].in.ifindex = info.indev->ifindex; ++} ++ + static int nft_flow_route(const struct nft_pktinfo *pkt, + const struct nf_conn *ct, + struct nf_flow_route *route, +- enum ip_conntrack_dir dir) ++ enum ip_conntrack_dir dir, ++ struct nft_flowtable *ft) + { + struct dst_entry *this_dst = skb_dst(pkt->skb); + struct dst_entry *other_dst = NULL; +@@ -63,6 +153,12 @@ static int nft_flow_route(const struct n + nft_default_forward_path(route, this_dst, dir); + nft_default_forward_path(route, other_dst, !dir); + ++ if (route->tuple[dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH && ++ route->tuple[!dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH) { ++ nft_dev_forward_path(route, ct, dir, ft); ++ nft_dev_forward_path(route, ct, !dir, ft); ++ } ++ + return 0; + } + +@@ -90,8 +186,8 @@ static void nft_flow_offload_eval(const + struct nft_flow_offload *priv = nft_expr_priv(expr); + struct nf_flowtable *flowtable = &priv->flowtable->data; + struct tcphdr _tcph, *tcph = NULL; ++ struct nf_flow_route route = {}; + enum ip_conntrack_info ctinfo; +- struct nf_flow_route route; + struct flow_offload *flow; + enum ip_conntrack_dir dir; + struct nf_conn *ct; +@@ -128,7 +224,7 @@ static void nft_flow_offload_eval(const + goto out; + + dir = CTINFO2DIR(ctinfo); +- if (nft_flow_route(pkt, ct, &route, dir) < 0) ++ if (nft_flow_route(pkt, ct, &route, dir, priv->flowtable) < 0) + goto err_flow_route; + + flow = flow_offload_alloc(ct); diff --git a/target/linux/generic/backport-5.15/610-v5.13-19-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch b/target/linux/generic/backport-5.15/610-v5.13-19-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch new file mode 100644 index 0000000000..457e218d9b --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-19-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch @@ -0,0 +1,374 @@ +From: Pablo Neira Ayuso 
<pablo@netfilter.org> +Date: Wed, 24 Mar 2021 02:30:40 +0100 +Subject: [PATCH] netfilter: flowtable: use dev_fill_forward_path() to + obtain egress device + +The egress device in the tuple is obtained from route. Use +dev_fill_forward_path() instead to provide the real egress device for +this flow whenever this is available. + +The new FLOW_OFFLOAD_XMIT_DIRECT type uses dev_queue_xmit() to transmit +ethernet frames. Cache the source and destination hardware address to +use dev_queue_xmit() to transfer packets. + +The FLOW_OFFLOAD_XMIT_DIRECT replaces FLOW_OFFLOAD_XMIT_NEIGH if +dev_fill_forward_path() finds a direct transmit path. + +In case of topology updates, if peer is moved to different bridge port, +the connection will time out, reconnect will result in a new entry with +the correct path. Snooping fdb updates would allow for cleaning up stale +flowtable entries. + +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/net/netfilter/nf_flow_table.h ++++ b/include/net/netfilter/nf_flow_table.h +@@ -92,6 +92,7 @@ enum flow_offload_tuple_dir { + enum flow_offload_xmit_type { + FLOW_OFFLOAD_XMIT_NEIGH = 0, + FLOW_OFFLOAD_XMIT_XFRM, ++ FLOW_OFFLOAD_XMIT_DIRECT, + }; + + struct flow_offload_tuple { +@@ -120,8 +121,14 @@ struct flow_offload_tuple { + xmit_type:2; + + u16 mtu; +- +- struct dst_entry *dst_cache; ++ union { ++ struct dst_entry *dst_cache; ++ struct { ++ u32 ifidx; ++ u8 h_source[ETH_ALEN]; ++ u8 h_dest[ETH_ALEN]; ++ } out; ++ }; + }; + + struct flow_offload_tuple_rhash { +@@ -167,6 +174,11 @@ struct nf_flow_route { + struct { + u32 ifindex; + } in; ++ struct { ++ u32 ifindex; ++ u8 h_source[ETH_ALEN]; ++ u8 h_dest[ETH_ALEN]; ++ } out; + enum flow_offload_xmit_type xmit_type; + } tuple[FLOW_OFFLOAD_DIR_MAX]; + }; +--- a/net/netfilter/nf_flow_table_core.c ++++ b/net/netfilter/nf_flow_table_core.c +@@ -81,9 +81,6 @@ static int flow_offload_fill_route(struc + struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple; + struct dst_entry *dst = route->tuple[dir].dst; + +- if (!dst_hold_safe(route->tuple[dir].dst)) +- return -1; +- + switch (flow_tuple->l3proto) { + case NFPROTO_IPV4: + flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true); +@@ -94,12 +91,36 @@ static int flow_offload_fill_route(struc + } + + flow_tuple->iifidx = route->tuple[dir].in.ifindex; ++ ++ switch (route->tuple[dir].xmit_type) { ++ case FLOW_OFFLOAD_XMIT_DIRECT: ++ memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest, ++ ETH_ALEN); ++ memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source, ++ ETH_ALEN); ++ flow_tuple->out.ifidx = route->tuple[dir].out.ifindex; ++ break; ++ case FLOW_OFFLOAD_XMIT_XFRM: ++ case FLOW_OFFLOAD_XMIT_NEIGH: ++ if (!dst_hold_safe(route->tuple[dir].dst)) ++ return -1; ++ ++ flow_tuple->dst_cache = dst; ++ break; ++ } + flow_tuple->xmit_type = route->tuple[dir].xmit_type; +- flow_tuple->dst_cache = dst; + + return 0; + } + ++static void nft_flow_dst_release(struct flow_offload *flow, ++ enum flow_offload_tuple_dir dir) ++{ ++ if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH || ++ flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) ++ dst_release(flow->tuplehash[dir].tuple.dst_cache); ++} ++ + int flow_offload_route_init(struct flow_offload *flow, + const struct nf_flow_route *route) + { +@@ -118,7 +139,7 @@ int flow_offload_route_init(struct flow_ + return 0; + + err_route_reply: +- dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst); ++ nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL); + + return 
err; + } +@@ -169,8 +190,8 @@ static void flow_offload_fixup_ct(struct + + static void flow_offload_route_release(struct flow_offload *flow) + { +- dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache); +- dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache); ++ nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL); ++ nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY); + } + + void flow_offload_free(struct flow_offload *flow) +--- a/net/netfilter/nf_flow_table_ip.c ++++ b/net/netfilter/nf_flow_table_ip.c +@@ -207,6 +207,24 @@ static unsigned int nf_flow_xmit_xfrm(st + return NF_STOLEN; + } + ++static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb, ++ const struct flow_offload_tuple_rhash *tuplehash, ++ unsigned short type) ++{ ++ struct net_device *outdev; ++ ++ outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx); ++ if (!outdev) ++ return NF_DROP; ++ ++ skb->dev = outdev; ++ dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest, ++ tuplehash->tuple.out.h_source, skb->len); ++ dev_queue_xmit(skb); ++ ++ return NF_STOLEN; ++} ++ + unsigned int + nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +@@ -222,6 +240,7 @@ nf_flow_offload_ip_hook(void *priv, stru + struct iphdr *iph; + __be32 nexthop; + u32 hdrsize; ++ int ret; + + if (skb->protocol != htons(ETH_P_IP)) + return NF_ACCEPT; +@@ -244,9 +263,13 @@ nf_flow_offload_ip_hook(void *priv, stru + if (nf_flow_state_check(flow, iph->protocol, skb, thoff)) + return NF_ACCEPT; + +- if (!dst_check(&rt->dst, 0)) { +- flow_offload_teardown(flow); +- return NF_ACCEPT; ++ if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH || ++ tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) { ++ rt = (struct rtable *)tuplehash->tuple.dst_cache; ++ if (!dst_check(&rt->dst, 0)) { ++ flow_offload_teardown(flow); ++ return NF_ACCEPT; ++ } + } + + if (skb_try_make_writable(skb, thoff + hdrsize)) +@@ -263,8 +286,6 @@ nf_flow_offload_ip_hook(void *priv, stru + if (flow_table->flags & NF_FLOWTABLE_COUNTER) + nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len); + +- rt = (struct rtable *)tuplehash->tuple.dst_cache; +- + if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) { + memset(skb->cb, 0, sizeof(struct inet_skb_parm)); + IPCB(skb)->iif = skb->dev->ifindex; +@@ -272,13 +293,23 @@ nf_flow_offload_ip_hook(void *priv, stru + return nf_flow_xmit_xfrm(skb, state, &rt->dst); + } + +- outdev = rt->dst.dev; +- skb->dev = outdev; +- nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr); +- skb_dst_set_noref(skb, &rt->dst); +- neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb); ++ switch (tuplehash->tuple.xmit_type) { ++ case FLOW_OFFLOAD_XMIT_NEIGH: ++ outdev = rt->dst.dev; ++ skb->dev = outdev; ++ nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr); ++ skb_dst_set_noref(skb, &rt->dst); ++ neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb); ++ ret = NF_STOLEN; ++ break; ++ case FLOW_OFFLOAD_XMIT_DIRECT: ++ ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP); ++ if (ret == NF_DROP) ++ flow_offload_teardown(flow); ++ break; ++ } + +- return NF_STOLEN; ++ return ret; + } + EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook); + +@@ -444,6 +475,7 @@ nf_flow_offload_ipv6_hook(void *priv, st + struct ipv6hdr *ip6h; + struct rt6_info *rt; + u32 hdrsize; ++ int ret; + + if (skb->protocol != htons(ETH_P_IPV6)) + return NF_ACCEPT; +@@ -465,9 +497,13 @@ nf_flow_offload_ipv6_hook(void *priv, st + 
sizeof(*ip6h))) + return NF_ACCEPT; + +- if (!dst_check(&rt->dst, 0)) { +- flow_offload_teardown(flow); +- return NF_ACCEPT; ++ if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH || ++ tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) { ++ rt = (struct rt6_info *)tuplehash->tuple.dst_cache; ++ if (!dst_check(&rt->dst, 0)) { ++ flow_offload_teardown(flow); ++ return NF_ACCEPT; ++ } + } + + if (skb_try_make_writable(skb, sizeof(*ip6h) + hdrsize)) +@@ -484,8 +520,6 @@ nf_flow_offload_ipv6_hook(void *priv, st + if (flow_table->flags & NF_FLOWTABLE_COUNTER) + nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len); + +- rt = (struct rt6_info *)tuplehash->tuple.dst_cache; +- + if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) { + memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); + IP6CB(skb)->iif = skb->dev->ifindex; +@@ -493,12 +527,22 @@ nf_flow_offload_ipv6_hook(void *priv, st + return nf_flow_xmit_xfrm(skb, state, &rt->dst); + } + +- outdev = rt->dst.dev; +- skb->dev = outdev; +- nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6); +- skb_dst_set_noref(skb, &rt->dst); +- neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb); ++ switch (tuplehash->tuple.xmit_type) { ++ case FLOW_OFFLOAD_XMIT_NEIGH: ++ outdev = rt->dst.dev; ++ skb->dev = outdev; ++ nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6); ++ skb_dst_set_noref(skb, &rt->dst); ++ neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb); ++ ret = NF_STOLEN; ++ break; ++ case FLOW_OFFLOAD_XMIT_DIRECT: ++ ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6); ++ if (ret == NF_DROP) ++ flow_offload_teardown(flow); ++ break; ++ } + +- return NF_STOLEN; ++ return ret; + } + EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook); +--- a/net/netfilter/nft_flow_offload.c ++++ b/net/netfilter/nft_flow_offload.c +@@ -39,12 +39,11 @@ static void nft_default_forward_path(str + static int nft_dev_fill_forward_path(const struct nf_flow_route *route, + const struct dst_entry *dst_cache, + const struct nf_conn *ct, +- enum ip_conntrack_dir dir, ++ enum ip_conntrack_dir dir, u8 *ha, + struct net_device_path_stack *stack) + { + const void *daddr = &ct->tuplehash[!dir].tuple.src.u3; + struct net_device *dev = dst_cache->dev; +- unsigned char ha[ETH_ALEN]; + struct neighbour *n; + u8 nud_state; + +@@ -66,27 +65,43 @@ static int nft_dev_fill_forward_path(con + + struct nft_forward_info { + const struct net_device *indev; ++ const struct net_device *outdev; ++ u8 h_source[ETH_ALEN]; ++ u8 h_dest[ETH_ALEN]; ++ enum flow_offload_xmit_type xmit_type; + }; + + static void nft_dev_path_info(const struct net_device_path_stack *stack, +- struct nft_forward_info *info) ++ struct nft_forward_info *info, ++ unsigned char *ha) + { + const struct net_device_path *path; + int i; + ++ memcpy(info->h_dest, ha, ETH_ALEN); ++ + for (i = 0; i < stack->num_paths; i++) { + path = &stack->path[i]; + switch (path->type) { + case DEV_PATH_ETHERNET: + info->indev = path->dev; ++ if (is_zero_ether_addr(info->h_source)) ++ memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN); + break; +- case DEV_PATH_VLAN: + case DEV_PATH_BRIDGE: ++ if (is_zero_ether_addr(info->h_source)) ++ memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN); ++ ++ info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT; ++ break; ++ case DEV_PATH_VLAN: + default: + info->indev = NULL; + break; + } + } ++ if (!info->outdev) ++ info->outdev = info->indev; + } + + static bool nft_flowtable_find_dev(const struct net_device *dev, +@@ -114,14 +129,22 @@ static void 
nft_dev_forward_path(struct + const struct dst_entry *dst = route->tuple[dir].dst; + struct net_device_path_stack stack; + struct nft_forward_info info = {}; ++ unsigned char ha[ETH_ALEN]; + +- if (nft_dev_fill_forward_path(route, dst, ct, dir, &stack) >= 0) +- nft_dev_path_info(&stack, &info); ++ if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0) ++ nft_dev_path_info(&stack, &info, ha); + + if (!info.indev || !nft_flowtable_find_dev(info.indev, ft)) + return; + + route->tuple[!dir].in.ifindex = info.indev->ifindex; ++ ++ if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) { ++ memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN); ++ memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN); ++ route->tuple[dir].out.ifindex = info.outdev->ifindex; ++ route->tuple[dir].xmit_type = info.xmit_type; ++ } + } + + static int nft_flow_route(const struct nft_pktinfo *pkt, diff --git a/target/linux/generic/backport-5.15/610-v5.13-20-netfilter-flowtable-add-vlan-support.patch b/target/linux/generic/backport-5.15/610-v5.13-20-netfilter-flowtable-add-vlan-support.patch new file mode 100644 index 0000000000..86a1129880 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-20-netfilter-flowtable-add-vlan-support.patch @@ -0,0 +1,410 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Wed, 24 Mar 2021 02:30:41 +0100 +Subject: [PATCH] netfilter: flowtable: add vlan support + +Add the vlan id and protocol to the flow tuple to uniquely identify +flows from the receive path. For the transmit path, dev_hard_header() on +the vlan device push the headers. This patch includes support for two +vlan headers (QinQ) from the ingress path. + +Add a generic encap field to the flowtable entry which stores the +protocol and the tag id. This allows to reuse these fields in the PPPoE +support coming in a later patch. + +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/net/netfilter/nf_flow_table.h ++++ b/include/net/netfilter/nf_flow_table.h +@@ -95,6 +95,8 @@ enum flow_offload_xmit_type { + FLOW_OFFLOAD_XMIT_DIRECT, + }; + ++#define NF_FLOW_TABLE_ENCAP_MAX 2 ++ + struct flow_offload_tuple { + union { + struct in_addr src_v4; +@@ -113,13 +115,17 @@ struct flow_offload_tuple { + + u8 l3proto; + u8 l4proto; ++ struct { ++ u16 id; ++ __be16 proto; ++ } encap[NF_FLOW_TABLE_ENCAP_MAX]; + + /* All members above are keys for lookups, see flow_offload_hash(). 
*/ + struct { } __hash; + +- u8 dir:6, +- xmit_type:2; +- ++ u8 dir:4, ++ xmit_type:2, ++ encap_num:2; + u16 mtu; + union { + struct dst_entry *dst_cache; +@@ -173,6 +179,11 @@ struct nf_flow_route { + struct dst_entry *dst; + struct { + u32 ifindex; ++ struct { ++ u16 id; ++ __be16 proto; ++ } encap[NF_FLOW_TABLE_ENCAP_MAX]; ++ u8 num_encaps; + } in; + struct { + u32 ifindex; +--- a/net/netfilter/nf_flow_table_core.c ++++ b/net/netfilter/nf_flow_table_core.c +@@ -80,6 +80,7 @@ static int flow_offload_fill_route(struc + { + struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple; + struct dst_entry *dst = route->tuple[dir].dst; ++ int i, j = 0; + + switch (flow_tuple->l3proto) { + case NFPROTO_IPV4: +@@ -91,6 +92,12 @@ static int flow_offload_fill_route(struc + } + + flow_tuple->iifidx = route->tuple[dir].in.ifindex; ++ for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) { ++ flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id; ++ flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto; ++ j++; ++ } ++ flow_tuple->encap_num = route->tuple[dir].in.num_encaps; + + switch (route->tuple[dir].xmit_type) { + case FLOW_OFFLOAD_XMIT_DIRECT: +--- a/net/netfilter/nf_flow_table_ip.c ++++ b/net/netfilter/nf_flow_table_ip.c +@@ -136,23 +136,44 @@ static bool ip_has_options(unsigned int + return thoff != sizeof(struct iphdr); + } + ++static void nf_flow_tuple_encap(struct sk_buff *skb, ++ struct flow_offload_tuple *tuple) ++{ ++ int i = 0; ++ ++ if (skb_vlan_tag_present(skb)) { ++ tuple->encap[i].id = skb_vlan_tag_get(skb); ++ tuple->encap[i].proto = skb->vlan_proto; ++ i++; ++ } ++ if (skb->protocol == htons(ETH_P_8021Q)) { ++ struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb); ++ ++ tuple->encap[i].id = ntohs(veth->h_vlan_TCI); ++ tuple->encap[i].proto = skb->protocol; ++ } ++} ++ + static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev, +- struct flow_offload_tuple *tuple, u32 *hdrsize) ++ struct flow_offload_tuple *tuple, u32 *hdrsize, ++ u32 offset) + { + struct flow_ports *ports; + unsigned int thoff; + struct iphdr *iph; + +- if (!pskb_may_pull(skb, sizeof(*iph))) ++ if (!pskb_may_pull(skb, sizeof(*iph) + offset)) + return -1; + +- iph = ip_hdr(skb); +- thoff = iph->ihl * 4; ++ iph = (struct iphdr *)(skb_network_header(skb) + offset); ++ thoff = (iph->ihl * 4); + + if (ip_is_fragment(iph) || + unlikely(ip_has_options(thoff))) + return -1; + ++ thoff += offset; ++ + switch (iph->protocol) { + case IPPROTO_TCP: + *hdrsize = sizeof(struct tcphdr); +@@ -167,11 +188,10 @@ static int nf_flow_tuple_ip(struct sk_bu + if (iph->ttl <= 1) + return -1; + +- thoff = iph->ihl * 4; + if (!pskb_may_pull(skb, thoff + *hdrsize)) + return -1; + +- iph = ip_hdr(skb); ++ iph = (struct iphdr *)(skb_network_header(skb) + offset); + ports = (struct flow_ports *)(skb_network_header(skb) + thoff); + + tuple->src_v4.s_addr = iph->saddr; +@@ -181,6 +201,7 @@ static int nf_flow_tuple_ip(struct sk_bu + tuple->l3proto = AF_INET; + tuple->l4proto = iph->protocol; + tuple->iifidx = dev->ifindex; ++ nf_flow_tuple_encap(skb, tuple); + + return 0; + } +@@ -207,6 +228,43 @@ static unsigned int nf_flow_xmit_xfrm(st + return NF_STOLEN; + } + ++static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto, ++ u32 *offset) ++{ ++ if (skb->protocol == htons(ETH_P_8021Q)) { ++ struct vlan_ethhdr *veth; ++ ++ veth = (struct vlan_ethhdr *)skb_mac_header(skb); ++ if (veth->h_vlan_encapsulated_proto == proto) { ++ *offset += VLAN_HLEN; ++ return true; 
++ } ++ } ++ ++ return false; ++} ++ ++static void nf_flow_encap_pop(struct sk_buff *skb, ++ struct flow_offload_tuple_rhash *tuplehash) ++{ ++ struct vlan_hdr *vlan_hdr; ++ int i; ++ ++ for (i = 0; i < tuplehash->tuple.encap_num; i++) { ++ if (skb_vlan_tag_present(skb)) { ++ __vlan_hwaccel_clear_tag(skb); ++ continue; ++ } ++ if (skb->protocol == htons(ETH_P_8021Q)) { ++ vlan_hdr = (struct vlan_hdr *)skb->data; ++ __skb_pull(skb, VLAN_HLEN); ++ vlan_set_encap_proto(skb, vlan_hdr); ++ skb_reset_network_header(skb); ++ break; ++ } ++ } ++} ++ + static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb, + const struct flow_offload_tuple_rhash *tuplehash, + unsigned short type) +@@ -235,17 +293,18 @@ nf_flow_offload_ip_hook(void *priv, stru + enum flow_offload_tuple_dir dir; + struct flow_offload *flow; + struct net_device *outdev; ++ u32 hdrsize, offset = 0; ++ unsigned int thoff, mtu; + struct rtable *rt; +- unsigned int thoff; + struct iphdr *iph; + __be32 nexthop; +- u32 hdrsize; + int ret; + +- if (skb->protocol != htons(ETH_P_IP)) ++ if (skb->protocol != htons(ETH_P_IP) && ++ !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &offset)) + return NF_ACCEPT; + +- if (nf_flow_tuple_ip(skb, state->in, &tuple, &hdrsize) < 0) ++ if (nf_flow_tuple_ip(skb, state->in, &tuple, &hdrsize, offset) < 0) + return NF_ACCEPT; + + tuplehash = flow_offload_lookup(flow_table, &tuple); +@@ -255,11 +314,12 @@ nf_flow_offload_ip_hook(void *priv, stru + dir = tuplehash->tuple.dir; + flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]); + +- if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu))) ++ mtu = flow->tuplehash[dir].tuple.mtu + offset; ++ if (unlikely(nf_flow_exceeds_mtu(skb, mtu))) + return NF_ACCEPT; + +- iph = ip_hdr(skb); +- thoff = iph->ihl * 4; ++ iph = (struct iphdr *)(skb_network_header(skb) + offset); ++ thoff = (iph->ihl * 4) + offset; + if (nf_flow_state_check(flow, iph->protocol, skb, thoff)) + return NF_ACCEPT; + +@@ -277,6 +337,9 @@ nf_flow_offload_ip_hook(void *priv, stru + + flow_offload_refresh(flow_table, flow); + ++ nf_flow_encap_pop(skb, tuplehash); ++ thoff -= offset; ++ + iph = ip_hdr(skb); + nf_flow_nat_ip(flow, skb, thoff, dir, iph); + +@@ -418,16 +481,18 @@ static void nf_flow_nat_ipv6(const struc + } + + static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev, +- struct flow_offload_tuple *tuple, u32 *hdrsize) ++ struct flow_offload_tuple *tuple, u32 *hdrsize, ++ u32 offset) + { + struct flow_ports *ports; + struct ipv6hdr *ip6h; + unsigned int thoff; + +- if (!pskb_may_pull(skb, sizeof(*ip6h))) ++ thoff = sizeof(*ip6h) + offset; ++ if (!pskb_may_pull(skb, thoff)) + return -1; + +- ip6h = ipv6_hdr(skb); ++ ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset); + + switch (ip6h->nexthdr) { + case IPPROTO_TCP: +@@ -443,11 +508,10 @@ static int nf_flow_tuple_ipv6(struct sk_ + if (ip6h->hop_limit <= 1) + return -1; + +- thoff = sizeof(*ip6h); + if (!pskb_may_pull(skb, thoff + *hdrsize)) + return -1; + +- ip6h = ipv6_hdr(skb); ++ ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset); + ports = (struct flow_ports *)(skb_network_header(skb) + thoff); + + tuple->src_v6 = ip6h->saddr; +@@ -457,6 +521,7 @@ static int nf_flow_tuple_ipv6(struct sk_ + tuple->l3proto = AF_INET6; + tuple->l4proto = ip6h->nexthdr; + tuple->iifidx = dev->ifindex; ++ nf_flow_tuple_encap(skb, tuple); + + return 0; + } +@@ -472,15 +537,17 @@ nf_flow_offload_ipv6_hook(void *priv, st + const struct in6_addr *nexthop; + struct 
flow_offload *flow; + struct net_device *outdev; ++ unsigned int thoff, mtu; ++ u32 hdrsize, offset = 0; + struct ipv6hdr *ip6h; + struct rt6_info *rt; +- u32 hdrsize; + int ret; + +- if (skb->protocol != htons(ETH_P_IPV6)) ++ if (skb->protocol != htons(ETH_P_IPV6) && ++ !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &offset)) + return NF_ACCEPT; + +- if (nf_flow_tuple_ipv6(skb, state->in, &tuple, &hdrsize) < 0) ++ if (nf_flow_tuple_ipv6(skb, state->in, &tuple, &hdrsize, offset) < 0) + return NF_ACCEPT; + + tuplehash = flow_offload_lookup(flow_table, &tuple); +@@ -490,11 +557,13 @@ nf_flow_offload_ipv6_hook(void *priv, st + dir = tuplehash->tuple.dir; + flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]); + +- if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu))) ++ mtu = flow->tuplehash[dir].tuple.mtu + offset; ++ if (unlikely(nf_flow_exceeds_mtu(skb, mtu))) + return NF_ACCEPT; + +- if (nf_flow_state_check(flow, ipv6_hdr(skb)->nexthdr, skb, +- sizeof(*ip6h))) ++ ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset); ++ thoff = sizeof(*ip6h) + offset; ++ if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff)) + return NF_ACCEPT; + + if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH || +@@ -506,11 +575,13 @@ nf_flow_offload_ipv6_hook(void *priv, st + } + } + +- if (skb_try_make_writable(skb, sizeof(*ip6h) + hdrsize)) ++ if (skb_try_make_writable(skb, thoff + hdrsize)) + return NF_DROP; + + flow_offload_refresh(flow_table, flow); + ++ nf_flow_encap_pop(skb, tuplehash); ++ + ip6h = ipv6_hdr(skb); + nf_flow_nat_ipv6(flow, skb, dir, ip6h); + +--- a/net/netfilter/nft_flow_offload.c ++++ b/net/netfilter/nft_flow_offload.c +@@ -66,6 +66,11 @@ static int nft_dev_fill_forward_path(con + struct nft_forward_info { + const struct net_device *indev; + const struct net_device *outdev; ++ struct id { ++ __u16 id; ++ __be16 proto; ++ } encap[NF_FLOW_TABLE_ENCAP_MAX]; ++ u8 num_encaps; + u8 h_source[ETH_ALEN]; + u8 h_dest[ETH_ALEN]; + enum flow_offload_xmit_type xmit_type; +@@ -84,9 +89,23 @@ static void nft_dev_path_info(const stru + path = &stack->path[i]; + switch (path->type) { + case DEV_PATH_ETHERNET: ++ case DEV_PATH_VLAN: + info->indev = path->dev; + if (is_zero_ether_addr(info->h_source)) + memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN); ++ ++ if (path->type == DEV_PATH_ETHERNET) ++ break; ++ ++ /* DEV_PATH_VLAN */ ++ if (info->num_encaps >= NF_FLOW_TABLE_ENCAP_MAX) { ++ info->indev = NULL; ++ break; ++ } ++ info->outdev = path->dev; ++ info->encap[info->num_encaps].id = path->encap.id; ++ info->encap[info->num_encaps].proto = path->encap.proto; ++ info->num_encaps++; + break; + case DEV_PATH_BRIDGE: + if (is_zero_ether_addr(info->h_source)) +@@ -94,7 +113,6 @@ static void nft_dev_path_info(const stru + + info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT; + break; +- case DEV_PATH_VLAN: + default: + info->indev = NULL; + break; +@@ -130,6 +148,7 @@ static void nft_dev_forward_path(struct + struct net_device_path_stack stack; + struct nft_forward_info info = {}; + unsigned char ha[ETH_ALEN]; ++ int i; + + if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0) + nft_dev_path_info(&stack, &info, ha); +@@ -138,6 +157,11 @@ static void nft_dev_forward_path(struct + return; + + route->tuple[!dir].in.ifindex = info.indev->ifindex; ++ for (i = 0; i < info.num_encaps; i++) { ++ route->tuple[!dir].in.encap[i].id = info.encap[i].id; ++ route->tuple[!dir].in.encap[i].proto = info.encap[i].proto; ++ } ++ route->tuple[!dir].in.num_encaps = 
info.num_encaps; + + if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) { + memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN); diff --git a/target/linux/generic/backport-5.15/610-v5.13-21-netfilter-flowtable-add-bridge-vlan-filtering-suppor.patch b/target/linux/generic/backport-5.15/610-v5.13-21-netfilter-flowtable-add-bridge-vlan-filtering-suppor.patch new file mode 100644 index 0000000000..cb3ef99029 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-21-netfilter-flowtable-add-bridge-vlan-filtering-suppor.patch @@ -0,0 +1,30 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Wed, 24 Mar 2021 02:30:42 +0100 +Subject: [PATCH] netfilter: flowtable: add bridge vlan filtering support + +Add the vlan tag based when PVID is set on. + +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/net/netfilter/nft_flow_offload.c ++++ b/net/netfilter/nft_flow_offload.c +@@ -111,6 +111,18 @@ static void nft_dev_path_info(const stru + if (is_zero_ether_addr(info->h_source)) + memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN); + ++ switch (path->bridge.vlan_mode) { ++ case DEV_PATH_BR_VLAN_TAG: ++ info->encap[info->num_encaps].id = path->bridge.vlan_id; ++ info->encap[info->num_encaps].proto = path->bridge.vlan_proto; ++ info->num_encaps++; ++ break; ++ case DEV_PATH_BR_VLAN_UNTAG: ++ info->num_encaps--; ++ break; ++ case DEV_PATH_BR_VLAN_KEEP: ++ break; ++ } + info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT; + break; + default: diff --git a/target/linux/generic/backport-5.15/610-v5.13-22-netfilter-flowtable-add-pppoe-support.patch b/target/linux/generic/backport-5.15/610-v5.13-22-netfilter-flowtable-add-pppoe-support.patch new file mode 100644 index 0000000000..d5789cbad2 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-22-netfilter-flowtable-add-pppoe-support.patch @@ -0,0 +1,145 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Wed, 24 Mar 2021 02:30:43 +0100 +Subject: [PATCH] netfilter: flowtable: add pppoe support + +Add the PPPoE protocol and session id to the flow tuple using the encap +fields to uniquely identify flows from the receive path. For the +transmit path, dev_hard_header() on the vlan device push the headers. 
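
As an aside for readers following the receive-path change above: the sketch below is a stand-alone userspace C model (not kernel code, and not part of this patch) of the lookup the new nf_flow_pppoe_proto() helper performs. It skips the Ethernet header and the 6-byte PPPoE session header of a synthetic frame and maps the PPP protocol field to the inner ethertype; the constants are redefined locally so it builds without kernel headers.

    /* pppoe_proto_demo.c - userspace model of the PPPoE session parsing */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    #define ETH_HLEN        14
    #define PPPOE_HDR_LEN   6       /* ver/type, code, sid, length */
    #define PPP_IP          0x0021
    #define PPP_IPV6        0x0057
    #define ETH_P_IP        0x0800
    #define ETH_P_IPV6      0x86DD

    /* Return the inner ethertype of a PPPoE session frame, or 0 if unknown. */
    static uint16_t pppoe_inner_proto(const uint8_t *frame)
    {
            uint16_t ppp_proto;

            /* The PPP protocol field sits right after the PPPoE header. */
            memcpy(&ppp_proto, frame + ETH_HLEN + PPPOE_HDR_LEN,
                   sizeof(ppp_proto));

            switch (ntohs(ppp_proto)) {
            case PPP_IP:    return ETH_P_IP;
            case PPP_IPV6:  return ETH_P_IPV6;
            default:        return 0;
            }
    }

    int main(void)
    {
            /* Synthetic frame: zero MACs, ethertype 0x8864 (PPPoE session),
             * ver/type 0x11, code 0x00, sid 0x0001, length 0x0002, PPP_IP. */
            uint8_t frame[ETH_HLEN + PPPOE_HDR_LEN + 2] = {
                    [12] = 0x88, [13] = 0x64,
                    [14] = 0x11, [15] = 0x00,
                    [16] = 0x00, [17] = 0x01,
                    [18] = 0x00, [19] = 0x02,
                    [20] = 0x00, [21] = 0x21,
            };

            printf("inner ethertype: 0x%04x\n",
                   (unsigned)pppoe_inner_proto(frame));
            return 0;
    }

Built with any C99 compiler, it reports the PPP_IP payload as ETH_P_IP (0x0800), which is exactly the mapping the flowtable uses to pick the IPv4 or IPv6 hook.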
+ +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/net/netfilter/nf_flow_table_ip.c ++++ b/net/netfilter/nf_flow_table_ip.c +@@ -7,6 +7,9 @@ + #include <linux/ip.h> + #include <linux/ipv6.h> + #include <linux/netdevice.h> ++#include <linux/if_ether.h> ++#include <linux/if_pppox.h> ++#include <linux/ppp_defs.h> + #include <net/ip.h> + #include <net/ipv6.h> + #include <net/ip6_route.h> +@@ -139,6 +142,8 @@ static bool ip_has_options(unsigned int + static void nf_flow_tuple_encap(struct sk_buff *skb, + struct flow_offload_tuple *tuple) + { ++ struct vlan_ethhdr *veth; ++ struct pppoe_hdr *phdr; + int i = 0; + + if (skb_vlan_tag_present(skb)) { +@@ -146,11 +151,17 @@ static void nf_flow_tuple_encap(struct s + tuple->encap[i].proto = skb->vlan_proto; + i++; + } +- if (skb->protocol == htons(ETH_P_8021Q)) { +- struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb); +- ++ switch (skb->protocol) { ++ case htons(ETH_P_8021Q): ++ veth = (struct vlan_ethhdr *)skb_mac_header(skb); + tuple->encap[i].id = ntohs(veth->h_vlan_TCI); + tuple->encap[i].proto = skb->protocol; ++ break; ++ case htons(ETH_P_PPP_SES): ++ phdr = (struct pppoe_hdr *)skb_mac_header(skb); ++ tuple->encap[i].id = ntohs(phdr->sid); ++ tuple->encap[i].proto = skb->protocol; ++ break; + } + } + +@@ -228,17 +239,41 @@ static unsigned int nf_flow_xmit_xfrm(st + return NF_STOLEN; + } + ++static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb) ++{ ++ __be16 proto; ++ ++ proto = *((__be16 *)(skb_mac_header(skb) + ETH_HLEN + ++ sizeof(struct pppoe_hdr))); ++ switch (proto) { ++ case htons(PPP_IP): ++ return htons(ETH_P_IP); ++ case htons(PPP_IPV6): ++ return htons(ETH_P_IPV6); ++ } ++ ++ return 0; ++} ++ + static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto, + u32 *offset) + { +- if (skb->protocol == htons(ETH_P_8021Q)) { +- struct vlan_ethhdr *veth; ++ struct vlan_ethhdr *veth; + ++ switch (skb->protocol) { ++ case htons(ETH_P_8021Q): + veth = (struct vlan_ethhdr *)skb_mac_header(skb); + if (veth->h_vlan_encapsulated_proto == proto) { + *offset += VLAN_HLEN; + return true; + } ++ break; ++ case htons(ETH_P_PPP_SES): ++ if (nf_flow_pppoe_proto(skb) == proto) { ++ *offset += PPPOE_SES_HLEN; ++ return true; ++ } ++ break; + } + + return false; +@@ -255,12 +290,18 @@ static void nf_flow_encap_pop(struct sk_ + __vlan_hwaccel_clear_tag(skb); + continue; + } +- if (skb->protocol == htons(ETH_P_8021Q)) { ++ switch (skb->protocol) { ++ case htons(ETH_P_8021Q): + vlan_hdr = (struct vlan_hdr *)skb->data; + __skb_pull(skb, VLAN_HLEN); + vlan_set_encap_proto(skb, vlan_hdr); + skb_reset_network_header(skb); + break; ++ case htons(ETH_P_PPP_SES): ++ skb->protocol = nf_flow_pppoe_proto(skb); ++ skb_pull(skb, PPPOE_SES_HLEN); ++ skb_reset_network_header(skb); ++ break; + } + } + } +--- a/net/netfilter/nft_flow_offload.c ++++ b/net/netfilter/nft_flow_offload.c +@@ -90,6 +90,7 @@ static void nft_dev_path_info(const stru + switch (path->type) { + case DEV_PATH_ETHERNET: + case DEV_PATH_VLAN: ++ case DEV_PATH_PPPOE: + info->indev = path->dev; + if (is_zero_ether_addr(info->h_source)) + memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN); +@@ -97,7 +98,7 @@ static void nft_dev_path_info(const stru + if (path->type == DEV_PATH_ETHERNET) + break; + +- /* DEV_PATH_VLAN */ ++ /* DEV_PATH_VLAN and DEV_PATH_PPPOE */ + if (info->num_encaps >= NF_FLOW_TABLE_ENCAP_MAX) { + info->indev = NULL; + break; +@@ -106,6 +107,8 @@ static void nft_dev_path_info(const stru + info->encap[info->num_encaps].id 
= path->encap.id; + info->encap[info->num_encaps].proto = path->encap.proto; + info->num_encaps++; ++ if (path->type == DEV_PATH_PPPOE) ++ memcpy(info->h_dest, path->encap.h_dest, ETH_ALEN); + break; + case DEV_PATH_BRIDGE: + if (is_zero_ether_addr(info->h_source)) diff --git a/target/linux/generic/backport-5.15/610-v5.13-23-netfilter-flowtable-add-dsa-support.patch b/target/linux/generic/backport-5.15/610-v5.13-23-netfilter-flowtable-add-dsa-support.patch new file mode 100644 index 0000000000..b4931830d3 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-23-netfilter-flowtable-add-dsa-support.patch @@ -0,0 +1,32 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Wed, 24 Mar 2021 02:30:44 +0100 +Subject: [PATCH] netfilter: flowtable: add dsa support + +Replace the master ethernet device by the dsa slave port. Packets coming +in from the software ingress path use the dsa slave port as input +device. + +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/net/netfilter/nft_flow_offload.c ++++ b/net/netfilter/nft_flow_offload.c +@@ -89,6 +89,7 @@ static void nft_dev_path_info(const stru + path = &stack->path[i]; + switch (path->type) { + case DEV_PATH_ETHERNET: ++ case DEV_PATH_DSA: + case DEV_PATH_VLAN: + case DEV_PATH_PPPOE: + info->indev = path->dev; +@@ -97,6 +98,10 @@ static void nft_dev_path_info(const stru + + if (path->type == DEV_PATH_ETHERNET) + break; ++ if (path->type == DEV_PATH_DSA) { ++ i = stack->num_paths; ++ break; ++ } + + /* DEV_PATH_VLAN and DEV_PATH_PPPOE */ + if (info->num_encaps >= NF_FLOW_TABLE_ENCAP_MAX) { diff --git a/target/linux/generic/backport-5.15/610-v5.13-24-selftests-netfilter-flowtable-bridge-and-vlan-suppor.patch b/target/linux/generic/backport-5.15/610-v5.13-24-selftests-netfilter-flowtable-bridge-and-vlan-suppor.patch new file mode 100644 index 0000000000..3f332c70d3 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-24-selftests-netfilter-flowtable-bridge-and-vlan-suppor.patch @@ -0,0 +1,107 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Wed, 24 Mar 2021 02:30:45 +0100 +Subject: [PATCH] selftests: netfilter: flowtable bridge and vlan support + +This patch adds two new tests to cover bridge and vlan support: + +- Add a bridge device to the Router1 (nsr1) container and attach the + veth0 device to the bridge. Set the IP address to the bridge device + to exercise the bridge forwarding path. + +- Add vlan encapsulation between to the bridge device in the Router1 and + one of the sender containers (ns1). + +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/tools/testing/selftests/netfilter/nft_flowtable.sh ++++ b/tools/testing/selftests/netfilter/nft_flowtable.sh +@@ -370,6 +370,88 @@ else + ip netns exec nsr1 nft list ruleset + fi + ++# Another test: ++# Add bridge interface br0 to Router1, with NAT enabled. ++ip -net nsr1 link add name br0 type bridge ++ip -net nsr1 addr flush dev veth0 ++ip -net nsr1 link set up dev veth0 ++ip -net nsr1 link set veth0 master br0 ++ip -net nsr1 addr add 10.0.1.1/24 dev br0 ++ip -net nsr1 addr add dead:1::1/64 dev br0 ++ip -net nsr1 link set up dev br0 ++ ++ip netns exec nsr1 sysctl net.ipv4.conf.br0.forwarding=1 > /dev/null ++ ++# br0 with NAT enabled. 
++ip netns exec nsr1 nft -f - <<EOF ++flush table ip nat ++table ip nat { ++ chain prerouting { ++ type nat hook prerouting priority 0; policy accept; ++ meta iif "br0" ip daddr 10.6.6.6 tcp dport 1666 counter dnat ip to 10.0.2.99:12345 ++ } ++ ++ chain postrouting { ++ type nat hook postrouting priority 0; policy accept; ++ meta oifname "veth1" counter masquerade ++ } ++} ++EOF ++ ++if test_tcp_forwarding_nat ns1 ns2; then ++ echo "PASS: flow offloaded for ns1/ns2 with bridge NAT" ++else ++ echo "FAIL: flow offload for ns1/ns2 with bridge NAT" 1>&2 ++ ip netns exec nsr1 nft list ruleset ++ ret=1 ++fi ++ ++# Another test: ++# Add bridge interface br0 to Router1, with NAT and VLAN. ++ip -net nsr1 link set veth0 nomaster ++ip -net nsr1 link set down dev veth0 ++ip -net nsr1 link add link veth0 name veth0.10 type vlan id 10 ++ip -net nsr1 link set up dev veth0 ++ip -net nsr1 link set up dev veth0.10 ++ip -net nsr1 link set veth0.10 master br0 ++ ++ip -net ns1 addr flush dev eth0 ++ip -net ns1 link add link eth0 name eth0.10 type vlan id 10 ++ip -net ns1 link set eth0 up ++ip -net ns1 link set eth0.10 up ++ip -net ns1 addr add 10.0.1.99/24 dev eth0.10 ++ip -net ns1 route add default via 10.0.1.1 ++ip -net ns1 addr add dead:1::99/64 dev eth0.10 ++ ++if test_tcp_forwarding_nat ns1 ns2; then ++ echo "PASS: flow offloaded for ns1/ns2 with bridge NAT and VLAN" ++else ++ echo "FAIL: flow offload for ns1/ns2 with bridge NAT and VLAN" 1>&2 ++ ip netns exec nsr1 nft list ruleset ++ ret=1 ++fi ++ ++# restore test topology (remove bridge and VLAN) ++ip -net nsr1 link set veth0 nomaster ++ip -net nsr1 link set veth0 down ++ip -net nsr1 link set veth0.10 down ++ip -net nsr1 link delete veth0.10 type vlan ++ip -net nsr1 link delete br0 type bridge ++ip -net ns1 addr flush dev eth0.10 ++ip -net ns1 link set eth0.10 down ++ip -net ns1 link set eth0 down ++ip -net ns1 link delete eth0.10 type vlan ++ ++# restore address in ns1 and nsr1 ++ip -net ns1 link set eth0 up ++ip -net ns1 addr add 10.0.1.99/24 dev eth0 ++ip -net ns1 route add default via 10.0.1.1 ++ip -net ns1 addr add dead:1::99/64 dev eth0 ++ip -net ns1 route add default via dead:1::1 ++ip -net nsr1 addr add 10.0.1.1/24 dev veth0 ++ip -net nsr1 addr add dead:1::1/64 dev veth0 ++ip -net nsr1 link set up dev veth0 ++ + KEY_SHA="0x"$(ps -xaf | sha1sum | cut -d " " -f 1) + KEY_AES="0x"$(ps -xaf | md5sum | cut -d " " -f 1) + SPI1=$RANDOM diff --git a/target/linux/generic/backport-5.15/610-v5.13-25-netfilter-flowtable-add-offload-support-for-xmit-pat.patch b/target/linux/generic/backport-5.15/610-v5.13-25-netfilter-flowtable-add-offload-support-for-xmit-pat.patch new file mode 100644 index 0000000000..7b6ec68d55 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-25-netfilter-flowtable-add-offload-support-for-xmit-pat.patch @@ -0,0 +1,310 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Wed, 24 Mar 2021 02:30:46 +0100 +Subject: [PATCH] netfilter: flowtable: add offload support for xmit path + types + +When the flow tuple xmit_type is set to FLOW_OFFLOAD_XMIT_DIRECT, the +dst_cache pointer is not valid, and the h_source/h_dest/ifidx out fields +need to be used. + +This patch also adds the FLOW_ACTION_VLAN_PUSH action to pass the VLAN +tag to the driver. 
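
To make the new FLOW_ACTION_VLAN_PUSH action more concrete, here is a minimal userspace sketch of what a driver consuming that action ultimately has to do on egress: insert a TPID/TCI pair right after the source MAC. The vlan_push() helper below is purely illustrative and is not a kernel or driver API.

    /* vlan_push_demo.c - userspace sketch of an 802.1Q "push" on a raw frame */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    #define ETH_ALEN 6

    /* Insert an 802.1Q tag (TPID + TCI) after the MAC addresses of frame[].
     * Returns the new frame length, or -1 if the buffer is too small. */
    static int vlan_push(uint8_t *frame, size_t len, size_t cap,
                         uint16_t tpid, uint16_t vid)
    {
            uint16_t tag[2] = { htons(tpid), htons(vid & 0x0fff) };

            if (len + 4 > cap)
                    return -1;

            /* Shift everything after dst+src MAC to make room for the tag. */
            memmove(frame + 2 * ETH_ALEN + 4, frame + 2 * ETH_ALEN,
                    len - 2 * ETH_ALEN);
            memcpy(frame + 2 * ETH_ALEN, tag, sizeof(tag));

            return (int)(len + 4);
    }

    int main(void)
    {
            uint8_t frame[128] = { 0 };
            size_t len = 60;                    /* minimal Ethernet frame */
            int newlen = vlan_push(frame, len, sizeof(frame), 0x8100, 100);

            printf("len %zu -> %d, tag bytes: %02x %02x %02x %02x\n",
                   len, newlen, frame[12], frame[13], frame[14], frame[15]);
            return 0;
    }

The action entry in the patch only carries the vid and proto; doing the actual insertion (in hardware or in the driver's tx path) is the offload target's job.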
+ +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/net/netfilter/nf_flow_table_offload.c ++++ b/net/netfilter/nf_flow_table_offload.c +@@ -177,28 +177,45 @@ static int flow_offload_eth_src(struct n + enum flow_offload_tuple_dir dir, + struct nf_flow_rule *flow_rule) + { +- const struct flow_offload_tuple *tuple = &flow->tuplehash[!dir].tuple; + struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule); + struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule); +- struct net_device *dev; ++ const struct flow_offload_tuple *other_tuple, *this_tuple; ++ struct net_device *dev = NULL; ++ const unsigned char *addr; + u32 mask, val; + u16 val16; + +- dev = dev_get_by_index(net, tuple->iifidx); +- if (!dev) +- return -ENOENT; ++ this_tuple = &flow->tuplehash[dir].tuple; ++ ++ switch (this_tuple->xmit_type) { ++ case FLOW_OFFLOAD_XMIT_DIRECT: ++ addr = this_tuple->out.h_source; ++ break; ++ case FLOW_OFFLOAD_XMIT_NEIGH: ++ other_tuple = &flow->tuplehash[!dir].tuple; ++ dev = dev_get_by_index(net, other_tuple->iifidx); ++ if (!dev) ++ return -ENOENT; ++ ++ addr = dev->dev_addr; ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } + + mask = ~0xffff0000; +- memcpy(&val16, dev->dev_addr, 2); ++ memcpy(&val16, addr, 2); + val = val16 << 16; + flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4, + &val, &mask); + + mask = ~0xffffffff; +- memcpy(&val, dev->dev_addr + 2, 4); ++ memcpy(&val, addr + 2, 4); + flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8, + &val, &mask); +- dev_put(dev); ++ ++ if (dev) ++ dev_put(dev); + + return 0; + } +@@ -210,27 +227,40 @@ static int flow_offload_eth_dst(struct n + { + struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule); + struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule); +- const void *daddr = &flow->tuplehash[!dir].tuple.src_v4; ++ const struct flow_offload_tuple *other_tuple, *this_tuple; + const struct dst_entry *dst_cache; + unsigned char ha[ETH_ALEN]; + struct neighbour *n; ++ const void *daddr; + u32 mask, val; + u8 nud_state; + u16 val16; + +- dst_cache = flow->tuplehash[dir].tuple.dst_cache; +- n = dst_neigh_lookup(dst_cache, daddr); +- if (!n) +- return -ENOENT; +- +- read_lock_bh(&n->lock); +- nud_state = n->nud_state; +- ether_addr_copy(ha, n->ha); +- read_unlock_bh(&n->lock); ++ this_tuple = &flow->tuplehash[dir].tuple; + +- if (!(nud_state & NUD_VALID)) { ++ switch (this_tuple->xmit_type) { ++ case FLOW_OFFLOAD_XMIT_DIRECT: ++ ether_addr_copy(ha, this_tuple->out.h_dest); ++ break; ++ case FLOW_OFFLOAD_XMIT_NEIGH: ++ other_tuple = &flow->tuplehash[!dir].tuple; ++ daddr = &other_tuple->src_v4; ++ dst_cache = this_tuple->dst_cache; ++ n = dst_neigh_lookup(dst_cache, daddr); ++ if (!n) ++ return -ENOENT; ++ ++ read_lock_bh(&n->lock); ++ nud_state = n->nud_state; ++ ether_addr_copy(ha, n->ha); ++ read_unlock_bh(&n->lock); + neigh_release(n); +- return -ENOENT; ++ ++ if (!(nud_state & NUD_VALID)) ++ return -ENOENT; ++ break; ++ default: ++ return -EOPNOTSUPP; + } + + mask = ~0xffffffff; +@@ -243,7 +273,6 @@ static int flow_offload_eth_dst(struct n + val = val16; + flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4, + &val, &mask); +- neigh_release(n); + + return 0; + } +@@ -465,27 +494,52 @@ static void flow_offload_ipv4_checksum(s + } + } + +-static void flow_offload_redirect(const struct flow_offload *flow, ++static void flow_offload_redirect(struct net *net, ++ const struct flow_offload *flow, + enum flow_offload_tuple_dir dir, + struct nf_flow_rule 
*flow_rule) + { +- struct flow_action_entry *entry = flow_action_entry_next(flow_rule); +- struct rtable *rt; ++ const struct flow_offload_tuple *this_tuple, *other_tuple; ++ struct flow_action_entry *entry; ++ struct net_device *dev; ++ int ifindex; ++ ++ this_tuple = &flow->tuplehash[dir].tuple; ++ switch (this_tuple->xmit_type) { ++ case FLOW_OFFLOAD_XMIT_DIRECT: ++ this_tuple = &flow->tuplehash[dir].tuple; ++ ifindex = this_tuple->out.ifidx; ++ break; ++ case FLOW_OFFLOAD_XMIT_NEIGH: ++ other_tuple = &flow->tuplehash[!dir].tuple; ++ ifindex = other_tuple->iifidx; ++ break; ++ default: ++ return; ++ } + +- rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache; ++ dev = dev_get_by_index(net, ifindex); ++ if (!dev) ++ return; ++ ++ entry = flow_action_entry_next(flow_rule); + entry->id = FLOW_ACTION_REDIRECT; +- entry->dev = rt->dst.dev; +- dev_hold(rt->dst.dev); ++ entry->dev = dev; + } + + static void flow_offload_encap_tunnel(const struct flow_offload *flow, + enum flow_offload_tuple_dir dir, + struct nf_flow_rule *flow_rule) + { ++ const struct flow_offload_tuple *this_tuple; + struct flow_action_entry *entry; + struct dst_entry *dst; + +- dst = flow->tuplehash[dir].tuple.dst_cache; ++ this_tuple = &flow->tuplehash[dir].tuple; ++ if (this_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) ++ return; ++ ++ dst = this_tuple->dst_cache; + if (dst && dst->lwtstate) { + struct ip_tunnel_info *tun_info; + +@@ -502,10 +556,15 @@ static void flow_offload_decap_tunnel(co + enum flow_offload_tuple_dir dir, + struct nf_flow_rule *flow_rule) + { ++ const struct flow_offload_tuple *other_tuple; + struct flow_action_entry *entry; + struct dst_entry *dst; + +- dst = flow->tuplehash[!dir].tuple.dst_cache; ++ other_tuple = &flow->tuplehash[!dir].tuple; ++ if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) ++ return; ++ ++ dst = other_tuple->dst_cache; + if (dst && dst->lwtstate) { + struct ip_tunnel_info *tun_info; + +@@ -517,10 +576,14 @@ static void flow_offload_decap_tunnel(co + } + } + +-int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow, +- enum flow_offload_tuple_dir dir, +- struct nf_flow_rule *flow_rule) ++static int ++nf_flow_rule_route_common(struct net *net, const struct flow_offload *flow, ++ enum flow_offload_tuple_dir dir, ++ struct nf_flow_rule *flow_rule) + { ++ const struct flow_offload_tuple *other_tuple; ++ int i; ++ + flow_offload_decap_tunnel(flow, dir, flow_rule); + flow_offload_encap_tunnel(flow, dir, flow_rule); + +@@ -528,6 +591,26 @@ int nf_flow_rule_route_ipv4(struct net * + flow_offload_eth_dst(net, flow, dir, flow_rule) < 0) + return -1; + ++ other_tuple = &flow->tuplehash[!dir].tuple; ++ ++ for (i = 0; i < other_tuple->encap_num; i++) { ++ struct flow_action_entry *entry = flow_action_entry_next(flow_rule); ++ ++ entry->id = FLOW_ACTION_VLAN_PUSH; ++ entry->vlan.vid = other_tuple->encap[i].id; ++ entry->vlan.proto = other_tuple->encap[i].proto; ++ } ++ ++ return 0; ++} ++ ++int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow, ++ enum flow_offload_tuple_dir dir, ++ struct nf_flow_rule *flow_rule) ++{ ++ if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0) ++ return -1; ++ + if (test_bit(NF_FLOW_SNAT, &flow->flags)) { + flow_offload_ipv4_snat(net, flow, dir, flow_rule); + flow_offload_port_snat(net, flow, dir, flow_rule); +@@ -540,7 +623,7 @@ int nf_flow_rule_route_ipv4(struct net * + test_bit(NF_FLOW_DNAT, &flow->flags)) + flow_offload_ipv4_checksum(net, flow, flow_rule); + +- flow_offload_redirect(flow, dir, 
flow_rule); ++ flow_offload_redirect(net, flow, dir, flow_rule); + + return 0; + } +@@ -550,11 +633,7 @@ int nf_flow_rule_route_ipv6(struct net * + enum flow_offload_tuple_dir dir, + struct nf_flow_rule *flow_rule) + { +- flow_offload_decap_tunnel(flow, dir, flow_rule); +- flow_offload_encap_tunnel(flow, dir, flow_rule); +- +- if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 || +- flow_offload_eth_dst(net, flow, dir, flow_rule) < 0) ++ if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0) + return -1; + + if (test_bit(NF_FLOW_SNAT, &flow->flags)) { +@@ -566,7 +645,7 @@ int nf_flow_rule_route_ipv6(struct net * + flow_offload_port_dnat(net, flow, dir, flow_rule); + } + +- flow_offload_redirect(flow, dir, flow_rule); ++ flow_offload_redirect(net, flow, dir, flow_rule); + + return 0; + } +@@ -580,10 +659,10 @@ nf_flow_offload_rule_alloc(struct net *n + enum flow_offload_tuple_dir dir) + { + const struct nf_flowtable *flowtable = offload->flowtable; ++ const struct flow_offload_tuple *tuple, *other_tuple; + const struct flow_offload *flow = offload->flow; +- const struct flow_offload_tuple *tuple; ++ struct dst_entry *other_dst = NULL; + struct nf_flow_rule *flow_rule; +- struct dst_entry *other_dst; + int err = -ENOMEM; + + flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL); +@@ -599,7 +678,10 @@ nf_flow_offload_rule_alloc(struct net *n + flow_rule->rule->match.key = &flow_rule->match.key; + + tuple = &flow->tuplehash[dir].tuple; +- other_dst = flow->tuplehash[!dir].tuple.dst_cache; ++ other_tuple = &flow->tuplehash[!dir].tuple; ++ if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH) ++ other_dst = other_tuple->dst_cache; ++ + err = nf_flow_rule_match(&flow_rule->match, tuple, other_dst); + if (err < 0) + goto err_flow_match; diff --git a/target/linux/generic/backport-5.15/610-v5.13-26-netfilter-nft_flow_offload-use-direct-xmit-if-hardwa.patch b/target/linux/generic/backport-5.15/610-v5.13-26-netfilter-nft_flow_offload-use-direct-xmit-if-hardwa.patch new file mode 100644 index 0000000000..56bb9fd56b --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-26-netfilter-nft_flow_offload-use-direct-xmit-if-hardwa.patch @@ -0,0 +1,114 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Wed, 24 Mar 2021 02:30:47 +0100 +Subject: [PATCH] netfilter: nft_flow_offload: use direct xmit if + hardware offload is enabled + +If there is a forward path to reach an ethernet device and hardware +offload is enabled, then use the direct xmit path. + +Moreover, store the real device in the direct xmit path info since +software datapath uses dev_hard_header() to push the layer encapsulation +headers while hardware offload refers to the real device. 
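
The hunk below gates the direct xmit path on a new nft_is_valid_ether_device() helper. As a quick illustration of the address checks that helper relies on, this stand-alone userspace snippet models the usual Ethernet-address validity test (non-zero and unicast); it mirrors the semantics of the kernel helpers of the same names but is not the kernel implementation.

    /* ether_addr_demo.c - userspace model of the MAC validity checks */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define ETH_ALEN 6

    static bool is_zero_ether_addr(const uint8_t *a)
    {
            return (a[0] | a[1] | a[2] | a[3] | a[4] | a[5]) == 0;
    }

    static bool is_multicast_ether_addr(const uint8_t *a)
    {
            return a[0] & 0x01;     /* I/G bit set -> group address */
    }

    static bool is_valid_ether_addr(const uint8_t *a)
    {
            return !is_multicast_ether_addr(a) && !is_zero_ether_addr(a);
    }

    int main(void)
    {
            const uint8_t good[ETH_ALEN]  = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
            const uint8_t mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

            printf("good:  %d\nmcast: %d\n",
                   is_valid_ether_addr(good), is_valid_ether_addr(mcast));
            return 0;
    }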
+ +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/net/netfilter/nf_flow_table.h ++++ b/include/net/netfilter/nf_flow_table.h +@@ -131,6 +131,7 @@ struct flow_offload_tuple { + struct dst_entry *dst_cache; + struct { + u32 ifidx; ++ u32 hw_ifidx; + u8 h_source[ETH_ALEN]; + u8 h_dest[ETH_ALEN]; + } out; +@@ -187,6 +188,7 @@ struct nf_flow_route { + } in; + struct { + u32 ifindex; ++ u32 hw_ifindex; + u8 h_source[ETH_ALEN]; + u8 h_dest[ETH_ALEN]; + } out; +--- a/net/netfilter/nf_flow_table_core.c ++++ b/net/netfilter/nf_flow_table_core.c +@@ -106,6 +106,7 @@ static int flow_offload_fill_route(struc + memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source, + ETH_ALEN); + flow_tuple->out.ifidx = route->tuple[dir].out.ifindex; ++ flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex; + break; + case FLOW_OFFLOAD_XMIT_XFRM: + case FLOW_OFFLOAD_XMIT_NEIGH: +--- a/net/netfilter/nf_flow_table_offload.c ++++ b/net/netfilter/nf_flow_table_offload.c +@@ -508,7 +508,7 @@ static void flow_offload_redirect(struct + switch (this_tuple->xmit_type) { + case FLOW_OFFLOAD_XMIT_DIRECT: + this_tuple = &flow->tuplehash[dir].tuple; +- ifindex = this_tuple->out.ifidx; ++ ifindex = this_tuple->out.hw_ifidx; + break; + case FLOW_OFFLOAD_XMIT_NEIGH: + other_tuple = &flow->tuplehash[!dir].tuple; +--- a/net/netfilter/nft_flow_offload.c ++++ b/net/netfilter/nft_flow_offload.c +@@ -66,6 +66,7 @@ static int nft_dev_fill_forward_path(con + struct nft_forward_info { + const struct net_device *indev; + const struct net_device *outdev; ++ const struct net_device *hw_outdev; + struct id { + __u16 id; + __be16 proto; +@@ -76,9 +77,18 @@ struct nft_forward_info { + enum flow_offload_xmit_type xmit_type; + }; + ++static bool nft_is_valid_ether_device(const struct net_device *dev) ++{ ++ if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER || ++ dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr)) ++ return false; ++ ++ return true; ++} ++ + static void nft_dev_path_info(const struct net_device_path_stack *stack, + struct nft_forward_info *info, +- unsigned char *ha) ++ unsigned char *ha, struct nf_flowtable *flowtable) + { + const struct net_device_path *path; + int i; +@@ -140,6 +150,12 @@ static void nft_dev_path_info(const stru + } + if (!info->outdev) + info->outdev = info->indev; ++ ++ info->hw_outdev = info->indev; ++ ++ if (nf_flowtable_hw_offload(flowtable) && ++ nft_is_valid_ether_device(info->indev)) ++ info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT; + } + + static bool nft_flowtable_find_dev(const struct net_device *dev, +@@ -171,7 +187,7 @@ static void nft_dev_forward_path(struct + int i; + + if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0) +- nft_dev_path_info(&stack, &info, ha); ++ nft_dev_path_info(&stack, &info, ha, &ft->data); + + if (!info.indev || !nft_flowtable_find_dev(info.indev, ft)) + return; +@@ -187,6 +203,7 @@ static void nft_dev_forward_path(struct + memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN); + memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN); + route->tuple[dir].out.ifindex = info.outdev->ifindex; ++ route->tuple[dir].out.hw_ifindex = info.hw_outdev->ifindex; + route->tuple[dir].xmit_type = info.xmit_type; + } + } diff --git a/target/linux/generic/backport-5.15/610-v5.13-27-netfilter-flowtable-bridge-vlan-hardware-offload-and.patch b/target/linux/generic/backport-5.15/610-v5.13-27-netfilter-flowtable-bridge-vlan-hardware-offload-and.patch new file mode 100644 index 
0000000000..7fa58ba844 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-27-netfilter-flowtable-bridge-vlan-hardware-offload-and.patch @@ -0,0 +1,123 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Wed, 24 Mar 2021 02:30:48 +0100 +Subject: [PATCH] netfilter: flowtable: bridge vlan hardware offload and + switchdev + +The switch might have already added the VLAN tag through PVID hardware +offload. Keep this extra VLAN in the flowtable but skip it on egress. + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -849,6 +849,7 @@ struct net_device_path { + DEV_PATH_BR_VLAN_KEEP, + DEV_PATH_BR_VLAN_TAG, + DEV_PATH_BR_VLAN_UNTAG, ++ DEV_PATH_BR_VLAN_UNTAG_HW, + } vlan_mode; + u16 vlan_id; + __be16 vlan_proto; +--- a/include/net/netfilter/nf_flow_table.h ++++ b/include/net/netfilter/nf_flow_table.h +@@ -123,9 +123,10 @@ struct flow_offload_tuple { + /* All members above are keys for lookups, see flow_offload_hash(). */ + struct { } __hash; + +- u8 dir:4, ++ u8 dir:2, + xmit_type:2, +- encap_num:2; ++ encap_num:2, ++ in_vlan_ingress:2; + u16 mtu; + union { + struct dst_entry *dst_cache; +@@ -184,7 +185,8 @@ struct nf_flow_route { + u16 id; + __be16 proto; + } encap[NF_FLOW_TABLE_ENCAP_MAX]; +- u8 num_encaps; ++ u8 num_encaps:2, ++ ingress_vlans:2; + } in; + struct { + u32 ifindex; +--- a/net/bridge/br_device.c ++++ b/net/bridge/br_device.c +@@ -435,6 +435,7 @@ static int br_fill_forward_path(struct n + ctx->vlan[ctx->num_vlans].proto = path->bridge.vlan_proto; + ctx->num_vlans++; + break; ++ case DEV_PATH_BR_VLAN_UNTAG_HW: + case DEV_PATH_BR_VLAN_UNTAG: + ctx->num_vlans--; + break; +--- a/net/bridge/br_vlan.c ++++ b/net/bridge/br_vlan.c +@@ -1374,6 +1374,8 @@ int br_vlan_fill_forward_path_mode(struc + + if (path->bridge.vlan_mode == DEV_PATH_BR_VLAN_TAG) + path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP; ++ else if (v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV) ++ path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG_HW; + else + path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG; + +--- a/net/netfilter/nf_flow_table_core.c ++++ b/net/netfilter/nf_flow_table_core.c +@@ -95,6 +95,8 @@ static int flow_offload_fill_route(struc + for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) { + flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id; + flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto; ++ if (route->tuple[dir].in.ingress_vlans & BIT(i)) ++ flow_tuple->in_vlan_ingress |= BIT(j); + j++; + } + flow_tuple->encap_num = route->tuple[dir].in.num_encaps; +--- a/net/netfilter/nf_flow_table_offload.c ++++ b/net/netfilter/nf_flow_table_offload.c +@@ -594,8 +594,12 @@ nf_flow_rule_route_common(struct net *ne + other_tuple = &flow->tuplehash[!dir].tuple; + + for (i = 0; i < other_tuple->encap_num; i++) { +- struct flow_action_entry *entry = flow_action_entry_next(flow_rule); ++ struct flow_action_entry *entry; + ++ if (other_tuple->in_vlan_ingress & BIT(i)) ++ continue; ++ ++ entry = flow_action_entry_next(flow_rule); + entry->id = FLOW_ACTION_VLAN_PUSH; + entry->vlan.vid = other_tuple->encap[i].id; + entry->vlan.proto = other_tuple->encap[i].proto; +--- a/net/netfilter/nft_flow_offload.c ++++ b/net/netfilter/nft_flow_offload.c +@@ -72,6 +72,7 @@ struct nft_forward_info { + __be16 proto; + } encap[NF_FLOW_TABLE_ENCAP_MAX]; + u8 num_encaps; ++ u8 ingress_vlans; + u8 h_source[ETH_ALEN]; + u8 h_dest[ETH_ALEN]; + enum flow_offload_xmit_type xmit_type; +@@ 
-130,6 +131,9 @@ static void nft_dev_path_info(const stru + memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN); + + switch (path->bridge.vlan_mode) { ++ case DEV_PATH_BR_VLAN_UNTAG_HW: ++ info->ingress_vlans |= BIT(info->num_encaps - 1); ++ break; + case DEV_PATH_BR_VLAN_TAG: + info->encap[info->num_encaps].id = path->bridge.vlan_id; + info->encap[info->num_encaps].proto = path->bridge.vlan_proto; +@@ -198,6 +202,7 @@ static void nft_dev_forward_path(struct + route->tuple[!dir].in.encap[i].proto = info.encap[i].proto; + } + route->tuple[!dir].in.num_encaps = info.num_encaps; ++ route->tuple[!dir].in.ingress_vlans = info.ingress_vlans; + + if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) { + memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN); diff --git a/target/linux/generic/backport-5.15/610-v5.13-28-net-flow_offload-add-FLOW_ACTION_PPPOE_PUSH.patch b/target/linux/generic/backport-5.15/610-v5.13-28-net-flow_offload-add-FLOW_ACTION_PPPOE_PUSH.patch new file mode 100644 index 0000000000..64eae6871f --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-28-net-flow_offload-add-FLOW_ACTION_PPPOE_PUSH.patch @@ -0,0 +1,30 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Wed, 24 Mar 2021 02:30:49 +0100 +Subject: [PATCH] net: flow_offload: add FLOW_ACTION_PPPOE_PUSH + +Add an action to represent the PPPoE hardware offload support that +includes the session ID. + +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/net/flow_offload.h ++++ b/include/net/flow_offload.h +@@ -147,6 +147,7 @@ enum flow_action_id { + FLOW_ACTION_MPLS_POP, + FLOW_ACTION_MPLS_MANGLE, + FLOW_ACTION_GATE, ++ FLOW_ACTION_PPPOE_PUSH, + NUM_FLOW_ACTIONS, + }; + +@@ -271,6 +272,9 @@ struct flow_action_entry { + u32 num_entries; + struct action_gate_entry *entries; + } gate; ++ struct { /* FLOW_ACTION_PPPOE_PUSH */ ++ u16 sid; ++ } pppoe; + }; + struct flow_action_cookie *cookie; /* user defined action cookie */ + }; diff --git a/target/linux/generic/backport-5.15/610-v5.13-29-netfilter-flowtable-support-for-FLOW_ACTION_PPPOE_PU.patch b/target/linux/generic/backport-5.15/610-v5.13-29-netfilter-flowtable-support-for-FLOW_ACTION_PPPOE_PU.patch new file mode 100644 index 0000000000..ed7346a61a --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-29-netfilter-flowtable-support-for-FLOW_ACTION_PPPOE_PU.patch @@ -0,0 +1,35 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Wed, 24 Mar 2021 02:30:50 +0100 +Subject: [PATCH] netfilter: flowtable: support for + FLOW_ACTION_PPPOE_PUSH + +Add a PPPoE push action if layer 2 protocol is ETH_P_PPP_SES to add +PPPoE flowtable hardware offload support. 
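
For reference, a driver that honours FLOW_ACTION_PPPOE_PUSH has to synthesize a PPPoE session header (plus the PPP protocol word) around the IP payload. The userspace sketch below builds such a header for a given session id; the struct and helper names are illustrative only, and the PPP protocol is fixed to IPv4 for brevity.

    /* pppoe_push_demo.c - userspace sketch of building a PPPoE session header */
    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    #define PPP_IP 0x0021

    struct pppoe_ses_hdr {
            uint8_t  ver_type;      /* version 1, type 1 -> 0x11 */
            uint8_t  code;          /* 0x00 for session data */
            uint16_t sid;           /* session id, network order */
            uint16_t length;        /* PPP payload length incl. protocol */
            uint16_t ppp_proto;     /* PPP_IP or PPP_IPV6, network order */
    } __attribute__((packed));

    static void pppoe_build_hdr(struct pppoe_ses_hdr *h, uint16_t sid,
                                uint16_t payload_len)
    {
            h->ver_type  = 0x11;
            h->code      = 0x00;
            h->sid       = htons(sid);
            h->length    = htons(payload_len + 2);  /* + PPP protocol field */
            h->ppp_proto = htons(PPP_IP);
    }

    int main(void)
    {
            struct pppoe_ses_hdr h;

            pppoe_build_hdr(&h, 0x1234, 100);
            printf("sid=0x%04x length=%d\n",
                   (unsigned)ntohs(h.sid), ntohs(h.length));
            return 0;
    }

The flow action itself only carries the sid; everything else in the header is fixed for session-stage traffic.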
+ +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/net/netfilter/nf_flow_table_offload.c ++++ b/net/netfilter/nf_flow_table_offload.c +@@ -600,9 +600,18 @@ nf_flow_rule_route_common(struct net *ne + continue; + + entry = flow_action_entry_next(flow_rule); +- entry->id = FLOW_ACTION_VLAN_PUSH; +- entry->vlan.vid = other_tuple->encap[i].id; +- entry->vlan.proto = other_tuple->encap[i].proto; ++ ++ switch (other_tuple->encap[i].proto) { ++ case htons(ETH_P_PPP_SES): ++ entry->id = FLOW_ACTION_PPPOE_PUSH; ++ entry->pppoe.sid = other_tuple->encap[i].id; ++ break; ++ case htons(ETH_P_8021Q): ++ entry->id = FLOW_ACTION_VLAN_PUSH; ++ entry->vlan.vid = other_tuple->encap[i].id; ++ entry->vlan.proto = other_tuple->encap[i].proto; ++ break; ++ } + } + + return 0; diff --git a/target/linux/generic/backport-5.15/610-v5.13-30-dsa-slave-add-support-for-TC_SETUP_FT.patch b/target/linux/generic/backport-5.15/610-v5.13-30-dsa-slave-add-support-for-TC_SETUP_FT.patch new file mode 100644 index 0000000000..72675dc294 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-30-dsa-slave-add-support-for-TC_SETUP_FT.patch @@ -0,0 +1,53 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Wed, 24 Mar 2021 02:30:51 +0100 +Subject: [PATCH] dsa: slave: add support for TC_SETUP_FT + +The dsa infrastructure provides a well-defined hierarchy of devices, +pass up the call to set up the flow block to the master device. From the +software dataplane, the netfilter infrastructure uses the dsa slave +devices to refer to the input and output device for the given skbuff. +Similarly, the flowtable definition in the ruleset refers to the dsa +slave port devices. + +This patch adds the glue code to call ndo_setup_tc with TC_SETUP_FT +with the master device via the dsa slave devices. + +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -1239,14 +1239,32 @@ static int dsa_slave_setup_tc_block(stru + } + } + ++static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port, ++ void *type_data) ++{ ++ struct dsa_port *cpu_dp = dsa_to_port(ds, port)->cpu_dp; ++ struct net_device *master = cpu_dp->master; ++ ++ if (!master->netdev_ops->ndo_setup_tc) ++ return -EOPNOTSUPP; ++ ++ return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data); ++} ++ + static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) + { + struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_switch *ds = dp->ds; + +- if (type == TC_SETUP_BLOCK) ++ switch (type) { ++ case TC_SETUP_BLOCK: + return dsa_slave_setup_tc_block(dev, type_data); ++ case TC_SETUP_FT: ++ return dsa_slave_setup_ft_block(ds, dp->index, type_data); ++ default: ++ break; ++ } + + if (!ds->ops->port_setup_tc) + return -EOPNOTSUPP; diff --git a/target/linux/generic/backport-5.15/610-v5.13-31-net-ethernet-mtk_eth_soc-fix-parsing-packets-in-GDM.patch b/target/linux/generic/backport-5.15/610-v5.13-31-net-ethernet-mtk_eth_soc-fix-parsing-packets-in-GDM.patch new file mode 100644 index 0000000000..b763984732 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-31-net-ethernet-mtk_eth_soc-fix-parsing-packets-in-GDM.patch @@ -0,0 +1,68 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Wed, 24 Mar 2021 02:30:52 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: fix parsing packets in GDM + +When using DSA, set the special tag in GDM ingress control to allow the MAC +to parse packets properly earlier. This affects rx DMA source port reporting. 
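
The source-port fix can be illustrated with a few lines of stand-alone C that mirror the descriptor parsing in the hunk below: extract the forward-port field from rxd4 and map it to a MAC index, forcing MAC 0 whenever the DSA special tag bit is reported. The constants match the ones the patch adds to mtk_eth_soc.h.

    /* rxd4_demo.c - userspace illustration of the rx descriptor parsing */
    #include <stdio.h>
    #include <stdint.h>

    #define RX_DMA_FPORT_SHIFT  19
    #define RX_DMA_FPORT_MASK   0x7
    #define RX_DMA_SPECIAL_TAG  (1u << 22)

    static int rxd4_to_mac(uint32_t rxd4)
    {
            if (rxd4 & RX_DMA_SPECIAL_TAG)
                    return 0;       /* DSA tagged: always the first MAC */

            /* Hardware port values start at 1, MAC indices at 0. */
            return (int)((rxd4 >> RX_DMA_FPORT_SHIFT) & RX_DMA_FPORT_MASK) - 1;
    }

    int main(void)
    {
            uint32_t plain  = 2u << RX_DMA_FPORT_SHIFT;      /* port 2 */
            uint32_t tagged = plain | RX_DMA_SPECIAL_TAG;

            printf("plain -> mac %d, tagged -> mac %d\n",
                   rxd4_to_mac(plain), rxd4_to_mac(tagged));
            return 0;
    }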
+ +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -19,6 +19,7 @@ + #include <linux/interrupt.h> + #include <linux/pinctrl/devinfo.h> + #include <linux/phylink.h> ++#include <net/dsa.h> + + #include "mtk_eth_soc.h" + +@@ -1285,13 +1286,12 @@ static int mtk_poll_rx(struct napi_struc + break; + + /* find out which mac the packet come from. values start at 1 */ +- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { ++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) || ++ (trxd.rxd4 & RX_DMA_SPECIAL_TAG)) + mac = 0; +- } else { +- mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & +- RX_DMA_FPORT_MASK; +- mac--; +- } ++ else ++ mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & ++ RX_DMA_FPORT_MASK) - 1; + + if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT || + !eth->netdev[mac])) +@@ -2254,6 +2254,9 @@ static void mtk_gdm_config(struct mtk_et + + val |= config; + ++ if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0])) ++ val |= MTK_GDMA_SPECIAL_TAG; ++ + mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); + } + /* Reset and enable PSE */ +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -81,6 +81,7 @@ + + /* GDM Exgress Control Register */ + #define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000)) ++#define MTK_GDMA_SPECIAL_TAG BIT(24) + #define MTK_GDMA_ICS_EN BIT(22) + #define MTK_GDMA_TCS_EN BIT(21) + #define MTK_GDMA_UCS_EN BIT(20) +@@ -318,6 +319,7 @@ + #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */ + #define RX_DMA_FPORT_SHIFT 19 + #define RX_DMA_FPORT_MASK 0x7 ++#define RX_DMA_SPECIAL_TAG BIT(22) + + /* PHY Indirect Access Control registers */ + #define MTK_PHY_IAC 0x10004 diff --git a/target/linux/generic/backport-5.15/610-v5.13-32-net-ethernet-mtk_eth_soc-add-support-for-initializin.patch b/target/linux/generic/backport-5.15/610-v5.13-32-net-ethernet-mtk_eth_soc-add-support-for-initializin.patch new file mode 100644 index 0000000000..4fd6f8a36b --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-32-net-ethernet-mtk_eth_soc-add-support-for-initializin.patch @@ -0,0 +1,1308 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Wed, 24 Mar 2021 02:30:53 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: add support for + initializing the PPE + +The PPE (packet processing engine) is used to offload NAT/routed or even +bridged flows. This patch brings up the PPE and uses it to get a packet +hash. It also contains some functionality that will be used to bring up +flow offloading. 
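
The hashing scheme introduced here can be reproduced outside the kernel. The sketch below mirrors mtk_ppe_hash_entry() from this patch for the IPv4 HNAPT (5-tuple) case only, with addresses and ports in host byte order as they are stored in the FOE entry and the ports word assembled the way the little-endian tuple union lays it out, so you can see which FOE bucket a given flow would land in.

    /* ppe_hash_demo.c - userspace reproduction of the IPv4 HNAPT FOE hash */
    #include <stdio.h>
    #include <stdint.h>

    #define MTK_PPE_ENTRIES (1024 << 3)     /* 8192 entries, as configured here */

    static uint32_t mtk_ppe_hash_ipv4(uint32_t src_ip, uint32_t dest_ip,
                                      uint16_t src_port, uint16_t dest_port)
    {
            /* ports word as the LE tuple union stores it: dest low, src high */
            uint32_t hv1 = ((uint32_t)src_port << 16) | dest_port;
            uint32_t hv2 = dest_ip;
            uint32_t hv3 = src_ip;
            uint32_t hash;

            hash = (hv1 & hv2) | ((~hv1) & hv3);
            hash = (hash >> 24) | ((hash & 0xffffff) << 8);
            hash ^= hv1 ^ hv2 ^ hv3;
            hash ^= hash >> 16;
            hash <<= 1;
            hash &= MTK_PPE_ENTRIES - 1;

            return hash;
    }

    int main(void)
    {
            /* 192.168.1.100:1024 -> 8.8.8.8:53 */
            uint32_t h = mtk_ppe_hash_ipv4(0xc0a80164, 0x08080808, 1024, 53);

            printf("FOE hash bucket: %u\n", h);
            return 0;
    }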
+ +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe.c + create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe.h + create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c + create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_regs.h + +--- a/drivers/net/ethernet/mediatek/Makefile ++++ b/drivers/net/ethernet/mediatek/Makefile +@@ -4,5 +4,5 @@ + # + + obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o +-mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o ++mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o + obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -2279,12 +2279,17 @@ static int mtk_open(struct net_device *d + + /* we run 2 netdevs on the same dma ring so we only bring it up once */ + if (!refcount_read(ð->dma_refcnt)) { +- int err = mtk_start_dma(eth); ++ u32 gdm_config = MTK_GDMA_TO_PDMA; ++ int err; + ++ err = mtk_start_dma(eth); + if (err) + return err; + +- mtk_gdm_config(eth, MTK_GDMA_TO_PDMA); ++ if (eth->soc->offload_version && mtk_ppe_start(ð->ppe) == 0) ++ gdm_config = MTK_GDMA_TO_PPE; ++ ++ mtk_gdm_config(eth, gdm_config); + + napi_enable(ð->tx_napi); + napi_enable(ð->rx_napi); +@@ -2351,6 +2356,9 @@ static int mtk_stop(struct net_device *d + + mtk_dma_free(eth); + ++ if (eth->soc->offload_version) ++ mtk_ppe_stop(ð->ppe); ++ + return 0; + } + +@@ -3079,6 +3087,13 @@ static int mtk_probe(struct platform_dev + goto err_free_dev; + } + ++ if (eth->soc->offload_version) { ++ err = mtk_ppe_init(ð->ppe, eth->dev, ++ eth->base + MTK_ETH_PPE_BASE, 2); ++ if (err) ++ goto err_free_dev; ++ } ++ + for (i = 0; i < MTK_MAX_DEVS; i++) { + if (!eth->netdev[i]) + continue; +@@ -3153,6 +3168,7 @@ static const struct mtk_soc_data mt7621_ + .hw_features = MTK_HW_FEATURES, + .required_clks = MT7621_CLKS_BITMAP, + .required_pctl = false, ++ .offload_version = 2, + }; + + static const struct mtk_soc_data mt7622_data = { +@@ -3161,6 +3177,7 @@ static const struct mtk_soc_data mt7622_ + .hw_features = MTK_HW_FEATURES, + .required_clks = MT7622_CLKS_BITMAP, + .required_pctl = false, ++ .offload_version = 2, + }; + + static const struct mtk_soc_data mt7623_data = { +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -15,6 +15,7 @@ + #include <linux/u64_stats_sync.h> + #include <linux/refcount.h> + #include <linux/phylink.h> ++#include "mtk_ppe.h" + + #define MTK_QDMA_PAGE_SIZE 2048 + #define MTK_MAX_RX_LENGTH 1536 +@@ -86,6 +87,7 @@ + #define MTK_GDMA_TCS_EN BIT(21) + #define MTK_GDMA_UCS_EN BIT(20) + #define MTK_GDMA_TO_PDMA 0x0 ++#define MTK_GDMA_TO_PPE 0x4444 + #define MTK_GDMA_DROP_ALL 0x7777 + + /* Unicast Filter MAC Address Register - Low */ +@@ -315,6 +317,12 @@ + #define RX_DMA_VID(_x) ((_x) & 0xfff) + + /* QDMA descriptor rxd4 */ ++#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0) ++#define MTK_RXD4_PPE_CPU_REASON GENMASK(18, 14) ++#define MTK_RXD4_SRC_PORT GENMASK(21, 19) ++#define MTK_RXD4_ALG GENMASK(31, 22) ++ ++/* QDMA descriptor rxd4 */ + #define RX_DMA_L4_VALID BIT(24) + #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */ + #define RX_DMA_FPORT_SHIFT 19 +@@ -819,6 +827,7 @@ struct mtk_soc_data { + u32 caps; + u32 required_clks; + bool required_pctl; ++ u8 offload_version; + netdev_features_t hw_features; + }; + +@@ -918,6 +927,8 @@ struct mtk_eth { + u32 
tx_int_status_reg; + u32 rx_dma_l4_valid; + int ip_align; ++ ++ struct mtk_ppe ppe; + }; + + /* struct mtk_mac - the structure that holds the info about the MACs of the +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c +@@ -0,0 +1,511 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ ++ ++#include <linux/kernel.h> ++#include <linux/jiffies.h> ++#include <linux/delay.h> ++#include <linux/io.h> ++#include <linux/etherdevice.h> ++#include <linux/platform_device.h> ++#include "mtk_ppe.h" ++#include "mtk_ppe_regs.h" ++ ++static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val) ++{ ++ writel(val, ppe->base + reg); ++} ++ ++static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg) ++{ ++ return readl(ppe->base + reg); ++} ++ ++static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set) ++{ ++ u32 val; ++ ++ val = ppe_r32(ppe, reg); ++ val &= ~mask; ++ val |= set; ++ ppe_w32(ppe, reg, val); ++ ++ return val; ++} ++ ++static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val) ++{ ++ return ppe_m32(ppe, reg, 0, val); ++} ++ ++static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val) ++{ ++ return ppe_m32(ppe, reg, val, 0); ++} ++ ++static int mtk_ppe_wait_busy(struct mtk_ppe *ppe) ++{ ++ unsigned long timeout = jiffies + HZ; ++ ++ while (time_is_before_jiffies(timeout)) { ++ if (!(ppe_r32(ppe, MTK_PPE_GLO_CFG) & MTK_PPE_GLO_CFG_BUSY)) ++ return 0; ++ ++ usleep_range(10, 20); ++ } ++ ++ dev_err(ppe->dev, "PPE table busy"); ++ ++ return -ETIMEDOUT; ++} ++ ++static void mtk_ppe_cache_clear(struct mtk_ppe *ppe) ++{ ++ ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR); ++ ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR); ++} ++ ++static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable) ++{ ++ mtk_ppe_cache_clear(ppe); ++ ++ ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN, ++ enable * MTK_PPE_CACHE_CTL_EN); ++} ++ ++static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e) ++{ ++ u32 hv1, hv2, hv3; ++ u32 hash; ++ ++ switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) { ++ case MTK_PPE_PKT_TYPE_BRIDGE: ++ hv1 = e->bridge.src_mac_lo; ++ hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16); ++ hv2 = e->bridge.src_mac_hi >> 16; ++ hv2 ^= e->bridge.dest_mac_lo; ++ hv3 = e->bridge.dest_mac_hi; ++ break; ++ case MTK_PPE_PKT_TYPE_IPV4_ROUTE: ++ case MTK_PPE_PKT_TYPE_IPV4_HNAPT: ++ hv1 = e->ipv4.orig.ports; ++ hv2 = e->ipv4.orig.dest_ip; ++ hv3 = e->ipv4.orig.src_ip; ++ break; ++ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T: ++ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T: ++ hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3]; ++ hv1 ^= e->ipv6.ports; ++ ++ hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2]; ++ hv2 ^= e->ipv6.dest_ip[0]; ++ ++ hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1]; ++ hv3 ^= e->ipv6.src_ip[0]; ++ break; ++ case MTK_PPE_PKT_TYPE_IPV4_DSLITE: ++ case MTK_PPE_PKT_TYPE_IPV6_6RD: ++ default: ++ WARN_ON_ONCE(1); ++ return MTK_PPE_HASH_MASK; ++ } ++ ++ hash = (hv1 & hv2) | ((~hv1) & hv3); ++ hash = (hash >> 24) | ((hash & 0xffffff) << 8); ++ hash ^= hv1 ^ hv2 ^ hv3; ++ hash ^= hash >> 16; ++ hash <<= 1; ++ hash &= MTK_PPE_ENTRIES - 1; ++ ++ return hash; ++} ++ ++static inline struct mtk_foe_mac_info * ++mtk_foe_entry_l2(struct mtk_foe_entry *entry) ++{ ++ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); ++ ++ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) ++ return &entry->ipv6.l2; ++ ++ return &entry->ipv4.l2; ++} ++ ++static inline u32 * ++mtk_foe_entry_ib2(struct mtk_foe_entry *entry) ++{ ++ int type = 
FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); ++ ++ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) ++ return &entry->ipv6.ib2; ++ ++ return &entry->ipv4.ib2; ++} ++ ++int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto, ++ u8 pse_port, u8 *src_mac, u8 *dest_mac) ++{ ++ struct mtk_foe_mac_info *l2; ++ u32 ports_pad, val; ++ ++ memset(entry, 0, sizeof(*entry)); ++ ++ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) | ++ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) | ++ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) | ++ MTK_FOE_IB1_BIND_TTL | ++ MTK_FOE_IB1_BIND_CACHE; ++ entry->ib1 = val; ++ ++ val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) | ++ FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) | ++ FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port); ++ ++ if (is_multicast_ether_addr(dest_mac)) ++ val |= MTK_FOE_IB2_MULTICAST; ++ ++ ports_pad = 0xa5a5a500 | (l4proto & 0xff); ++ if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE) ++ entry->ipv4.orig.ports = ports_pad; ++ if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T) ++ entry->ipv6.ports = ports_pad; ++ ++ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { ++ entry->ipv6.ib2 = val; ++ l2 = &entry->ipv6.l2; ++ } else { ++ entry->ipv4.ib2 = val; ++ l2 = &entry->ipv4.l2; ++ } ++ ++ l2->dest_mac_hi = get_unaligned_be32(dest_mac); ++ l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4); ++ l2->src_mac_hi = get_unaligned_be32(src_mac); ++ l2->src_mac_lo = get_unaligned_be16(src_mac + 4); ++ ++ if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T) ++ l2->etype = ETH_P_IPV6; ++ else ++ l2->etype = ETH_P_IP; ++ ++ return 0; ++} ++ ++int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port) ++{ ++ u32 *ib2 = mtk_foe_entry_ib2(entry); ++ u32 val; ++ ++ val = *ib2; ++ val &= ~MTK_FOE_IB2_DEST_PORT; ++ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port); ++ *ib2 = val; ++ ++ return 0; ++} ++ ++int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress, ++ __be32 src_addr, __be16 src_port, ++ __be32 dest_addr, __be16 dest_port) ++{ ++ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); ++ struct mtk_ipv4_tuple *t; ++ ++ switch (type) { ++ case MTK_PPE_PKT_TYPE_IPV4_HNAPT: ++ if (egress) { ++ t = &entry->ipv4.new; ++ break; ++ } ++ fallthrough; ++ case MTK_PPE_PKT_TYPE_IPV4_DSLITE: ++ case MTK_PPE_PKT_TYPE_IPV4_ROUTE: ++ t = &entry->ipv4.orig; ++ break; ++ case MTK_PPE_PKT_TYPE_IPV6_6RD: ++ entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr); ++ entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr); ++ return 0; ++ default: ++ WARN_ON_ONCE(1); ++ return -EINVAL; ++ } ++ ++ t->src_ip = be32_to_cpu(src_addr); ++ t->dest_ip = be32_to_cpu(dest_addr); ++ ++ if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE) ++ return 0; ++ ++ t->src_port = be16_to_cpu(src_port); ++ t->dest_port = be16_to_cpu(dest_port); ++ ++ return 0; ++} ++ ++int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry, ++ __be32 *src_addr, __be16 src_port, ++ __be32 *dest_addr, __be16 dest_port) ++{ ++ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); ++ u32 *src, *dest; ++ int i; ++ ++ switch (type) { ++ case MTK_PPE_PKT_TYPE_IPV4_DSLITE: ++ src = entry->dslite.tunnel_src_ip; ++ dest = entry->dslite.tunnel_dest_ip; ++ break; ++ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T: ++ case MTK_PPE_PKT_TYPE_IPV6_6RD: ++ entry->ipv6.src_port = be16_to_cpu(src_port); ++ entry->ipv6.dest_port = be16_to_cpu(dest_port); ++ fallthrough; ++ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T: ++ src = entry->ipv6.src_ip; ++ dest = entry->ipv6.dest_ip; ++ break; ++ default: ++ WARN_ON_ONCE(1); ++ return -EINVAL; ++ }; ++ ++ 
for (i = 0; i < 4; i++) ++ src[i] = be32_to_cpu(src_addr[i]); ++ for (i = 0; i < 4; i++) ++ dest[i] = be32_to_cpu(dest_addr[i]); ++ ++ return 0; ++} ++ ++int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port) ++{ ++ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); ++ ++ l2->etype = BIT(port); ++ ++ if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER)) ++ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1); ++ else ++ l2->etype |= BIT(8); ++ ++ entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG; ++ ++ return 0; ++} ++ ++int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid) ++{ ++ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); ++ ++ switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) { ++ case 0: ++ entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG | ++ FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1); ++ l2->vlan1 = vid; ++ return 0; ++ case 1: ++ if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) { ++ l2->vlan1 = vid; ++ l2->etype |= BIT(8); ++ } else { ++ l2->vlan2 = vid; ++ entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1); ++ } ++ return 0; ++ default: ++ return -ENOSPC; ++ } ++} ++ ++int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid) ++{ ++ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); ++ ++ if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) || ++ (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) ++ l2->etype = ETH_P_PPP_SES; ++ ++ entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE; ++ l2->pppoe_id = sid; ++ ++ return 0; ++} ++ ++static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry) ++{ ++ return !(entry->ib1 & MTK_FOE_IB1_STATIC) && ++ FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND; ++} ++ ++int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, ++ u16 timestamp) ++{ ++ struct mtk_foe_entry *hwe; ++ u32 hash; ++ ++ timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP; ++ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; ++ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp); ++ ++ hash = mtk_ppe_hash_entry(entry); ++ hwe = &ppe->foe_table[hash]; ++ if (!mtk_foe_entry_usable(hwe)) { ++ hwe++; ++ hash++; ++ ++ if (!mtk_foe_entry_usable(hwe)) ++ return -ENOSPC; ++ } ++ ++ memcpy(&hwe->data, &entry->data, sizeof(hwe->data)); ++ wmb(); ++ hwe->ib1 = entry->ib1; ++ ++ dma_wmb(); ++ ++ mtk_ppe_cache_clear(ppe); ++ ++ return hash; ++} ++ ++int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, ++ int version) ++{ ++ struct mtk_foe_entry *foe; ++ ++ /* need to allocate a separate device, since it PPE DMA access is ++ * not coherent. 
++ */ ++ ppe->base = base; ++ ppe->dev = dev; ++ ppe->version = version; ++ ++ foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe), ++ &ppe->foe_phys, GFP_KERNEL); ++ if (!foe) ++ return -ENOMEM; ++ ++ ppe->foe_table = foe; ++ ++ mtk_ppe_debugfs_init(ppe); ++ ++ return 0; ++} ++ ++static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe) ++{ ++ static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 }; ++ int i, k; ++ ++ memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(ppe->foe_table)); ++ ++ if (!IS_ENABLED(CONFIG_SOC_MT7621)) ++ return; ++ ++ /* skip all entries that cross the 1024 byte boundary */ ++ for (i = 0; i < MTK_PPE_ENTRIES; i += 128) ++ for (k = 0; k < ARRAY_SIZE(skip); k++) ++ ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC; ++} ++ ++int mtk_ppe_start(struct mtk_ppe *ppe) ++{ ++ u32 val; ++ ++ mtk_ppe_init_foe_table(ppe); ++ ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys); ++ ++ val = MTK_PPE_TB_CFG_ENTRY_80B | ++ MTK_PPE_TB_CFG_AGE_NON_L4 | ++ MTK_PPE_TB_CFG_AGE_UNBIND | ++ MTK_PPE_TB_CFG_AGE_TCP | ++ MTK_PPE_TB_CFG_AGE_UDP | ++ MTK_PPE_TB_CFG_AGE_TCP_FIN | ++ FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS, ++ MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) | ++ FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE, ++ MTK_PPE_KEEPALIVE_DISABLE) | ++ FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) | ++ FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE, ++ MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) | ++ FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM, ++ MTK_PPE_ENTRIES_SHIFT); ++ ppe_w32(ppe, MTK_PPE_TB_CFG, val); ++ ++ ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK, ++ MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6); ++ ++ mtk_ppe_cache_enable(ppe, true); ++ ++ val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG | ++ MTK_PPE_FLOW_CFG_IP4_UDP_FRAG | ++ MTK_PPE_FLOW_CFG_IP6_3T_ROUTE | ++ MTK_PPE_FLOW_CFG_IP6_5T_ROUTE | ++ MTK_PPE_FLOW_CFG_IP6_6RD | ++ MTK_PPE_FLOW_CFG_IP4_NAT | ++ MTK_PPE_FLOW_CFG_IP4_NAPT | ++ MTK_PPE_FLOW_CFG_IP4_DSLITE | ++ MTK_PPE_FLOW_CFG_L2_BRIDGE | ++ MTK_PPE_FLOW_CFG_IP4_NAT_FRAG; ++ ppe_w32(ppe, MTK_PPE_FLOW_CFG, val); ++ ++ val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) | ++ FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3); ++ ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val); ++ ++ val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) | ++ FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1); ++ ppe_w32(ppe, MTK_PPE_BIND_AGE0, val); ++ ++ val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) | ++ FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7); ++ ppe_w32(ppe, MTK_PPE_BIND_AGE1, val); ++ ++ val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF; ++ ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val); ++ ++ val = MTK_PPE_BIND_LIMIT1_FULL | ++ FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1); ++ ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val); ++ ++ val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) | ++ FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1); ++ ppe_w32(ppe, MTK_PPE_BIND_RATE, val); ++ ++ /* enable PPE */ ++ val = MTK_PPE_GLO_CFG_EN | ++ MTK_PPE_GLO_CFG_IP4_L4_CS_DROP | ++ MTK_PPE_GLO_CFG_IP4_CS_DROP | ++ MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE; ++ ppe_w32(ppe, MTK_PPE_GLO_CFG, val); ++ ++ ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0); ++ ++ return 0; ++} ++ ++int mtk_ppe_stop(struct mtk_ppe *ppe) ++{ ++ u32 val; ++ int i; ++ ++ for (i = 0; i < MTK_PPE_ENTRIES; i++) ++ ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE, ++ MTK_FOE_STATE_INVALID); ++ ++ mtk_ppe_cache_enable(ppe, false); ++ ++ /* disable offload engine */ ++ ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN); ++ ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0); ++ ++ /* disable aging */ ++ val = MTK_PPE_TB_CFG_AGE_NON_L4 | ++ 
MTK_PPE_TB_CFG_AGE_UNBIND | ++ MTK_PPE_TB_CFG_AGE_TCP | ++ MTK_PPE_TB_CFG_AGE_UDP | ++ MTK_PPE_TB_CFG_AGE_TCP_FIN; ++ ppe_clear(ppe, MTK_PPE_TB_CFG, val); ++ ++ return mtk_ppe_wait_busy(ppe); ++} +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h +@@ -0,0 +1,287 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ ++ ++#ifndef __MTK_PPE_H ++#define __MTK_PPE_H ++ ++#include <linux/kernel.h> ++#include <linux/bitfield.h> ++ ++#define MTK_ETH_PPE_BASE 0xc00 ++ ++#define MTK_PPE_ENTRIES_SHIFT 3 ++#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT) ++#define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1) ++ ++#define MTK_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0) ++#define MTK_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8) ++#define MTK_FOE_IB1_UNBIND_PREBIND BIT(24) ++ ++#define MTK_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0) ++#define MTK_FOE_IB1_BIND_KEEPALIVE BIT(15) ++#define MTK_FOE_IB1_BIND_VLAN_LAYER GENMASK(18, 16) ++#define MTK_FOE_IB1_BIND_PPPOE BIT(19) ++#define MTK_FOE_IB1_BIND_VLAN_TAG BIT(20) ++#define MTK_FOE_IB1_BIND_PKT_SAMPLE BIT(21) ++#define MTK_FOE_IB1_BIND_CACHE BIT(22) ++#define MTK_FOE_IB1_BIND_TUNNEL_DECAP BIT(23) ++#define MTK_FOE_IB1_BIND_TTL BIT(24) ++ ++#define MTK_FOE_IB1_PACKET_TYPE GENMASK(27, 25) ++#define MTK_FOE_IB1_STATE GENMASK(29, 28) ++#define MTK_FOE_IB1_UDP BIT(30) ++#define MTK_FOE_IB1_STATIC BIT(31) ++ ++enum { ++ MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0, ++ MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1, ++ MTK_PPE_PKT_TYPE_BRIDGE = 2, ++ MTK_PPE_PKT_TYPE_IPV4_DSLITE = 3, ++ MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T = 4, ++ MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T = 5, ++ MTK_PPE_PKT_TYPE_IPV6_6RD = 7, ++}; ++ ++#define MTK_FOE_IB2_QID GENMASK(3, 0) ++#define MTK_FOE_IB2_PSE_QOS BIT(4) ++#define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5) ++#define MTK_FOE_IB2_MULTICAST BIT(8) ++ ++#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12) ++#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16) ++#define MTK_FOE_IB2_WHNAT_NAT BIT(17) ++ ++#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12) ++ ++#define MTK_FOE_IB2_PORT_AG GENMASK(23, 18) ++ ++#define MTK_FOE_IB2_DSCP GENMASK(31, 24) ++ ++#define MTK_FOE_VLAN2_WHNAT_BSS GEMMASK(5, 0) ++#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6) ++#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14) ++ ++enum { ++ MTK_FOE_STATE_INVALID, ++ MTK_FOE_STATE_UNBIND, ++ MTK_FOE_STATE_BIND, ++ MTK_FOE_STATE_FIN ++}; ++ ++struct mtk_foe_mac_info { ++ u16 vlan1; ++ u16 etype; ++ ++ u32 dest_mac_hi; ++ ++ u16 vlan2; ++ u16 dest_mac_lo; ++ ++ u32 src_mac_hi; ++ ++ u16 pppoe_id; ++ u16 src_mac_lo; ++}; ++ ++struct mtk_foe_bridge { ++ u32 dest_mac_hi; ++ ++ u16 src_mac_lo; ++ u16 dest_mac_lo; ++ ++ u32 src_mac_hi; ++ ++ u32 ib2; ++ ++ u32 _rsv[5]; ++ ++ u32 udf_tsid; ++ struct mtk_foe_mac_info l2; ++}; ++ ++struct mtk_ipv4_tuple { ++ u32 src_ip; ++ u32 dest_ip; ++ union { ++ struct { ++ u16 dest_port; ++ u16 src_port; ++ }; ++ struct { ++ u8 protocol; ++ u8 _pad[3]; /* fill with 0xa5a5a5 */ ++ }; ++ u32 ports; ++ }; ++}; ++ ++struct mtk_foe_ipv4 { ++ struct mtk_ipv4_tuple orig; ++ ++ u32 ib2; ++ ++ struct mtk_ipv4_tuple new; ++ ++ u16 timestamp; ++ u16 _rsv0[3]; ++ ++ u32 udf_tsid; ++ ++ struct mtk_foe_mac_info l2; ++}; ++ ++struct mtk_foe_ipv4_dslite { ++ struct mtk_ipv4_tuple ip4; ++ ++ u32 tunnel_src_ip[4]; ++ u32 tunnel_dest_ip[4]; ++ ++ u8 flow_label[3]; ++ u8 priority; ++ ++ u32 udf_tsid; ++ ++ u32 ib2; ++ ++ struct mtk_foe_mac_info l2; ++}; ++ ++struct mtk_foe_ipv6 { ++ u32 src_ip[4]; ++ u32 dest_ip[4]; ++ ++ union { ++ struct { ++ u8 protocol; ++ u8 _pad[3]; /* fill 
with 0xa5a5a5 */ ++ }; /* 3-tuple */ ++ struct { ++ u16 dest_port; ++ u16 src_port; ++ }; /* 5-tuple */ ++ u32 ports; ++ }; ++ ++ u32 _rsv[3]; ++ ++ u32 udf; ++ ++ u32 ib2; ++ struct mtk_foe_mac_info l2; ++}; ++ ++struct mtk_foe_ipv6_6rd { ++ u32 src_ip[4]; ++ u32 dest_ip[4]; ++ u16 dest_port; ++ u16 src_port; ++ ++ u32 tunnel_src_ip; ++ u32 tunnel_dest_ip; ++ ++ u16 hdr_csum; ++ u8 dscp; ++ u8 ttl; ++ ++ u8 flag; ++ u8 pad; ++ u8 per_flow_6rd_id; ++ u8 pad2; ++ ++ u32 ib2; ++ struct mtk_foe_mac_info l2; ++}; ++ ++struct mtk_foe_entry { ++ u32 ib1; ++ ++ union { ++ struct mtk_foe_bridge bridge; ++ struct mtk_foe_ipv4 ipv4; ++ struct mtk_foe_ipv4_dslite dslite; ++ struct mtk_foe_ipv6 ipv6; ++ struct mtk_foe_ipv6_6rd ipv6_6rd; ++ u32 data[19]; ++ }; ++}; ++ ++enum { ++ MTK_PPE_CPU_REASON_TTL_EXCEEDED = 0x02, ++ MTK_PPE_CPU_REASON_OPTION_HEADER = 0x03, ++ MTK_PPE_CPU_REASON_NO_FLOW = 0x07, ++ MTK_PPE_CPU_REASON_IPV4_FRAG = 0x08, ++ MTK_PPE_CPU_REASON_IPV4_DSLITE_FRAG = 0x09, ++ MTK_PPE_CPU_REASON_IPV4_DSLITE_NO_TCP_UDP = 0x0a, ++ MTK_PPE_CPU_REASON_IPV6_6RD_NO_TCP_UDP = 0x0b, ++ MTK_PPE_CPU_REASON_TCP_FIN_SYN_RST = 0x0c, ++ MTK_PPE_CPU_REASON_UN_HIT = 0x0d, ++ MTK_PPE_CPU_REASON_HIT_UNBIND = 0x0e, ++ MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f, ++ MTK_PPE_CPU_REASON_HIT_BIND_TCP_FIN = 0x10, ++ MTK_PPE_CPU_REASON_HIT_TTL_1 = 0x11, ++ MTK_PPE_CPU_REASON_HIT_BIND_VLAN_VIOLATION = 0x12, ++ MTK_PPE_CPU_REASON_KEEPALIVE_UC_OLD_HDR = 0x13, ++ MTK_PPE_CPU_REASON_KEEPALIVE_MC_NEW_HDR = 0x14, ++ MTK_PPE_CPU_REASON_KEEPALIVE_DUP_OLD_HDR = 0x15, ++ MTK_PPE_CPU_REASON_HIT_BIND_FORCE_CPU = 0x16, ++ MTK_PPE_CPU_REASON_TUNNEL_OPTION_HEADER = 0x17, ++ MTK_PPE_CPU_REASON_MULTICAST_TO_CPU = 0x18, ++ MTK_PPE_CPU_REASON_MULTICAST_TO_GMAC1_CPU = 0x19, ++ MTK_PPE_CPU_REASON_HIT_PRE_BIND = 0x1a, ++ MTK_PPE_CPU_REASON_PACKET_SAMPLING = 0x1b, ++ MTK_PPE_CPU_REASON_EXCEED_MTU = 0x1c, ++ MTK_PPE_CPU_REASON_PPE_BYPASS = 0x1e, ++ MTK_PPE_CPU_REASON_INVALID = 0x1f, ++}; ++ ++struct mtk_ppe { ++ struct device *dev; ++ void __iomem *base; ++ int version; ++ ++ struct mtk_foe_entry *foe_table; ++ dma_addr_t foe_phys; ++ ++ void *acct_table; ++}; ++ ++int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, ++ int version); ++int mtk_ppe_start(struct mtk_ppe *ppe); ++int mtk_ppe_stop(struct mtk_ppe *ppe); ++ ++static inline void ++mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash) ++{ ++ ppe->foe_table[hash].ib1 = 0; ++ dma_wmb(); ++} ++ ++static inline int ++mtk_foe_entry_timestamp(struct mtk_ppe *ppe, u16 hash) ++{ ++ u32 ib1 = READ_ONCE(ppe->foe_table[hash].ib1); ++ ++ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) ++ return -1; ++ ++ return FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1); ++} ++ ++int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto, ++ u8 pse_port, u8 *src_mac, u8 *dest_mac); ++int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port); ++int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool orig, ++ __be32 src_addr, __be16 src_port, ++ __be32 dest_addr, __be16 dest_port); ++int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry, ++ __be32 *src_addr, __be16 src_port, ++ __be32 *dest_addr, __be16 dest_port); ++int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port); ++int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid); ++int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid); ++int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, ++ u16 timestamp); ++int 
mtk_ppe_debugfs_init(struct mtk_ppe *ppe); ++ ++#endif +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c +@@ -0,0 +1,217 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ ++ ++#include <linux/kernel.h> ++#include <linux/debugfs.h> ++#include "mtk_eth_soc.h" ++ ++struct mtk_flow_addr_info ++{ ++ void *src, *dest; ++ u16 *src_port, *dest_port; ++ bool ipv6; ++}; ++ ++static const char *mtk_foe_entry_state_str(int state) ++{ ++ static const char * const state_str[] = { ++ [MTK_FOE_STATE_INVALID] = "INV", ++ [MTK_FOE_STATE_UNBIND] = "UNB", ++ [MTK_FOE_STATE_BIND] = "BND", ++ [MTK_FOE_STATE_FIN] = "FIN", ++ }; ++ ++ if (state >= ARRAY_SIZE(state_str) || !state_str[state]) ++ return "UNK"; ++ ++ return state_str[state]; ++} ++ ++static const char *mtk_foe_pkt_type_str(int type) ++{ ++ static const char * const type_str[] = { ++ [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T", ++ [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T", ++ [MTK_PPE_PKT_TYPE_BRIDGE] = "L2", ++ [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE", ++ [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T", ++ [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T", ++ [MTK_PPE_PKT_TYPE_IPV6_6RD] = "6RD", ++ }; ++ ++ if (type >= ARRAY_SIZE(type_str) || !type_str[type]) ++ return "UNKNOWN"; ++ ++ return type_str[type]; ++} ++ ++static void ++mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6) ++{ ++ u32 n_addr[4]; ++ int i; ++ ++ if (!ipv6) { ++ seq_printf(m, "%pI4h", addr); ++ return; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(n_addr); i++) ++ n_addr[i] = htonl(addr[i]); ++ seq_printf(m, "%pI6", n_addr); ++} ++ ++static void ++mtk_print_addr_info(struct seq_file *m, struct mtk_flow_addr_info *ai) ++{ ++ mtk_print_addr(m, ai->src, ai->ipv6); ++ if (ai->src_port) ++ seq_printf(m, ":%d", *ai->src_port); ++ seq_printf(m, "->"); ++ mtk_print_addr(m, ai->dest, ai->ipv6); ++ if (ai->dest_port) ++ seq_printf(m, ":%d", *ai->dest_port); ++} ++ ++static int ++mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind) ++{ ++ struct mtk_ppe *ppe = m->private; ++ int i, count; ++ ++ for (i = 0, count = 0; i < MTK_PPE_ENTRIES; i++) { ++ struct mtk_foe_entry *entry = &ppe->foe_table[i]; ++ struct mtk_foe_mac_info *l2; ++ struct mtk_flow_addr_info ai = {}; ++ unsigned char h_source[ETH_ALEN]; ++ unsigned char h_dest[ETH_ALEN]; ++ int type, state; ++ u32 ib2; ++ ++ ++ state = FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1); ++ if (!state) ++ continue; ++ ++ if (bind && state != MTK_FOE_STATE_BIND) ++ continue; ++ ++ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); ++ seq_printf(m, "%05x %s %7s", i, ++ mtk_foe_entry_state_str(state), ++ mtk_foe_pkt_type_str(type)); ++ ++ switch (type) { ++ case MTK_PPE_PKT_TYPE_IPV4_HNAPT: ++ case MTK_PPE_PKT_TYPE_IPV4_DSLITE: ++ ai.src_port = &entry->ipv4.orig.src_port; ++ ai.dest_port = &entry->ipv4.orig.dest_port; ++ fallthrough; ++ case MTK_PPE_PKT_TYPE_IPV4_ROUTE: ++ ai.src = &entry->ipv4.orig.src_ip; ++ ai.dest = &entry->ipv4.orig.dest_ip; ++ break; ++ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T: ++ ai.src_port = &entry->ipv6.src_port; ++ ai.dest_port = &entry->ipv6.dest_port; ++ fallthrough; ++ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T: ++ case MTK_PPE_PKT_TYPE_IPV6_6RD: ++ ai.src = &entry->ipv6.src_ip; ++ ai.dest = &entry->ipv6.dest_ip; ++ ai.ipv6 = true; ++ break; ++ } ++ ++ seq_printf(m, " orig="); ++ mtk_print_addr_info(m, &ai); ++ ++ switch (type) { ++ case MTK_PPE_PKT_TYPE_IPV4_HNAPT: ++ case MTK_PPE_PKT_TYPE_IPV4_DSLITE: ++ ai.src_port = &entry->ipv4.new.src_port; ++ 
ai.dest_port = &entry->ipv4.new.dest_port; ++ fallthrough; ++ case MTK_PPE_PKT_TYPE_IPV4_ROUTE: ++ ai.src = &entry->ipv4.new.src_ip; ++ ai.dest = &entry->ipv4.new.dest_ip; ++ seq_printf(m, " new="); ++ mtk_print_addr_info(m, &ai); ++ break; ++ } ++ ++ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { ++ l2 = &entry->ipv6.l2; ++ ib2 = entry->ipv6.ib2; ++ } else { ++ l2 = &entry->ipv4.l2; ++ ib2 = entry->ipv4.ib2; ++ } ++ ++ *((__be32 *)h_source) = htonl(l2->src_mac_hi); ++ *((__be16 *)&h_source[4]) = htons(l2->src_mac_lo); ++ *((__be32 *)h_dest) = htonl(l2->dest_mac_hi); ++ *((__be16 *)&h_dest[4]) = htons(l2->dest_mac_lo); ++ ++ seq_printf(m, " eth=%pM->%pM etype=%04x" ++ " vlan=%d,%d ib1=%08x ib2=%08x\n", ++ h_source, h_dest, ntohs(l2->etype), ++ l2->vlan1, l2->vlan2, entry->ib1, ib2); ++ } ++ ++ return 0; ++} ++ ++static int ++mtk_ppe_debugfs_foe_show_all(struct seq_file *m, void *private) ++{ ++ return mtk_ppe_debugfs_foe_show(m, private, false); ++} ++ ++static int ++mtk_ppe_debugfs_foe_show_bind(struct seq_file *m, void *private) ++{ ++ return mtk_ppe_debugfs_foe_show(m, private, true); ++} ++ ++static int ++mtk_ppe_debugfs_foe_open_all(struct inode *inode, struct file *file) ++{ ++ return single_open(file, mtk_ppe_debugfs_foe_show_all, ++ inode->i_private); ++} ++ ++static int ++mtk_ppe_debugfs_foe_open_bind(struct inode *inode, struct file *file) ++{ ++ return single_open(file, mtk_ppe_debugfs_foe_show_bind, ++ inode->i_private); ++} ++ ++int mtk_ppe_debugfs_init(struct mtk_ppe *ppe) ++{ ++ static const struct file_operations fops_all = { ++ .open = mtk_ppe_debugfs_foe_open_all, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++ }; ++ ++ static const struct file_operations fops_bind = { ++ .open = mtk_ppe_debugfs_foe_open_bind, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++ }; ++ ++ struct dentry *root; ++ ++ root = debugfs_create_dir("mtk_ppe", NULL); ++ if (!root) ++ return -ENOMEM; ++ ++ debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all); ++ debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind); ++ ++ return 0; ++} +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h +@@ -0,0 +1,144 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ ++ ++#ifndef __MTK_PPE_REGS_H ++#define __MTK_PPE_REGS_H ++ ++#define MTK_PPE_GLO_CFG 0x200 ++#define MTK_PPE_GLO_CFG_EN BIT(0) ++#define MTK_PPE_GLO_CFG_TSID_EN BIT(1) ++#define MTK_PPE_GLO_CFG_IP4_L4_CS_DROP BIT(2) ++#define MTK_PPE_GLO_CFG_IP4_CS_DROP BIT(3) ++#define MTK_PPE_GLO_CFG_TTL0_DROP BIT(4) ++#define MTK_PPE_GLO_CFG_PPE_BSWAP BIT(5) ++#define MTK_PPE_GLO_CFG_PSE_HASH_OFS BIT(6) ++#define MTK_PPE_GLO_CFG_MCAST_TB_EN BIT(7) ++#define MTK_PPE_GLO_CFG_FLOW_DROP_KA BIT(8) ++#define MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE BIT(9) ++#define MTK_PPE_GLO_CFG_UDP_LITE_EN BIT(10) ++#define MTK_PPE_GLO_CFG_UDP_LEN_DROP BIT(11) ++#define MTK_PPE_GLO_CFG_MCAST_ENTRIES GNEMASK(13, 12) ++#define MTK_PPE_GLO_CFG_BUSY BIT(31) ++ ++#define MTK_PPE_FLOW_CFG 0x204 ++#define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6) ++#define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7) ++#define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8) ++#define MTK_PPE_FLOW_CFG_IP6_5T_ROUTE BIT(9) ++#define MTK_PPE_FLOW_CFG_IP6_6RD BIT(10) ++#define MTK_PPE_FLOW_CFG_IP4_NAT BIT(12) ++#define MTK_PPE_FLOW_CFG_IP4_NAPT BIT(13) ++#define MTK_PPE_FLOW_CFG_IP4_DSLITE BIT(14) ++#define MTK_PPE_FLOW_CFG_L2_BRIDGE BIT(15) ++#define MTK_PPE_FLOW_CFG_IP_PROTO_BLACKLIST BIT(16) ++#define 
MTK_PPE_FLOW_CFG_IP4_NAT_FRAG BIT(17) ++#define MTK_PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL BIT(18) ++#define MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY BIT(19) ++#define MTK_PPE_FLOW_CFG_IP6_HASH_GRE_KEY BIT(20) ++ ++#define MTK_PPE_IP_PROTO_CHK 0x208 ++#define MTK_PPE_IP_PROTO_CHK_IPV4 GENMASK(15, 0) ++#define MTK_PPE_IP_PROTO_CHK_IPV6 GENMASK(31, 16) ++ ++#define MTK_PPE_TB_CFG 0x21c ++#define MTK_PPE_TB_CFG_ENTRY_NUM GENMASK(2, 0) ++#define MTK_PPE_TB_CFG_ENTRY_80B BIT(3) ++#define MTK_PPE_TB_CFG_SEARCH_MISS GENMASK(5, 4) ++#define MTK_PPE_TB_CFG_AGE_PREBIND BIT(6) ++#define MTK_PPE_TB_CFG_AGE_NON_L4 BIT(7) ++#define MTK_PPE_TB_CFG_AGE_UNBIND BIT(8) ++#define MTK_PPE_TB_CFG_AGE_TCP BIT(9) ++#define MTK_PPE_TB_CFG_AGE_UDP BIT(10) ++#define MTK_PPE_TB_CFG_AGE_TCP_FIN BIT(11) ++#define MTK_PPE_TB_CFG_KEEPALIVE GENMASK(13, 12) ++#define MTK_PPE_TB_CFG_HASH_MODE GENMASK(15, 14) ++#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16) ++#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18) ++ ++enum { ++ MTK_PPE_SCAN_MODE_DISABLED, ++ MTK_PPE_SCAN_MODE_CHECK_AGE, ++ MTK_PPE_SCAN_MODE_KEEPALIVE_AGE, ++}; ++ ++enum { ++ MTK_PPE_KEEPALIVE_DISABLE, ++ MTK_PPE_KEEPALIVE_UNICAST_CPU, ++ MTK_PPE_KEEPALIVE_DUP_CPU = 3, ++}; ++ ++enum { ++ MTK_PPE_SEARCH_MISS_ACTION_DROP, ++ MTK_PPE_SEARCH_MISS_ACTION_FORWARD = 2, ++ MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD = 3, ++}; ++ ++#define MTK_PPE_TB_BASE 0x220 ++ ++#define MTK_PPE_TB_USED 0x224 ++#define MTK_PPE_TB_USED_NUM GENMASK(13, 0) ++ ++#define MTK_PPE_BIND_RATE 0x228 ++#define MTK_PPE_BIND_RATE_BIND GENMASK(15, 0) ++#define MTK_PPE_BIND_RATE_PREBIND GENMASK(31, 16) ++ ++#define MTK_PPE_BIND_LIMIT0 0x22c ++#define MTK_PPE_BIND_LIMIT0_QUARTER GENMASK(13, 0) ++#define MTK_PPE_BIND_LIMIT0_HALF GENMASK(29, 16) ++ ++#define MTK_PPE_BIND_LIMIT1 0x230 ++#define MTK_PPE_BIND_LIMIT1_FULL GENMASK(13, 0) ++#define MTK_PPE_BIND_LIMIT1_NON_L4 GENMASK(23, 16) ++ ++#define MTK_PPE_KEEPALIVE 0x234 ++#define MTK_PPE_KEEPALIVE_TIME GENMASK(15, 0) ++#define MTK_PPE_KEEPALIVE_TIME_TCP GENMASK(23, 16) ++#define MTK_PPE_KEEPALIVE_TIME_UDP GENMASK(31, 24) ++ ++#define MTK_PPE_UNBIND_AGE 0x238 ++#define MTK_PPE_UNBIND_AGE_MIN_PACKETS GENMASK(31, 16) ++#define MTK_PPE_UNBIND_AGE_DELTA GENMASK(7, 0) ++ ++#define MTK_PPE_BIND_AGE0 0x23c ++#define MTK_PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16) ++#define MTK_PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0) ++ ++#define MTK_PPE_BIND_AGE1 0x240 ++#define MTK_PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16) ++#define MTK_PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0) ++ ++#define MTK_PPE_HASH_SEED 0x244 ++ ++#define MTK_PPE_DEFAULT_CPU_PORT 0x248 ++#define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4)) ++ ++#define MTK_PPE_MTU_DROP 0x308 ++ ++#define MTK_PPE_VLAN_MTU0 0x30c ++#define MTK_PPE_VLAN_MTU0_NONE GENMASK(13, 0) ++#define MTK_PPE_VLAN_MTU0_1TAG GENMASK(29, 16) ++ ++#define MTK_PPE_VLAN_MTU1 0x310 ++#define MTK_PPE_VLAN_MTU1_2TAG GENMASK(13, 0) ++#define MTK_PPE_VLAN_MTU1_3TAG GENMASK(29, 16) ++ ++#define MTK_PPE_VPM_TPID 0x318 ++ ++#define MTK_PPE_CACHE_CTL 0x320 ++#define MTK_PPE_CACHE_CTL_EN BIT(0) ++#define MTK_PPE_CACHE_CTL_LOCK_CLR BIT(4) ++#define MTK_PPE_CACHE_CTL_REQ BIT(8) ++#define MTK_PPE_CACHE_CTL_CLEAR BIT(9) ++#define MTK_PPE_CACHE_CTL_CMD GENMASK(13, 12) ++ ++#define MTK_PPE_MIB_CFG 0x334 ++#define MTK_PPE_MIB_CFG_EN BIT(0) ++#define MTK_PPE_MIB_CFG_RD_CLR BIT(1) ++ ++#define MTK_PPE_MIB_TB_BASE 0x338 ++ ++#define MTK_PPE_MIB_CACHE_CTL 0x350 ++#define MTK_PPE_MIB_CACHE_CTL_EN BIT(0) ++#define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2) ++ ++#endif 
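Illustration (not part of the patches above): the FOE and PPE register layouts in mtk_ppe.h and mtk_ppe_regs.h are plain GENMASK() bitfields, so an entry's ib1 word can be decoded with FIELD_GET() the same way mtk_foe_entry_timestamp() and the debugfs dump do. The sketch below is a standalone userspace approximation: GENMASK() and FIELD_GET() are simplified stand-ins for the kernel helpers from <linux/bits.h> and <linux/bitfield.h>, and the ib1 value is a made-up example, not data read from real hardware.

/* Standalone sketch: decode state, packet type and bind timestamp from a
 * hypothetical FOE ib1 word, using the field definitions from mtk_ppe.h.
 */
#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for the kernel's GENMASK()/FIELD_GET() */
#define GENMASK(h, l)        (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(mask, reg) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))

#define MTK_FOE_IB1_BIND_TIMESTAMP  GENMASK(14, 0)
#define MTK_FOE_IB1_PACKET_TYPE     GENMASK(27, 25)
#define MTK_FOE_IB1_STATE           GENMASK(29, 28)
#define MTK_FOE_STATE_BIND          2

int main(void)
{
        uint32_t ib1 = 0x20000123;  /* hypothetical bound entry, timestamp 0x123 */

        if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) == MTK_FOE_STATE_BIND)
                printf("bound, pkt type %u, bind timestamp %u\n",
                       (unsigned int)FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, ib1),
                       (unsigned int)FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1));
        return 0;
}

For the example word above this prints "bound, pkt type 0, bind timestamp 291", i.e. an IPv4 HNAPT entry in the BIND state.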
diff --git a/target/linux/generic/backport-5.15/610-v5.13-33-net-ethernet-mtk_eth_soc-add-flow-offloading-support.patch b/target/linux/generic/backport-5.15/610-v5.13-33-net-ethernet-mtk_eth_soc-add-flow-offloading-support.patch new file mode 100644 index 0000000000..599757d413 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-33-net-ethernet-mtk_eth_soc-add-flow-offloading-support.patch @@ -0,0 +1,568 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Wed, 24 Mar 2021 02:30:54 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: add flow offloading support + +This adds support for offloading IPv4 routed flows, including SNAT/DNAT, +one VLAN, PPPoE and DSA. + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_offload.c + +--- a/drivers/net/ethernet/mediatek/Makefile ++++ b/drivers/net/ethernet/mediatek/Makefile +@@ -4,5 +4,5 @@ + # + + obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o +-mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o ++mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o + obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -2834,6 +2834,7 @@ static const struct net_device_ops mtk_n + #ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mtk_poll_controller, + #endif ++ .ndo_setup_tc = mtk_eth_setup_tc, + }; + + static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) +@@ -3092,6 +3093,10 @@ static int mtk_probe(struct platform_dev + eth->base + MTK_ETH_PPE_BASE, 2); + if (err) + goto err_free_dev; ++ ++ err = mtk_eth_offload_init(eth); ++ if (err) ++ goto err_free_dev; + } + + for (i = 0; i < MTK_MAX_DEVS; i++) { +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -15,6 +15,7 @@ + #include <linux/u64_stats_sync.h> + #include <linux/refcount.h> + #include <linux/phylink.h> ++#include <linux/rhashtable.h> + #include "mtk_ppe.h" + + #define MTK_QDMA_PAGE_SIZE 2048 +@@ -40,7 +41,8 @@ + NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_SG | NETIF_F_TSO | \ + NETIF_F_TSO6 | \ +- NETIF_F_IPV6_CSUM) ++ NETIF_F_IPV6_CSUM |\ ++ NETIF_F_HW_TC) + #define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM) + #define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1)) + +@@ -929,6 +931,7 @@ struct mtk_eth { + int ip_align; + + struct mtk_ppe ppe; ++ struct rhashtable flow_table; + }; + + /* struct mtk_mac - the structure that holds the info about the MACs of the +@@ -973,4 +976,9 @@ int mtk_gmac_sgmii_path_setup(struct mtk + int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id); + int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id); + ++int mtk_eth_offload_init(struct mtk_eth *eth); ++int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, ++ void *type_data); ++ ++ + #endif /* MTK_ETH_H */ +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -0,0 +1,485 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> ++ */ ++ ++#include <linux/if_ether.h> ++#include <linux/rhashtable.h> ++#include <linux/if_ether.h> ++#include <linux/ip.h> ++#include <net/flow_offload.h> ++#include <net/pkt_cls.h> ++#include <net/dsa.h> ++#include "mtk_eth_soc.h" ++ ++struct mtk_flow_data { ++ struct ethhdr eth; ++ ++ union { ++ struct { ++ __be32 src_addr; 
++ __be32 dst_addr; ++ } v4; ++ }; ++ ++ __be16 src_port; ++ __be16 dst_port; ++ ++ struct { ++ u16 id; ++ __be16 proto; ++ u8 num; ++ } vlan; ++ struct { ++ u16 sid; ++ u8 num; ++ } pppoe; ++}; ++ ++struct mtk_flow_entry { ++ struct rhash_head node; ++ unsigned long cookie; ++ u16 hash; ++}; ++ ++static const struct rhashtable_params mtk_flow_ht_params = { ++ .head_offset = offsetof(struct mtk_flow_entry, node), ++ .head_offset = offsetof(struct mtk_flow_entry, cookie), ++ .key_len = sizeof(unsigned long), ++ .automatic_shrinking = true, ++}; ++ ++static u32 ++mtk_eth_timestamp(struct mtk_eth *eth) ++{ ++ return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP; ++} ++ ++static int ++mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data, ++ bool egress) ++{ ++ return mtk_foe_entry_set_ipv4_tuple(foe, egress, ++ data->v4.src_addr, data->src_port, ++ data->v4.dst_addr, data->dst_port); ++} ++ ++static void ++mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth) ++{ ++ void *dest = eth + act->mangle.offset; ++ const void *src = &act->mangle.val; ++ ++ if (act->mangle.offset > 8) ++ return; ++ ++ if (act->mangle.mask == 0xffff) { ++ src += 2; ++ dest += 2; ++ } ++ ++ memcpy(dest, src, act->mangle.mask ? 2 : 4); ++} ++ ++ ++static int ++mtk_flow_mangle_ports(const struct flow_action_entry *act, ++ struct mtk_flow_data *data) ++{ ++ u32 val = ntohl(act->mangle.val); ++ ++ switch (act->mangle.offset) { ++ case 0: ++ if (act->mangle.mask == ~htonl(0xffff)) ++ data->dst_port = cpu_to_be16(val); ++ else ++ data->src_port = cpu_to_be16(val >> 16); ++ break; ++ case 2: ++ data->dst_port = cpu_to_be16(val); ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int ++mtk_flow_mangle_ipv4(const struct flow_action_entry *act, ++ struct mtk_flow_data *data) ++{ ++ __be32 *dest; ++ ++ switch (act->mangle.offset) { ++ case offsetof(struct iphdr, saddr): ++ dest = &data->v4.src_addr; ++ break; ++ case offsetof(struct iphdr, daddr): ++ dest = &data->v4.dst_addr; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ memcpy(dest, &act->mangle.val, sizeof(u32)); ++ ++ return 0; ++} ++ ++static int ++mtk_flow_get_dsa_port(struct net_device **dev) ++{ ++#if IS_ENABLED(CONFIG_NET_DSA) ++ struct dsa_port *dp; ++ ++ dp = dsa_port_from_netdev(*dev); ++ if (IS_ERR(dp)) ++ return -ENODEV; ++ ++ if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK) ++ return -ENODEV; ++ ++ *dev = dp->cpu_dp->master; ++ ++ return dp->index; ++#else ++ return -ENODEV; ++#endif ++} ++ ++static int ++mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, ++ struct net_device *dev) ++{ ++ int pse_port, dsa_port; ++ ++ dsa_port = mtk_flow_get_dsa_port(&dev); ++ if (dsa_port >= 0) ++ mtk_foe_entry_set_dsa(foe, dsa_port); ++ ++ if (dev == eth->netdev[0]) ++ pse_port = 1; ++ else if (dev == eth->netdev[1]) ++ pse_port = 2; ++ else ++ return -EOPNOTSUPP; ++ ++ mtk_foe_entry_set_pse_port(foe, pse_port); ++ ++ return 0; ++} ++ ++static int ++mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) ++{ ++ struct flow_rule *rule = flow_cls_offload_flow_rule(f); ++ struct flow_action_entry *act; ++ struct mtk_flow_data data = {}; ++ struct mtk_foe_entry foe; ++ struct net_device *odev = NULL; ++ struct mtk_flow_entry *entry; ++ int offload_type = 0; ++ u16 addr_type = 0; ++ u32 timestamp; ++ u8 l4proto = 0; ++ int err = 0; ++ int hash; ++ int i; ++ ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) { ++ struct flow_match_meta match; ++ ++ 
flow_rule_match_meta(rule, &match); ++ } else { ++ return -EOPNOTSUPP; ++ } ++ ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { ++ struct flow_match_control match; ++ ++ flow_rule_match_control(rule, &match); ++ addr_type = match.key->addr_type; ++ } else { ++ return -EOPNOTSUPP; ++ } ++ ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { ++ struct flow_match_basic match; ++ ++ flow_rule_match_basic(rule, &match); ++ l4proto = match.key->ip_proto; ++ } else { ++ return -EOPNOTSUPP; ++ } ++ ++ flow_action_for_each(i, act, &rule->action) { ++ switch (act->id) { ++ case FLOW_ACTION_MANGLE: ++ if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH) ++ mtk_flow_offload_mangle_eth(act, &data.eth); ++ break; ++ case FLOW_ACTION_REDIRECT: ++ odev = act->dev; ++ break; ++ case FLOW_ACTION_CSUM: ++ break; ++ case FLOW_ACTION_VLAN_PUSH: ++ if (data.vlan.num == 1 || ++ act->vlan.proto != htons(ETH_P_8021Q)) ++ return -EOPNOTSUPP; ++ ++ data.vlan.id = act->vlan.vid; ++ data.vlan.proto = act->vlan.proto; ++ data.vlan.num++; ++ break; ++ case FLOW_ACTION_PPPOE_PUSH: ++ if (data.pppoe.num == 1) ++ return -EOPNOTSUPP; ++ ++ data.pppoe.sid = act->pppoe.sid; ++ data.pppoe.num++; ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ } ++ ++ switch (addr_type) { ++ case FLOW_DISSECTOR_KEY_IPV4_ADDRS: ++ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ if (!is_valid_ether_addr(data.eth.h_source) || ++ !is_valid_ether_addr(data.eth.h_dest)) ++ return -EINVAL; ++ ++ err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0, ++ data.eth.h_source, ++ data.eth.h_dest); ++ if (err) ++ return err; ++ ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { ++ struct flow_match_ports ports; ++ ++ flow_rule_match_ports(rule, &ports); ++ data.src_port = ports.key->src; ++ data.dst_port = ports.key->dst; ++ } else { ++ return -EOPNOTSUPP; ++ } ++ ++ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { ++ struct flow_match_ipv4_addrs addrs; ++ ++ flow_rule_match_ipv4_addrs(rule, &addrs); ++ ++ data.v4.src_addr = addrs.key->src; ++ data.v4.dst_addr = addrs.key->dst; ++ ++ mtk_flow_set_ipv4_addr(&foe, &data, false); ++ } ++ ++ flow_action_for_each(i, act, &rule->action) { ++ if (act->id != FLOW_ACTION_MANGLE) ++ continue; ++ ++ switch (act->mangle.htype) { ++ case FLOW_ACT_MANGLE_HDR_TYPE_TCP: ++ case FLOW_ACT_MANGLE_HDR_TYPE_UDP: ++ err = mtk_flow_mangle_ports(act, &data); ++ break; ++ case FLOW_ACT_MANGLE_HDR_TYPE_IP4: ++ err = mtk_flow_mangle_ipv4(act, &data); ++ break; ++ case FLOW_ACT_MANGLE_HDR_TYPE_ETH: ++ /* handled earlier */ ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ if (err) ++ return err; ++ } ++ ++ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { ++ err = mtk_flow_set_ipv4_addr(&foe, &data, true); ++ if (err) ++ return err; ++ } ++ ++ if (data.vlan.num == 1) { ++ if (data.vlan.proto != htons(ETH_P_8021Q)) ++ return -EOPNOTSUPP; ++ ++ mtk_foe_entry_set_vlan(&foe, data.vlan.id); ++ } ++ if (data.pppoe.num == 1) ++ mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid); ++ ++ err = mtk_flow_set_output_device(eth, &foe, odev); ++ if (err) ++ return err; ++ ++ entry = kzalloc(sizeof(*entry), GFP_KERNEL); ++ if (!entry) ++ return -ENOMEM; ++ ++ entry->cookie = f->cookie; ++ timestamp = mtk_eth_timestamp(eth); ++ hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp); ++ if (hash < 0) { ++ err = hash; ++ goto free; ++ } ++ ++ entry->hash = hash; ++ err = rhashtable_insert_fast(&eth->flow_table, &entry->node, ++ mtk_flow_ht_params); ++ if (err 
< 0) ++ goto clear_flow; ++ ++ return 0; ++clear_flow: ++ mtk_foe_entry_clear(&eth->ppe, hash); ++free: ++ kfree(entry); ++ return err; ++} ++ ++static int ++mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f) ++{ ++ struct mtk_flow_entry *entry; ++ ++ entry = rhashtable_lookup(&eth->flow_table, &f->cookie, ++ mtk_flow_ht_params); ++ if (!entry) ++ return -ENOENT; ++ ++ mtk_foe_entry_clear(&eth->ppe, entry->hash); ++ rhashtable_remove_fast(&eth->flow_table, &entry->node, ++ mtk_flow_ht_params); ++ kfree(entry); ++ ++ return 0; ++} ++ ++static int ++mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f) ++{ ++ struct mtk_flow_entry *entry; ++ int timestamp; ++ u32 idle; ++ ++ entry = rhashtable_lookup(&eth->flow_table, &f->cookie, ++ mtk_flow_ht_params); ++ if (!entry) ++ return -ENOENT; ++ ++ timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash); ++ if (timestamp < 0) ++ return -ETIMEDOUT; ++ ++ idle = mtk_eth_timestamp(eth) - timestamp; ++ f->stats.lastused = jiffies - idle * HZ; ++ ++ return 0; ++} ++ ++static int ++mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) ++{ ++ struct flow_cls_offload *cls = type_data; ++ struct net_device *dev = cb_priv; ++ struct mtk_mac *mac = netdev_priv(dev); ++ struct mtk_eth *eth = mac->hw; ++ ++ if (!tc_can_offload(dev)) ++ return -EOPNOTSUPP; ++ ++ if (type != TC_SETUP_CLSFLOWER) ++ return -EOPNOTSUPP; ++ ++ switch (cls->command) { ++ case FLOW_CLS_REPLACE: ++ return mtk_flow_offload_replace(eth, cls); ++ case FLOW_CLS_DESTROY: ++ return mtk_flow_offload_destroy(eth, cls); ++ case FLOW_CLS_STATS: ++ return mtk_flow_offload_stats(eth, cls); ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ return 0; ++} ++ ++static int ++mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f) ++{ ++ struct mtk_mac *mac = netdev_priv(dev); ++ struct mtk_eth *eth = mac->hw; ++ static LIST_HEAD(block_cb_list); ++ struct flow_block_cb *block_cb; ++ flow_setup_cb_t *cb; ++ ++ if (!eth->ppe.foe_table) ++ return -EOPNOTSUPP; ++ ++ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) ++ return -EOPNOTSUPP; ++ ++ cb = mtk_eth_setup_tc_block_cb; ++ f->driver_block_list = &block_cb_list; ++ ++ switch (f->command) { ++ case FLOW_BLOCK_BIND: ++ block_cb = flow_block_cb_lookup(f->block, cb, dev); ++ if (block_cb) { ++ flow_block_cb_incref(block_cb); ++ return 0; ++ } ++ block_cb = flow_block_cb_alloc(cb, dev, dev, NULL); ++ if (IS_ERR(block_cb)) ++ return PTR_ERR(block_cb); ++ ++ flow_block_cb_add(block_cb, f); ++ list_add_tail(&block_cb->driver_list, &block_cb_list); ++ return 0; ++ case FLOW_BLOCK_UNBIND: ++ block_cb = flow_block_cb_lookup(f->block, cb, dev); ++ if (!block_cb) ++ return -ENOENT; ++ ++ if (flow_block_cb_decref(block_cb)) { ++ flow_block_cb_remove(block_cb, f); ++ list_del(&block_cb->driver_list); ++ } ++ return 0; ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ ++int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, ++ void *type_data) ++{ ++ if (type == TC_SETUP_FT) ++ return mtk_eth_setup_tc_block(dev, type_data); ++ ++ return -EOPNOTSUPP; ++} ++ ++int mtk_eth_offload_init(struct mtk_eth *eth) ++{ ++ if (!eth->ppe.foe_table) ++ return 0; ++ ++ return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params); ++} diff --git a/target/linux/generic/backport-5.15/610-v5.13-34-docs-nf_flowtable-update-documentation-with-enhancem.patch b/target/linux/generic/backport-5.15/610-v5.13-34-docs-nf_flowtable-update-documentation-with-enhancem.patch new file mode 100644 index 
0000000000..2cea1ebe24 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-34-docs-nf_flowtable-update-documentation-with-enhancem.patch @@ -0,0 +1,236 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Wed, 24 Mar 2021 02:30:55 +0100 +Subject: [PATCH] docs: nf_flowtable: update documentation with + enhancements + +This patch updates the flowtable documentation to describe recent +enhancements: + +- Offload action is available after the first packets go through the + classic forwarding path. +- IPv4 and IPv6 are supported. Only TCP and UDP layer 4 are supported at + this stage. +- Tuple has been augmented to track VLAN id and PPPoE session id. +- Bridge and IP forwarding integration, including bridge VLAN filtering + support. +- Hardware offload support. +- Describe the [OFFLOAD] and [HW_OFFLOAD] tags in the conntrack table + listing. +- Replace 'flow offload' by 'flow add' in example rulesets (preferred + syntax). +- Describe existing cache limitations. + +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/Documentation/networking/nf_flowtable.rst ++++ b/Documentation/networking/nf_flowtable.rst +@@ -4,35 +4,38 @@ + Netfilter's flowtable infrastructure + ==================================== + +-This documentation describes the software flowtable infrastructure available in +-Netfilter since Linux kernel 4.16. ++This documentation describes the Netfilter flowtable infrastructure which allows ++you to define a fastpath through the flowtable datapath. This infrastructure ++also provides hardware offload support. The flowtable supports for the layer 3 ++IPv4 and IPv6 and the layer 4 TCP and UDP protocols. + + Overview + -------- + +-Initial packets follow the classic forwarding path, once the flow enters the +-established state according to the conntrack semantics (ie. we have seen traffic +-in both directions), then you can decide to offload the flow to the flowtable +-from the forward chain via the 'flow offload' action available in nftables. +- +-Packets that find an entry in the flowtable (ie. flowtable hit) are sent to the +-output netdevice via neigh_xmit(), hence, they bypass the classic forwarding +-path (the visible effect is that you do not see these packets from any of the +-netfilter hooks coming after the ingress). In case of flowtable miss, the packet +-follows the classic forward path. +- +-The flowtable uses a resizable hashtable, lookups are based on the following +-7-tuple selectors: source, destination, layer 3 and layer 4 protocols, source +-and destination ports and the input interface (useful in case there are several +-conntrack zones in place). +- +-Flowtables are populated via the 'flow offload' nftables action, so the user can +-selectively specify what flows are placed into the flow table. Hence, packets +-follow the classic forwarding path unless the user explicitly instruct packets +-to use this new alternative forwarding path via nftables policy. ++Once the first packet of the flow successfully goes through the IP forwarding ++path, from the second packet on, you might decide to offload the flow to the ++flowtable through your ruleset. The flowtable infrastructure provides a rule ++action that allows you to specify when to add a flow to the flowtable. ++ ++A packet that finds a matching entry in the flowtable (ie. 
flowtable hit) is ++transmitted to the output netdevice via neigh_xmit(), hence, packets bypass the ++classic IP forwarding path (the visible effect is that you do not see these ++packets from any of the Netfilter hooks coming after ingress). In case that ++there is no matching entry in the flowtable (ie. flowtable miss), the packet ++follows the classic IP forwarding path. ++ ++The flowtable uses a resizable hashtable. Lookups are based on the following ++n-tuple selectors: layer 2 protocol encapsulation (VLAN and PPPoE), layer 3 ++source and destination, layer 4 source and destination ports and the input ++interface (useful in case there are several conntrack zones in place). ++ ++The 'flow add' action allows you to populate the flowtable, the user selectively ++specifies what flows are placed into the flowtable. Hence, packets follow the ++classic IP forwarding path unless the user explicitly instruct flows to use this ++new alternative forwarding path via policy. + +-This is represented in Fig.1, which describes the classic forwarding path +-including the Netfilter hooks and the flowtable fastpath bypass. ++The flowtable datapath is represented in Fig.1, which describes the classic IP ++forwarding path including the Netfilter hooks and the flowtable fastpath bypass. + + :: + +@@ -67,11 +70,13 @@ including the Netfilter hooks and the fl + Fig.1 Netfilter hooks and flowtable interactions + + The flowtable entry also stores the NAT configuration, so all packets are +-mangled according to the NAT policy that matches the initial packets that went +-through the classic forwarding path. The TTL is decremented before calling +-neigh_xmit(). Fragmented traffic is passed up to follow the classic forwarding +-path given that the transport selectors are missing, therefore flowtable lookup +-is not possible. ++mangled according to the NAT policy that is specified from the classic IP ++forwarding path. The TTL is decremented before calling neigh_xmit(). Fragmented ++traffic is passed up to follow the classic IP forwarding path given that the ++transport header is missing, in this case, flowtable lookups are not possible. ++TCP RST and FIN packets are also passed up to the classic IP forwarding path to ++release the flow gracefully. Packets that exceed the MTU are also passed up to ++the classic forwarding path to report packet-too-big ICMP errors to the sender. + + Example configuration + --------------------- +@@ -85,7 +90,7 @@ flowtable and add one rule to your forwa + } + chain y { + type filter hook forward priority 0; policy accept; +- ip protocol tcp flow offload @f ++ ip protocol tcp flow add @f + counter packets 0 bytes 0 + } + } +@@ -103,6 +108,117 @@ flow is offloaded, you will observe that + does not get updated for the packets that are being forwarded through the + forwarding bypass. + ++You can identify offloaded flows through the [OFFLOAD] tag when listing your ++connection tracking table. ++ ++:: ++ # conntrack -L ++ tcp 6 src=10.141.10.2 dst=192.168.10.2 sport=52728 dport=5201 src=192.168.10.2 dst=192.168.10.1 sport=5201 dport=52728 [OFFLOAD] mark=0 use=2 ++ ++ ++Layer 2 encapsulation ++--------------------- ++ ++Since Linux kernel 5.13, the flowtable infrastructure discovers the real ++netdevice behind VLAN and PPPoE netdevices. The flowtable software datapath ++parses the VLAN and PPPoE layer 2 headers to extract the ethertype and the ++VLAN ID / PPPoE session ID which are used for the flowtable lookups. The ++flowtable datapath also deals with layer 2 decapsulation. 
++ ++You do not need to add the PPPoE and the VLAN devices to your flowtable, ++instead the real device is sufficient for the flowtable to track your flows. ++ ++Bridge and IP forwarding ++------------------------ ++ ++Since Linux kernel 5.13, you can add bridge ports to the flowtable. The ++flowtable infrastructure discovers the topology behind the bridge device. This ++allows the flowtable to define a fastpath bypass between the bridge ports ++(represented as eth1 and eth2 in the example figure below) and the gateway ++device (represented as eth0) in your switch/router. ++ ++:: ++ fastpath bypass ++ .-------------------------. ++ / \ ++ | IP forwarding | ++ | / \ \/ ++ | br0 eth0 ..... eth0 ++ . / \ *host B* ++ -> eth1 eth2 ++ . *switch/router* ++ . ++ . ++ eth0 ++ *host A* ++ ++The flowtable infrastructure also supports for bridge VLAN filtering actions ++such as PVID and untagged. You can also stack a classic VLAN device on top of ++your bridge port. ++ ++If you would like that your flowtable defines a fastpath between your bridge ++ports and your IP forwarding path, you have to add your bridge ports (as ++represented by the real netdevice) to your flowtable definition. ++ ++Counters ++-------- ++ ++The flowtable can synchronize packet and byte counters with the existing ++connection tracking entry by specifying the counter statement in your flowtable ++definition, e.g. ++ ++:: ++ table inet x { ++ flowtable f { ++ hook ingress priority 0; devices = { eth0, eth1 }; ++ counter ++ } ++ ... ++ } ++ ++Counter support is available since Linux kernel 5.7. ++ ++Hardware offload ++---------------- ++ ++If your network device provides hardware offload support, you can turn it on by ++means of the 'offload' flag in your flowtable definition, e.g. ++ ++:: ++ table inet x { ++ flowtable f { ++ hook ingress priority 0; devices = { eth0, eth1 }; ++ flags offload; ++ } ++ ... ++ } ++ ++There is a workqueue that adds the flows to the hardware. Note that a few ++packets might still run over the flowtable software path until the workqueue has ++a chance to offload the flow to the network device. ++ ++You can identify hardware offloaded flows through the [HW_OFFLOAD] tag when ++listing your connection tracking table. Please, note that the [OFFLOAD] tag ++refers to the software offload mode, so there is a distinction between [OFFLOAD] ++which refers to the software flowtable fastpath and [HW_OFFLOAD] which refers ++to the hardware offload datapath being used by the flow. ++ ++The flowtable hardware offload infrastructure also supports for the DSA ++(Distributed Switch Architecture). ++ ++Limitations ++----------- ++ ++The flowtable behaves like a cache. The flowtable entries might get stale if ++either the destination MAC address or the egress netdevice that is used for ++transmission changes. ++ ++This might be a problem if: ++ ++- You run the flowtable in software mode and you combine bridge and IP ++ forwarding in your setup. ++- Hardware offload is enabled. 
++ + More reading + ------------ + diff --git a/target/linux/generic/backport-5.15/610-v5.13-35-net-ethernet-mediatek-ppe-fix-busy-wait-loop.patch b/target/linux/generic/backport-5.15/610-v5.13-35-net-ethernet-mediatek-ppe-fix-busy-wait-loop.patch new file mode 100644 index 0000000000..66cd053cd1 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-35-net-ethernet-mediatek-ppe-fix-busy-wait-loop.patch @@ -0,0 +1,72 @@ +From c5d66587b8900201e1530b7c18d41e87bd5812f4 Mon Sep 17 00:00:00 2001 +From: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Date: Thu, 15 Apr 2021 17:37:48 -0700 +Subject: [PATCH] net: ethernet: mediatek: ppe: fix busy wait loop + +The intention is for the loop to timeout if the body does not succeed. +The current logic calls time_is_before_jiffies(timeout) which is false +until after the timeout, so the loop body never executes. + +Fix by using readl_poll_timeout as a more standard and less error-prone +solution. + +Fixes: ba37b7caf1ed ("net: ethernet: mtk_eth_soc: add support for initializing the PPE") +Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Cc: Felix Fietkau <nbd@nbd.name> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/ethernet/mediatek/mtk_ppe.c | 20 +++++++++----------- + drivers/net/ethernet/mediatek/mtk_ppe.h | 1 + + 2 files changed, 10 insertions(+), 11 deletions(-) + +--- a/drivers/net/ethernet/mediatek/mtk_ppe.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c +@@ -2,9 +2,8 @@ + /* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ + + #include <linux/kernel.h> +-#include <linux/jiffies.h> +-#include <linux/delay.h> + #include <linux/io.h> ++#include <linux/iopoll.h> + #include <linux/etherdevice.h> + #include <linux/platform_device.h> + #include "mtk_ppe.h" +@@ -44,18 +43,17 @@ static u32 ppe_clear(struct mtk_ppe *ppe + + static int mtk_ppe_wait_busy(struct mtk_ppe *ppe) + { +- unsigned long timeout = jiffies + HZ; +- +- while (time_is_before_jiffies(timeout)) { +- if (!(ppe_r32(ppe, MTK_PPE_GLO_CFG) & MTK_PPE_GLO_CFG_BUSY)) +- return 0; ++ int ret; ++ u32 val; + +- usleep_range(10, 20); +- } ++ ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val, ++ !(val & MTK_PPE_GLO_CFG_BUSY), ++ 20, MTK_PPE_WAIT_TIMEOUT_US); + +- dev_err(ppe->dev, "PPE table busy"); ++ if (ret) ++ dev_err(ppe->dev, "PPE table busy"); + +- return -ETIMEDOUT; ++ return ret; + } + + static void mtk_ppe_cache_clear(struct mtk_ppe *ppe) +--- a/drivers/net/ethernet/mediatek/mtk_ppe.h ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h +@@ -12,6 +12,7 @@ + #define MTK_PPE_ENTRIES_SHIFT 3 + #define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT) + #define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1) ++#define MTK_PPE_WAIT_TIMEOUT_US 1000000 + + #define MTK_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0) + #define MTK_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8) diff --git a/target/linux/generic/backport-5.15/610-v5.13-36-net-ethernet-mediatek-fix-a-typo-bug-in-flow-offload.patch b/target/linux/generic/backport-5.15/610-v5.13-36-net-ethernet-mediatek-fix-a-typo-bug-in-flow-offload.patch new file mode 100644 index 0000000000..de376bf78d --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-36-net-ethernet-mediatek-fix-a-typo-bug-in-flow-offload.patch @@ -0,0 +1,29 @@ +From 6ecaf81d4ac6365f9284f9d68d74f7c209e74f98 Mon Sep 17 00:00:00 2001 +From: DENG Qingfang <dqfext@gmail.com> +Date: Sat, 17 Apr 2021 15:29:04 +0800 +Subject: [PATCH] net: ethernet: mediatek: fix a typo bug in flow offloading + +Issue was traffic 
problems after a while with increased ping times if +flow offload is active. It turns out that key_offset with cookie is +needed in rhashtable_params but was re-assigned to head_offset. +Fix the assignment. + +Fixes: 502e84e2382d ("net: ethernet: mtk_eth_soc: add flow offloading support") +Signed-off-by: DENG Qingfang <dqfext@gmail.com> +Tested-by: Frank Wunderlich <frank-w@public-files.de> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -44,7 +44,7 @@ struct mtk_flow_entry { + + static const struct rhashtable_params mtk_flow_ht_params = { + .head_offset = offsetof(struct mtk_flow_entry, node), +- .head_offset = offsetof(struct mtk_flow_entry, cookie), ++ .key_offset = offsetof(struct mtk_flow_entry, cookie), + .key_len = sizeof(unsigned long), + .automatic_shrinking = true, + }; diff --git a/target/linux/generic/backport-5.15/610-v5.13-38-net-ethernet-mtk_eth_soc-unmap-RX-data-before-callin.patch b/target/linux/generic/backport-5.15/610-v5.13-38-net-ethernet-mtk_eth_soc-unmap-RX-data-before-callin.patch new file mode 100644 index 0000000000..e99302ecda --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-38-net-ethernet-mtk_eth_soc-unmap-RX-data-before-callin.patch @@ -0,0 +1,38 @@ +From 5196c417854942e218a59ec87bf7d414b3bd581e Mon Sep 17 00:00:00 2001 +From: Felix Fietkau <nbd@nbd.name> +Date: Thu, 22 Apr 2021 22:20:55 -0700 +Subject: [PATCH] net: ethernet: mtk_eth_soc: unmap RX data before calling + build_skb + +Since build_skb accesses the data area (for initializing shinfo), dma unmap +needs to happen before that call + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +[Ilya: split build_skb cleanup fix into a separate commit] +Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -1319,6 +1319,9 @@ static int mtk_poll_rx(struct napi_struc + goto release_desc; + } + ++ dma_unmap_single(eth->dev, trxd.rxd1, ++ ring->buf_size, DMA_FROM_DEVICE); ++ + /* receive data */ + skb = build_skb(data, ring->frag_size); + if (unlikely(!skb)) { +@@ -1328,8 +1331,6 @@ static int mtk_poll_rx(struct napi_struc + } + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + +- dma_unmap_single(eth->dev, trxd.rxd1, +- ring->buf_size, DMA_FROM_DEVICE); + pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); + skb->dev = netdev; + skb_put(skb, pktlen); diff --git a/target/linux/generic/backport-5.15/610-v5.13-39-net-ethernet-mtk_eth_soc-fix-build_skb-cleanup.patch b/target/linux/generic/backport-5.15/610-v5.13-39-net-ethernet-mtk_eth_soc-fix-build_skb-cleanup.patch new file mode 100644 index 0000000000..a82518882a --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-39-net-ethernet-mtk_eth_soc-fix-build_skb-cleanup.patch @@ -0,0 +1,38 @@ +From 787082ab9f7be4711e52f67c388535eda74a1269 Mon Sep 17 00:00:00 2001 +From: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Date: Thu, 22 Apr 2021 22:20:56 -0700 +Subject: [PATCH] net: ethernet: mtk_eth_soc: fix build_skb cleanup + +In case build_skb fails, call skb_free_frag on the correct pointer. 
Also +update the DMA structures with the new mapping before exiting, because +the mapping was successful + +Suggested-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -1325,9 +1325,9 @@ static int mtk_poll_rx(struct napi_struc + /* receive data */ + skb = build_skb(data, ring->frag_size); + if (unlikely(!skb)) { +- skb_free_frag(new_data); ++ skb_free_frag(data); + netdev->stats.rx_dropped++; +- goto release_desc; ++ goto skip_rx; + } + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + +@@ -1347,6 +1347,7 @@ static int mtk_poll_rx(struct napi_struc + skb_record_rx_queue(skb, 0); + napi_gro_receive(napi, skb); + ++skip_rx: + ring->data[idx] = new_data; + rxd->rxd1 = (unsigned int)dma_addr; + diff --git a/target/linux/generic/backport-5.15/610-v5.13-40-net-ethernet-mtk_eth_soc-use-napi_consume_skb.patch b/target/linux/generic/backport-5.15/610-v5.13-40-net-ethernet-mtk_eth_soc-use-napi_consume_skb.patch new file mode 100644 index 0000000000..5972b9afa7 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-40-net-ethernet-mtk_eth_soc-use-napi_consume_skb.patch @@ -0,0 +1,77 @@ +From c30c4a82739090a2de4a4e3f245355ea4fb3ec14 Mon Sep 17 00:00:00 2001 +From: Felix Fietkau <nbd@nbd.name> +Date: Thu, 22 Apr 2021 22:20:57 -0700 +Subject: [PATCH] net: ethernet: mtk_eth_soc: use napi_consume_skb + +Should improve performance, since it can use bulk free + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 19 ++++++++++++------- + 1 file changed, 12 insertions(+), 7 deletions(-) + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -879,7 +879,8 @@ static int txd_to_idx(struct mtk_tx_ring + return ((void *)dma - (void *)ring->dma) / sizeof(*dma); + } + +-static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf) ++static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, ++ bool napi) + { + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { +@@ -911,8 +912,12 @@ static void mtk_tx_unmap(struct mtk_eth + + tx_buf->flags = 0; + if (tx_buf->skb && +- (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) +- dev_kfree_skb_any(tx_buf->skb); ++ (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) { ++ if (napi) ++ napi_consume_skb(tx_buf->skb, napi); ++ else ++ dev_kfree_skb_any(tx_buf->skb); ++ } + tx_buf->skb = NULL; + } + +@@ -1090,7 +1095,7 @@ err_dma: + tx_buf = mtk_desc_to_tx_buf(ring, itxd); + + /* unmap dma */ +- mtk_tx_unmap(eth, tx_buf); ++ mtk_tx_unmap(eth, tx_buf, false); + + itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) +@@ -1409,7 +1414,7 @@ static int mtk_poll_tx_qdma(struct mtk_e + done[mac]++; + budget--; + } +- mtk_tx_unmap(eth, tx_buf); ++ mtk_tx_unmap(eth, tx_buf, true); + + ring->last_free = desc; + atomic_inc(&ring->free_count); +@@ -1446,7 +1451,7 @@ static int mtk_poll_tx_pdma(struct mtk_e + budget--; + } + +- mtk_tx_unmap(eth, tx_buf); ++ mtk_tx_unmap(eth, tx_buf, true); + + desc = &ring->dma[cpu]; + ring->last_free = desc; +@@ -1648,7 +1653,7 @@ static void mtk_tx_clean(struct mtk_eth + + if (ring->buf) { + for (i = 0; i < MTK_DMA_SIZE; i++) +- mtk_tx_unmap(eth, &ring->buf[i]); ++ mtk_tx_unmap(eth, &ring->buf[i], false); + kfree(ring->buf); + ring->buf = NULL; + } diff --git a/target/linux/generic/backport-5.15/610-v5.13-41-net-ethernet-mtk_eth_soc-reduce-MDIO-bus-access-late.patch b/target/linux/generic/backport-5.15/610-v5.13-41-net-ethernet-mtk_eth_soc-reduce-MDIO-bus-access-late.patch new file mode 100644 index 0000000000..7ebc3fa903 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-41-net-ethernet-mtk_eth_soc-reduce-MDIO-bus-access-late.patch @@ -0,0 +1,30 @@ +From 3630d519d7c3eab92567658690e44ffe0517d109 Mon Sep 17 00:00:00 2001 +From: Felix Fietkau <nbd@nbd.name> +Date: Thu, 22 Apr 2021 22:20:58 -0700 +Subject: [PATCH] net: ethernet: mtk_eth_soc: reduce MDIO bus access latency + +usleep_range often ends up sleeping much longer than the 10-20us provided +as a range here. This causes significant latency in mdio bus acceses, +which easily adds multiple seconds to the boot time on MT7621 when polling +DSA slave ports. +Use cond_resched instead of usleep_range, since the MDIO access does not +take much time + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -86,7 +86,7 @@ static int mtk_mdio_busy_wait(struct mtk + return 0; + if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT)) + break; +- usleep_range(10, 20); ++ cond_resched(); + } + + dev_err(eth->dev, "mdio: MDIO timeout\n"); diff --git a/target/linux/generic/backport-5.15/610-v5.13-42-net-ethernet-mtk_eth_soc-remove-unnecessary-TX-queue.patch b/target/linux/generic/backport-5.15/610-v5.13-42-net-ethernet-mtk_eth_soc-remove-unnecessary-TX-queue.patch new file mode 100644 index 0000000000..56d4a82824 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-42-net-ethernet-mtk_eth_soc-remove-unnecessary-TX-queue.patch @@ -0,0 +1,54 @@ +From 16ef670789b252b221700adc413497ed2f941d8a Mon Sep 17 00:00:00 2001 +From: Felix Fietkau <nbd@nbd.name> +Date: Thu, 22 Apr 2021 22:20:59 -0700 +Subject: [PATCH] net: ethernet: mtk_eth_soc: remove unnecessary TX queue stops + +When running short on descriptors, only stop the queue for the netdev that +tx was attempted for. By the time something tries to send on the other +netdev, the ring might have some more room already. + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 15 ++------------- + 1 file changed, 2 insertions(+), 13 deletions(-) + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -1152,17 +1152,6 @@ static void mtk_wake_queue(struct mtk_et + } + } + +-static void mtk_stop_queue(struct mtk_eth *eth) +-{ +- int i; +- +- for (i = 0; i < MTK_MAC_COUNT; i++) { +- if (!eth->netdev[i]) +- continue; +- netif_stop_queue(eth->netdev[i]); +- } +-} +- + static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct mtk_mac *mac = netdev_priv(dev); +@@ -1183,7 +1172,7 @@ static netdev_tx_t mtk_start_xmit(struct + + tx_num = mtk_cal_txd_req(skb); + if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { +- mtk_stop_queue(eth); ++ netif_stop_queue(dev); + netif_err(eth, tx_queued, dev, + "Tx Ring full when queue awake!\n"); + spin_unlock(ð->page_lock); +@@ -1209,7 +1198,7 @@ static netdev_tx_t mtk_start_xmit(struct + goto drop; + + if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) +- mtk_stop_queue(eth); ++ netif_stop_queue(dev); + + spin_unlock(ð->page_lock); + diff --git a/target/linux/generic/backport-5.15/610-v5.13-43-net-ethernet-mtk_eth_soc-use-larger-burst-size-for-Q.patch b/target/linux/generic/backport-5.15/610-v5.13-43-net-ethernet-mtk_eth_soc-use-larger-burst-size-for-Q.patch new file mode 100644 index 0000000000..a192250e28 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-43-net-ethernet-mtk_eth_soc-use-larger-burst-size-for-Q.patch @@ -0,0 +1,37 @@ +From 59555a8d0dd39bf60b7ca1ba5e7393d293f7398d Mon Sep 17 00:00:00 2001 +From: Felix Fietkau <nbd@nbd.name> +Date: Thu, 22 Apr 2021 22:21:00 -0700 +Subject: [PATCH] net: ethernet: mtk_eth_soc: use larger burst size for QDMA TX + +Improves tx performance + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2 +- + drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -2214,7 +2214,7 @@ static int mtk_start_dma(struct mtk_eth + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + mtk_w32(eth, + MTK_TX_WB_DDONE | MTK_TX_DMA_EN | +- MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO | ++ MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO | + MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | + MTK_RX_BT_32DWORDS, + MTK_QDMA_GLO_CFG); +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -202,7 +202,7 @@ + #define MTK_RX_BT_32DWORDS (3 << 11) + #define MTK_NDP_CO_PRO BIT(10) + #define MTK_TX_WB_DDONE BIT(6) +-#define MTK_DMA_SIZE_16DWORDS (2 << 4) ++#define MTK_TX_BT_32DWORDS (3 << 4) + #define MTK_RX_DMA_BUSY BIT(3) + #define MTK_TX_DMA_BUSY BIT(1) + #define MTK_RX_DMA_EN BIT(2) diff --git a/target/linux/generic/backport-5.15/610-v5.13-44-net-ethernet-mtk_eth_soc-increase-DMA-ring-sizes.patch b/target/linux/generic/backport-5.15/610-v5.13-44-net-ethernet-mtk_eth_soc-increase-DMA-ring-sizes.patch new file mode 100644 index 0000000000..d695f0fb8a --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-44-net-ethernet-mtk_eth_soc-increase-DMA-ring-sizes.patch @@ -0,0 +1,26 @@ +From 6b4423b258b91032c50a5efca15d3d9bb194ea1d Mon Sep 17 00:00:00 2001 +From: Felix Fietkau <nbd@nbd.name> +Date: Thu, 22 Apr 2021 22:21:01 -0700 +Subject: [PATCH] net: ethernet: mtk_eth_soc: increase DMA ring sizes + +256 descriptors is not enough for multi-gigabit traffic under load on +MT7622. Bump it to 512 to improve performance. + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -21,7 +21,7 @@ + #define MTK_QDMA_PAGE_SIZE 2048 + #define MTK_MAX_RX_LENGTH 1536 + #define MTK_TX_DMA_BUF_LEN 0x3fff +-#define MTK_DMA_SIZE 256 ++#define MTK_DMA_SIZE 512 + #define MTK_NAPI_WEIGHT 64 + #define MTK_MAC_COUNT 2 + #define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) diff --git a/target/linux/generic/backport-5.15/610-v5.13-45-net-ethernet-mtk_eth_soc-implement-dynamic-interrupt.patch b/target/linux/generic/backport-5.15/610-v5.13-45-net-ethernet-mtk_eth_soc-implement-dynamic-interrupt.patch new file mode 100644 index 0000000000..207d8397d8 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-45-net-ethernet-mtk_eth_soc-implement-dynamic-interrupt.patch @@ -0,0 +1,313 @@ +From e9229ffd550b2d8c4997c67a501dbc3919fd4e26 Mon Sep 17 00:00:00 2001 +From: Felix Fietkau <nbd@nbd.name> +Date: Thu, 22 Apr 2021 22:21:02 -0700 +Subject: [PATCH] net: ethernet: mtk_eth_soc: implement dynamic interrupt + moderation + +Reduces the number of interrupts under load + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +[Ilya: add documentation for new struct fields] +Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/ethernet/mediatek/Kconfig | 1 + + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 96 +++++++++++++++++++-- + drivers/net/ethernet/mediatek/mtk_eth_soc.h | 41 +++++++-- + 3 files changed, 124 insertions(+), 14 deletions(-) + +--- a/drivers/net/ethernet/mediatek/Kconfig ++++ b/drivers/net/ethernet/mediatek/Kconfig +@@ -10,6 +10,7 @@ if NET_VENDOR_MEDIATEK + config NET_MEDIATEK_SOC + tristate "MediaTek SoC Gigabit Ethernet support" + select PHYLINK ++ select DIMLIB + help + This driver supports the gigabit ethernet MACs in the + MediaTek SoC family. +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -1254,12 +1254,13 @@ static void mtk_update_rx_cpu_idx(struct + static int mtk_poll_rx(struct napi_struct *napi, int budget, + struct mtk_eth *eth) + { ++ struct dim_sample dim_sample = {}; + struct mtk_rx_ring *ring; + int idx; + struct sk_buff *skb; + u8 *data, *new_data; + struct mtk_rx_dma *rxd, trxd; +- int done = 0; ++ int done = 0, bytes = 0; + + while (done < budget) { + struct net_device *netdev; +@@ -1333,6 +1334,7 @@ static int mtk_poll_rx(struct napi_struc + else + skb_checksum_none_assert(skb); + skb->protocol = eth_type_trans(skb, netdev); ++ bytes += pktlen; + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && + (trxd.rxd2 & RX_DMA_VTAG)) +@@ -1365,6 +1367,12 @@ rx_done: + mtk_update_rx_cpu_idx(eth); + } + ++ eth->rx_packets += done; ++ eth->rx_bytes += bytes; ++ dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes, ++ &dim_sample); ++ net_dim(ð->rx_dim, dim_sample); ++ + return done; + } + +@@ -1457,6 +1465,7 @@ static int mtk_poll_tx_pdma(struct mtk_e + static int mtk_poll_tx(struct mtk_eth *eth, int budget) + { + struct mtk_tx_ring *ring = ð->tx_ring; ++ struct dim_sample dim_sample = {}; + unsigned int done[MTK_MAX_DEVS]; + unsigned int bytes[MTK_MAX_DEVS]; + int total = 0, i; +@@ -1474,8 +1483,14 @@ static int mtk_poll_tx(struct mtk_eth *e + continue; + netdev_completed_queue(eth->netdev[i], done[i], bytes[i]); + total += done[i]; ++ eth->tx_packets += done[i]; ++ eth->tx_bytes += bytes[i]; + } + ++ dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes, ++ &dim_sample); ++ net_dim(ð->tx_dim, dim_sample); ++ + if (mtk_queue_stopped(eth) && + (atomic_read(&ring->free_count) > ring->thresh)) + mtk_wake_queue(eth); +@@ -2150,6 +2165,7 @@ static irqreturn_t mtk_handle_irq_rx(int + { + struct mtk_eth *eth = _eth; + ++ eth->rx_events++; + if (likely(napi_schedule_prep(ð->rx_napi))) { + __napi_schedule(ð->rx_napi); + mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); +@@ -2162,6 +2178,7 @@ static irqreturn_t mtk_handle_irq_tx(int + { + struct mtk_eth *eth = _eth; + ++ eth->tx_events++; + if (likely(napi_schedule_prep(ð->tx_napi))) { + __napi_schedule(ð->tx_napi); + mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); +@@ -2346,6 +2363,9 @@ static int mtk_stop(struct net_device *d + napi_disable(ð->tx_napi); + napi_disable(ð->rx_napi); + ++ cancel_work_sync(ð->rx_dim.work); ++ cancel_work_sync(ð->tx_dim.work); ++ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); + mtk_stop_dma(eth, MTK_PDMA_GLO_CFG); +@@ -2398,6 +2418,64 @@ err_disable_clks: + return ret; + } + ++static void mtk_dim_rx(struct work_struct *work) ++{ ++ struct dim *dim = container_of(work, struct dim, work); ++ struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim); ++ struct dim_cq_moder cur_profile; ++ u32 val, cur; ++ ++ cur_profile = 
net_dim_get_rx_moderation(eth->rx_dim.mode, ++ dim->profile_ix); ++ spin_lock_bh(ð->dim_lock); ++ ++ val = mtk_r32(eth, MTK_PDMA_DELAY_INT); ++ val &= MTK_PDMA_DELAY_TX_MASK; ++ val |= MTK_PDMA_DELAY_RX_EN; ++ ++ cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK); ++ val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT; ++ ++ cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK); ++ val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT; ++ ++ mtk_w32(eth, val, MTK_PDMA_DELAY_INT); ++ mtk_w32(eth, val, MTK_QDMA_DELAY_INT); ++ ++ spin_unlock_bh(ð->dim_lock); ++ ++ dim->state = DIM_START_MEASURE; ++} ++ ++static void mtk_dim_tx(struct work_struct *work) ++{ ++ struct dim *dim = container_of(work, struct dim, work); ++ struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim); ++ struct dim_cq_moder cur_profile; ++ u32 val, cur; ++ ++ cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode, ++ dim->profile_ix); ++ spin_lock_bh(ð->dim_lock); ++ ++ val = mtk_r32(eth, MTK_PDMA_DELAY_INT); ++ val &= MTK_PDMA_DELAY_RX_MASK; ++ val |= MTK_PDMA_DELAY_TX_EN; ++ ++ cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK); ++ val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT; ++ ++ cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK); ++ val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT; ++ ++ mtk_w32(eth, val, MTK_PDMA_DELAY_INT); ++ mtk_w32(eth, val, MTK_QDMA_DELAY_INT); ++ ++ spin_unlock_bh(ð->dim_lock); ++ ++ dim->state = DIM_START_MEASURE; ++} ++ + static int mtk_hw_init(struct mtk_eth *eth) + { + int i, val, ret; +@@ -2419,9 +2497,6 @@ static int mtk_hw_init(struct mtk_eth *e + goto err_disable_pm; + } + +- /* enable interrupt delay for RX */ +- mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT); +- + /* disable delay and normal interrupt */ + mtk_tx_irq_disable(eth, ~0); + mtk_rx_irq_disable(eth, ~0); +@@ -2460,11 +2535,11 @@ static int mtk_hw_init(struct mtk_eth *e + /* Enable RX VLan Offloading */ + mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); + +- /* enable interrupt delay for RX */ +- mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT); ++ /* set interrupt delays based on current Net DIM sample */ ++ mtk_dim_rx(ð->rx_dim.work); ++ mtk_dim_tx(ð->tx_dim.work); + + /* disable delay and normal interrupt */ +- mtk_w32(eth, 0, MTK_QDMA_DELAY_INT); + mtk_tx_irq_disable(eth, ~0); + mtk_rx_irq_disable(eth, ~0); + +@@ -2969,6 +3044,13 @@ static int mtk_probe(struct platform_dev + spin_lock_init(ð->page_lock); + spin_lock_init(ð->tx_irq_lock); + spin_lock_init(ð->rx_irq_lock); ++ spin_lock_init(ð->dim_lock); ++ ++ eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; ++ INIT_WORK(ð->rx_dim.work, mtk_dim_rx); ++ ++ eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; ++ INIT_WORK(ð->tx_dim.work, mtk_dim_tx); + + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -16,6 +16,7 @@ + #include <linux/refcount.h> + #include <linux/phylink.h> + #include <linux/rhashtable.h> ++#include <linux/dim.h> + #include "mtk_ppe.h" + + #define MTK_QDMA_PAGE_SIZE 2048 +@@ -136,13 +137,18 @@ + + /* PDMA Delay Interrupt Register */ + #define MTK_PDMA_DELAY_INT 0xa0c ++#define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0) + #define MTK_PDMA_DELAY_RX_EN BIT(15) +-#define MTK_PDMA_DELAY_RX_PINT 4 + #define MTK_PDMA_DELAY_RX_PINT_SHIFT 8 +-#define MTK_PDMA_DELAY_RX_PTIME 4 +-#define MTK_PDMA_DELAY_RX_DELAY \ +- 
(MTK_PDMA_DELAY_RX_EN | MTK_PDMA_DELAY_RX_PTIME | \ +- (MTK_PDMA_DELAY_RX_PINT << MTK_PDMA_DELAY_RX_PINT_SHIFT)) ++#define MTK_PDMA_DELAY_RX_PTIME_SHIFT 0 ++ ++#define MTK_PDMA_DELAY_TX_MASK GENMASK(31, 16) ++#define MTK_PDMA_DELAY_TX_EN BIT(31) ++#define MTK_PDMA_DELAY_TX_PINT_SHIFT 24 ++#define MTK_PDMA_DELAY_TX_PTIME_SHIFT 16 ++ ++#define MTK_PDMA_DELAY_PINT_MASK 0x7f ++#define MTK_PDMA_DELAY_PTIME_MASK 0xff + + /* PDMA Interrupt Status Register */ + #define MTK_PDMA_INT_STATUS 0xa20 +@@ -224,6 +230,7 @@ + /* QDMA Interrupt Status Register */ + #define MTK_QDMA_INT_STATUS 0x1A18 + #define MTK_RX_DONE_DLY BIT(30) ++#define MTK_TX_DONE_DLY BIT(28) + #define MTK_RX_DONE_INT3 BIT(19) + #define MTK_RX_DONE_INT2 BIT(18) + #define MTK_RX_DONE_INT1 BIT(17) +@@ -233,8 +240,7 @@ + #define MTK_TX_DONE_INT1 BIT(1) + #define MTK_TX_DONE_INT0 BIT(0) + #define MTK_RX_DONE_INT MTK_RX_DONE_DLY +-#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \ +- MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3) ++#define MTK_TX_DONE_INT MTK_TX_DONE_DLY + + /* QDMA Interrupt grouping registers */ + #define MTK_QDMA_INT_GRP1 0x1a20 +@@ -863,6 +869,7 @@ struct mtk_sgmii { + * @page_lock: Make sure that register operations are atomic + * @tx_irq__lock: Make sure that IRQ register operations are atomic + * @rx_irq__lock: Make sure that IRQ register operations are atomic ++ * @dim_lock: Make sure that Net DIM operations are atomic + * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a + * dummy for NAPI to work + * @netdev: The netdev instances +@@ -881,6 +888,14 @@ struct mtk_sgmii { + * @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring + * @tx_napi: The TX NAPI struct + * @rx_napi: The RX NAPI struct ++ * @rx_events: Net DIM RX event counter ++ * @rx_packets: Net DIM RX packet counter ++ * @rx_bytes: Net DIM RX byte counter ++ * @rx_dim: Net DIM RX context ++ * @tx_events: Net DIM TX event counter ++ * @tx_packets: Net DIM TX packet counter ++ * @tx_bytes: Net DIM TX byte counter ++ * @tx_dim: Net DIM TX context + * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring + * @phy_scratch_ring: physical address of scratch_ring + * @scratch_head: The scratch memory that scratch_ring points to. +@@ -925,6 +940,18 @@ struct mtk_eth { + + const struct mtk_soc_data *soc; + ++ spinlock_t dim_lock; ++ ++ u32 rx_events; ++ u32 rx_packets; ++ u32 rx_bytes; ++ struct dim rx_dim; ++ ++ u32 tx_events; ++ u32 tx_packets; ++ u32 tx_bytes; ++ struct dim tx_dim; ++ + u32 tx_int_mask_reg; + u32 tx_int_status_reg; + u32 rx_dma_l4_valid; diff --git a/target/linux/generic/backport-5.15/610-v5.13-46-net-ethernet-mtk_eth_soc-cache-HW-pointer-of-last-fr.patch b/target/linux/generic/backport-5.15/610-v5.13-46-net-ethernet-mtk_eth_soc-cache-HW-pointer-of-last-fr.patch new file mode 100644 index 0000000000..c2252ed728 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-46-net-ethernet-mtk_eth_soc-cache-HW-pointer-of-last-fr.patch @@ -0,0 +1,73 @@ +From 4e6bf609569c59b6bd6acf4a607c096cbd820d79 Mon Sep 17 00:00:00 2001 +From: Felix Fietkau <nbd@nbd.name> +Date: Thu, 22 Apr 2021 22:21:03 -0700 +Subject: [PATCH] net: ethernet: mtk_eth_soc: cache HW pointer of last freed TX + descriptor + +The value is only updated by the CPU, so it is cheaper to access from the +ring data structure than from a hardware register. + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 8 ++++---- + drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 ++ + 2 files changed, 6 insertions(+), 4 deletions(-) + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -1385,7 +1385,7 @@ static int mtk_poll_tx_qdma(struct mtk_e + struct mtk_tx_buf *tx_buf; + u32 cpu, dma; + +- cpu = mtk_r32(eth, MTK_QTX_CRX_PTR); ++ cpu = ring->last_free_ptr; + dma = mtk_r32(eth, MTK_QTX_DRX_PTR); + + desc = mtk_qdma_phys_to_virt(ring, cpu); +@@ -1419,6 +1419,7 @@ static int mtk_poll_tx_qdma(struct mtk_e + cpu = next_cpu; + } + ++ ring->last_free_ptr = cpu; + mtk_w32(eth, cpu, MTK_QTX_CRX_PTR); + + return budget; +@@ -1619,6 +1620,7 @@ static int mtk_tx_alloc(struct mtk_eth * + atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); + ring->next_free = &ring->dma[0]; + ring->last_free = &ring->dma[MTK_DMA_SIZE - 1]; ++ ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz)); + ring->thresh = MAX_SKB_FRAGS; + + /* make sure that all changes to the dma ring are flushed before we +@@ -1632,9 +1634,7 @@ static int mtk_tx_alloc(struct mtk_eth * + mtk_w32(eth, + ring->phys + ((MTK_DMA_SIZE - 1) * sz), + MTK_QTX_CRX_PTR); +- mtk_w32(eth, +- ring->phys + ((MTK_DMA_SIZE - 1) * sz), +- MTK_QTX_DRX_PTR); ++ mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR); + mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, + MTK_QTX_CFG(0)); + } else { +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -656,6 +656,7 @@ struct mtk_tx_buf { + * @phys: The physical addr of tx_buf + * @next_free: Pointer to the next free descriptor + * @last_free: Pointer to the last free descriptor ++ * @last_free_ptr: Hardware pointer value of the last free descriptor + * @thresh: The threshold of minimum amount of free descriptors + * @free_count: QDMA uses a linked list. Track how many free descriptors + * are present +@@ -666,6 +667,7 @@ struct mtk_tx_ring { + dma_addr_t phys; + struct mtk_tx_dma *next_free; + struct mtk_tx_dma *last_free; ++ u32 last_free_ptr; + u16 thresh; + atomic_t free_count; + int dma_size; diff --git a/target/linux/generic/backport-5.15/610-v5.13-47-net-ethernet-mtk_eth_soc-only-read-the-full-RX-descr.patch b/target/linux/generic/backport-5.15/610-v5.13-47-net-ethernet-mtk_eth_soc-only-read-the-full-RX-descr.patch new file mode 100644 index 0000000000..09b8b3afb9 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-47-net-ethernet-mtk_eth_soc-only-read-the-full-RX-descr.patch @@ -0,0 +1,49 @@ +From 816ac3e6e67bdd78d86226c6eb53619780750e92 Mon Sep 17 00:00:00 2001 +From: Felix Fietkau <nbd@nbd.name> +Date: Thu, 22 Apr 2021 22:21:04 -0700 +Subject: [PATCH] net: ethernet: mtk_eth_soc: only read the full RX descriptor + if DMA is done + +Uncached memory access is expensive, and there is no need to access all +descriptor words if we can't process them anyway + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 12 ++++++++---- + 1 file changed, 8 insertions(+), 4 deletions(-) + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -798,13 +798,18 @@ static inline int mtk_max_buf_size(int f + return buf_size; + } + +-static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd, ++static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd, + struct mtk_rx_dma *dma_rxd) + { +- rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); + rxd->rxd2 = READ_ONCE(dma_rxd->rxd2); ++ if (!(rxd->rxd2 & RX_DMA_DONE)) ++ return false; ++ ++ rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); + rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); + rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); ++ ++ return true; + } + + /* the qdma core needs scratch memory to be setup */ +@@ -1276,8 +1281,7 @@ static int mtk_poll_rx(struct napi_struc + rxd = &ring->dma[idx]; + data = ring->data[idx]; + +- mtk_rx_get_desc(&trxd, rxd); +- if (!(trxd.rxd2 & RX_DMA_DONE)) ++ if (!mtk_rx_get_desc(&trxd, rxd)) + break; + + /* find out which mac the packet come from. values start at 1 */ diff --git a/target/linux/generic/backport-5.15/610-v5.13-48-net-ethernet-mtk_eth_soc-reduce-unnecessary-interrup.patch b/target/linux/generic/backport-5.15/610-v5.13-48-net-ethernet-mtk_eth_soc-reduce-unnecessary-interrup.patch new file mode 100644 index 0000000000..411374c2e7 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-48-net-ethernet-mtk_eth_soc-reduce-unnecessary-interrup.patch @@ -0,0 +1,39 @@ +From 16769a8923fad5a5377253bcd76b0e0d64976c73 Mon Sep 17 00:00:00 2001 +From: Felix Fietkau <nbd@nbd.name> +Date: Thu, 22 Apr 2021 22:21:05 -0700 +Subject: [PATCH] net: ethernet: mtk_eth_soc: reduce unnecessary interrupts + +Avoid rearming interrupt if napi_complete returns false + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -1540,8 +1540,8 @@ static int mtk_napi_tx(struct napi_struc + if (status & MTK_TX_DONE_INT) + return budget; + +- napi_complete(napi); +- mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); ++ if (napi_complete(napi)) ++ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); + + return tx_done; + } +@@ -1574,8 +1574,9 @@ poll_again: + remain_budget -= rx_done; + goto poll_again; + } +- napi_complete(napi); +- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); ++ ++ if (napi_complete(napi)) ++ mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); + + return rx_done + budget - remain_budget; + } diff --git a/target/linux/generic/backport-5.15/610-v5.13-49-net-ethernet-mtk_eth_soc-rework-NAPI-callbacks.patch b/target/linux/generic/backport-5.15/610-v5.13-49-net-ethernet-mtk_eth_soc-rework-NAPI-callbacks.patch new file mode 100644 index 0000000000..d0926325d9 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-49-net-ethernet-mtk_eth_soc-rework-NAPI-callbacks.patch @@ -0,0 +1,110 @@ +From db2c7b353db3b3f71b55f9ff4627d8a786446fbe Mon Sep 17 00:00:00 2001 +From: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Date: Thu, 22 Apr 2021 22:21:06 -0700 +Subject: [PATCH] net: ethernet: mtk_eth_soc: rework NAPI callbacks + +Use napi_complete_done to communicate total TX and RX work done to NAPI. +Count total RX work up instead of remaining work down for clarity. 
+Remove unneeded local variables for clarity. Use do {} while instead of +goto for clarity. + +Suggested-by: Jakub Kicinski <kuba@kernel.org> +Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 54 +++++++++------------ + 1 file changed, 24 insertions(+), 30 deletions(-) + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -1517,7 +1517,6 @@ static void mtk_handle_status_irq(struct + static int mtk_napi_tx(struct napi_struct *napi, int budget) + { + struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi); +- u32 status, mask; + int tx_done = 0; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) +@@ -1526,21 +1525,19 @@ static int mtk_napi_tx(struct napi_struc + tx_done = mtk_poll_tx(eth, budget); + + if (unlikely(netif_msg_intr(eth))) { +- status = mtk_r32(eth, eth->tx_int_status_reg); +- mask = mtk_r32(eth, eth->tx_int_mask_reg); + dev_info(eth->dev, +- "done tx %d, intr 0x%08x/0x%x\n", +- tx_done, status, mask); ++ "done tx %d, intr 0x%08x/0x%x\n", tx_done, ++ mtk_r32(eth, eth->tx_int_status_reg), ++ mtk_r32(eth, eth->tx_int_mask_reg)); + } + + if (tx_done == budget) + return budget; + +- status = mtk_r32(eth, eth->tx_int_status_reg); +- if (status & MTK_TX_DONE_INT) ++ if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT) + return budget; + +- if (napi_complete(napi)) ++ if (napi_complete_done(napi, tx_done)) + mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); + + return tx_done; +@@ -1549,36 +1546,33 @@ static int mtk_napi_tx(struct napi_struc + static int mtk_napi_rx(struct napi_struct *napi, int budget) + { + struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); +- u32 status, mask; +- int rx_done = 0; +- int remain_budget = budget; ++ int rx_done_total = 0; + + mtk_handle_status_irq(eth); + +-poll_again: +- mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS); +- rx_done = mtk_poll_rx(napi, remain_budget, eth); ++ do { ++ int rx_done; + +- if (unlikely(netif_msg_intr(eth))) { +- status = mtk_r32(eth, MTK_PDMA_INT_STATUS); +- mask = mtk_r32(eth, MTK_PDMA_INT_MASK); +- dev_info(eth->dev, +- "done rx %d, intr 0x%08x/0x%x\n", +- rx_done, status, mask); +- } +- if (rx_done == remain_budget) +- return budget; ++ mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS); ++ rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); ++ rx_done_total += rx_done; ++ ++ if (unlikely(netif_msg_intr(eth))) { ++ dev_info(eth->dev, ++ "done rx %d, intr 0x%08x/0x%x\n", rx_done, ++ mtk_r32(eth, MTK_PDMA_INT_STATUS), ++ mtk_r32(eth, MTK_PDMA_INT_MASK)); ++ } + +- status = mtk_r32(eth, MTK_PDMA_INT_STATUS); +- if (status & MTK_RX_DONE_INT) { +- remain_budget -= rx_done; +- goto poll_again; +- } ++ if (rx_done_total == budget) ++ return budget; ++ ++ } while (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT); + +- if (napi_complete(napi)) ++ if (napi_complete_done(napi, rx_done_total)) + mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); + +- return rx_done + budget - remain_budget; ++ return rx_done_total; + } + + static int mtk_tx_alloc(struct mtk_eth *eth) diff --git a/target/linux/generic/backport-5.15/610-v5.13-50-net-ethernet-mtk_eth_soc-set-PPE-flow-hash-as-skb-ha.patch b/target/linux/generic/backport-5.15/610-v5.13-50-net-ethernet-mtk_eth_soc-set-PPE-flow-hash-as-skb-ha.patch new file mode 100644 index 0000000000..ab298ec42b --- /dev/null +++ 
b/target/linux/generic/backport-5.15/610-v5.13-50-net-ethernet-mtk_eth_soc-set-PPE-flow-hash-as-skb-ha.patch @@ -0,0 +1,47 @@ +From fa817272c37ef78e25dc14e4760ac78a7043a18a Mon Sep 17 00:00:00 2001 +From: Felix Fietkau <nbd@nbd.name> +Date: Thu, 22 Apr 2021 22:21:07 -0700 +Subject: [PATCH] net: ethernet: mtk_eth_soc: set PPE flow hash as skb hash if + present + +This improves GRO performance + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +[Ilya: Use MTK_RXD4_FOE_ENTRY instead of GENMASK(13, 0)] +Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -19,6 +19,7 @@ + #include <linux/interrupt.h> + #include <linux/pinctrl/devinfo.h> + #include <linux/phylink.h> ++#include <linux/jhash.h> + #include <net/dsa.h> + + #include "mtk_eth_soc.h" +@@ -1271,6 +1272,7 @@ static int mtk_poll_rx(struct napi_struc + struct net_device *netdev; + unsigned int pktlen; + dma_addr_t dma_addr; ++ u32 hash; + int mac; + + ring = mtk_get_rx_ring(eth); +@@ -1340,6 +1342,12 @@ static int mtk_poll_rx(struct napi_struc + skb->protocol = eth_type_trans(skb, netdev); + bytes += pktlen; + ++ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY; ++ if (hash != MTK_RXD4_FOE_ENTRY) { ++ hash = jhash_1word(hash, 0); ++ skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); ++ } ++ + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && + (trxd.rxd2 & RX_DMA_VTAG)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), diff --git a/target/linux/generic/backport-5.15/610-v5.13-51-net-ethernet-mtk_eth_soc-use-iopoll.h-macro-for-DMA-.patch b/target/linux/generic/backport-5.15/610-v5.13-51-net-ethernet-mtk_eth_soc-use-iopoll.h-macro-for-DMA-.patch new file mode 100644 index 0000000000..26bb33e42c --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-51-net-ethernet-mtk_eth_soc-use-iopoll.h-macro-for-DMA-.patch @@ -0,0 +1,71 @@ +From 3bc8e0aff23be0526af0dbc7973a8866a08d73f1 Mon Sep 17 00:00:00 2001 +From: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Date: Thu, 22 Apr 2021 22:21:08 -0700 +Subject: [PATCH] net: ethernet: mtk_eth_soc: use iopoll.h macro for DMA init + +Replace a tight busy-wait loop without a pause with a standard +readx_poll_timeout_atomic routine with a 5 us poll period. + +Tested by booting a MT7621 device to ensure the driver initializes +properly. + +Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 29 +++++++++------------ + drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 +- + 2 files changed, 14 insertions(+), 17 deletions(-) + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -2054,25 +2054,22 @@ static int mtk_set_features(struct net_d + /* wait for DMA to finish whatever it is doing before we start using it again */ + static int mtk_dma_busy_wait(struct mtk_eth *eth) + { +- unsigned long t_start = jiffies; ++ unsigned int reg; ++ int ret; ++ u32 val; + +- while (1) { +- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { +- if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) & +- (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY))) +- return 0; +- } else { +- if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) & +- (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY))) +- return 0; +- } ++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) ++ reg = MTK_QDMA_GLO_CFG; ++ else ++ reg = MTK_PDMA_GLO_CFG; + +- if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT)) +- break; +- } ++ ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val, ++ !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)), ++ 5, MTK_DMA_BUSY_TIMEOUT_US); ++ if (ret) ++ dev_err(eth->dev, "DMA init timeout\n"); + +- dev_err(eth->dev, "DMA init timeout\n"); +- return -1; ++ return ret; + } + + static int mtk_dma_init(struct mtk_eth *eth) +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -213,7 +213,7 @@ + #define MTK_TX_DMA_BUSY BIT(1) + #define MTK_RX_DMA_EN BIT(2) + #define MTK_TX_DMA_EN BIT(0) +-#define MTK_DMA_BUSY_TIMEOUT HZ ++#define MTK_DMA_BUSY_TIMEOUT_US 1000000 + + /* QDMA Reset Index Register */ + #define MTK_QDMA_RST_IDX 0x1A08 diff --git a/target/linux/generic/backport-5.15/610-v5.13-52-net-ethernet-mtk_eth_soc-missing-mutex.patch b/target/linux/generic/backport-5.15/610-v5.13-52-net-ethernet-mtk_eth_soc-missing-mutex.patch new file mode 100644 index 0000000000..a846ce43e2 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-52-net-ethernet-mtk_eth_soc-missing-mutex.patch @@ -0,0 +1,63 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Sun, 18 Apr 2021 23:11:44 +0200 +Subject: [PATCH] net: ethernet: mtk_eth_soc: missing mutex + +Patch 2ed37183abb7 ("netfilter: flowtable: separate replace, destroy and +stats to different workqueues") splits the workqueue per event type. Add +a mutex to serialize updates. + +Fixes: 502e84e2382d ("net: ethernet: mtk_eth_soc: add flow offloading support") +Reported-by: Frank Wunderlich <frank-w@public-files.de> +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -392,6 +392,8 @@ mtk_flow_offload_stats(struct mtk_eth *e + return 0; + } + ++static DEFINE_MUTEX(mtk_flow_offload_mutex); ++ + static int + mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) + { +@@ -399,6 +401,7 @@ mtk_eth_setup_tc_block_cb(enum tc_setup_ + struct net_device *dev = cb_priv; + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; ++ int err; + + if (!tc_can_offload(dev)) + return -EOPNOTSUPP; +@@ -406,18 +409,24 @@ mtk_eth_setup_tc_block_cb(enum tc_setup_ + if (type != TC_SETUP_CLSFLOWER) + return -EOPNOTSUPP; + ++ mutex_lock(&mtk_flow_offload_mutex); + switch (cls->command) { + case FLOW_CLS_REPLACE: +- return mtk_flow_offload_replace(eth, cls); ++ err = mtk_flow_offload_replace(eth, cls); ++ break; + case FLOW_CLS_DESTROY: +- return mtk_flow_offload_destroy(eth, cls); ++ err = mtk_flow_offload_destroy(eth, cls); ++ break; + case FLOW_CLS_STATS: +- return mtk_flow_offload_stats(eth, cls); ++ err = mtk_flow_offload_stats(eth, cls); ++ break; + default: +- return -EOPNOTSUPP; ++ err = -EOPNOTSUPP; ++ break; + } ++ mutex_unlock(&mtk_flow_offload_mutex); + +- return 0; ++ return err; + } + + static int diff --git a/target/linux/generic/backport-5.15/610-v5.13-53-net-ethernet-mtk_eth_soc-handle-VLAN-pop-action.patch b/target/linux/generic/backport-5.15/610-v5.13-53-net-ethernet-mtk_eth_soc-handle-VLAN-pop-action.patch new file mode 100644 index 0000000000..806fd0dcdf --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-53-net-ethernet-mtk_eth_soc-handle-VLAN-pop-action.patch @@ -0,0 +1,22 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Sun, 18 Apr 2021 23:11:45 +0200 +Subject: [PATCH] net: ethernet: mtk_eth_soc: handle VLAN pop action + +Do not hit EOPNOTSUPP when flowtable offload provides a VLAN pop action. + +Fixes: efce49dfe6a8 ("netfilter: flowtable: add vlan pop action offload support") +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -233,6 +233,8 @@ mtk_flow_offload_replace(struct mtk_eth + data.vlan.proto = act->vlan.proto; + data.vlan.num++; + break; ++ case FLOW_ACTION_VLAN_POP: ++ break; + case FLOW_ACTION_PPPOE_PUSH: + if (data.pppoe.num == 1) + return -EOPNOTSUPP; diff --git a/target/linux/generic/backport-5.15/610-v5.13-54-netfilter-flowtable-dst_check-from-garbage-collector.patch b/target/linux/generic/backport-5.15/610-v5.13-54-netfilter-flowtable-dst_check-from-garbage-collector.patch new file mode 100644 index 0000000000..42b55f021a --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-54-netfilter-flowtable-dst_check-from-garbage-collector.patch @@ -0,0 +1,159 @@ +From: Pablo Neira Ayuso <pablo@netfilter.org> +Date: Sun, 28 Mar 2021 23:08:55 +0200 +Subject: [PATCH] netfilter: flowtable: dst_check() from garbage collector path + +Move dst_check() to the garbage collector path. Stale routes trigger the +flow entry teardown state which makes affected flows go back to the +classic forwarding path to re-evaluate flow offloading. + +IPv6 requires the dst cookie to work, store it in the flow_tuple, +otherwise dst_check() always fails. 
+ +Fixes: e5075c0badaa ("netfilter: flowtable: call dst_check() to fall back to classic forwarding") +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/net/netfilter/nf_flow_table.h ++++ b/include/net/netfilter/nf_flow_table.h +@@ -129,7 +129,10 @@ struct flow_offload_tuple { + in_vlan_ingress:2; + u16 mtu; + union { +- struct dst_entry *dst_cache; ++ struct { ++ struct dst_entry *dst_cache; ++ u32 dst_cookie; ++ }; + struct { + u32 ifidx; + u32 hw_ifidx; +--- a/net/netfilter/nf_flow_table_core.c ++++ b/net/netfilter/nf_flow_table_core.c +@@ -74,6 +74,18 @@ err_ct_refcnt: + } + EXPORT_SYMBOL_GPL(flow_offload_alloc); + ++static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple) ++{ ++ const struct rt6_info *rt; ++ ++ if (flow_tuple->l3proto == NFPROTO_IPV6) { ++ rt = (const struct rt6_info *)flow_tuple->dst_cache; ++ return rt6_get_cookie(rt); ++ } ++ ++ return 0; ++} ++ + static int flow_offload_fill_route(struct flow_offload *flow, + const struct nf_flow_route *route, + enum flow_offload_tuple_dir dir) +@@ -116,6 +128,7 @@ static int flow_offload_fill_route(struc + return -1; + + flow_tuple->dst_cache = dst; ++ flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple); + break; + } + flow_tuple->xmit_type = route->tuple[dir].xmit_type; +@@ -389,11 +402,33 @@ nf_flow_table_iterate(struct nf_flowtabl + return err; + } + ++static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple) ++{ ++ struct dst_entry *dst; ++ ++ if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH || ++ tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM) { ++ dst = tuple->dst_cache; ++ if (!dst_check(dst, tuple->dst_cookie)) ++ return true; ++ } ++ ++ return false; ++} ++ ++static bool nf_flow_has_stale_dst(struct flow_offload *flow) ++{ ++ return flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple) || ++ flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple); ++} ++ + static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data) + { + struct nf_flowtable *flow_table = data; + +- if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct)) ++ if (nf_flow_has_expired(flow) || ++ nf_ct_is_dying(flow->ct) || ++ nf_flow_has_stale_dst(flow)) + set_bit(NF_FLOW_TEARDOWN, &flow->flags); + + if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) { +--- a/net/netfilter/nf_flow_table_ip.c ++++ b/net/netfilter/nf_flow_table_ip.c +@@ -364,15 +364,6 @@ nf_flow_offload_ip_hook(void *priv, stru + if (nf_flow_state_check(flow, iph->protocol, skb, thoff)) + return NF_ACCEPT; + +- if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH || +- tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) { +- rt = (struct rtable *)tuplehash->tuple.dst_cache; +- if (!dst_check(&rt->dst, 0)) { +- flow_offload_teardown(flow); +- return NF_ACCEPT; +- } +- } +- + if (skb_try_make_writable(skb, thoff + hdrsize)) + return NF_DROP; + +@@ -391,6 +382,7 @@ nf_flow_offload_ip_hook(void *priv, stru + nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len); + + if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) { ++ rt = (struct rtable *)tuplehash->tuple.dst_cache; + memset(skb->cb, 0, sizeof(struct inet_skb_parm)); + IPCB(skb)->iif = skb->dev->ifindex; + IPCB(skb)->flags = IPSKB_FORWARDED; +@@ -399,6 +391,7 @@ nf_flow_offload_ip_hook(void *priv, stru + + switch (tuplehash->tuple.xmit_type) { + case FLOW_OFFLOAD_XMIT_NEIGH: ++ rt = (struct rtable *)tuplehash->tuple.dst_cache; + outdev = rt->dst.dev; + skb->dev = outdev; + nexthop = rt_nexthop(rt, 
flow->tuplehash[!dir].tuple.src_v4.s_addr); +@@ -607,15 +600,6 @@ nf_flow_offload_ipv6_hook(void *priv, st + if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff)) + return NF_ACCEPT; + +- if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH || +- tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) { +- rt = (struct rt6_info *)tuplehash->tuple.dst_cache; +- if (!dst_check(&rt->dst, 0)) { +- flow_offload_teardown(flow); +- return NF_ACCEPT; +- } +- } +- + if (skb_try_make_writable(skb, thoff + hdrsize)) + return NF_DROP; + +@@ -633,6 +617,7 @@ nf_flow_offload_ipv6_hook(void *priv, st + nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len); + + if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) { ++ rt = (struct rt6_info *)tuplehash->tuple.dst_cache; + memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); + IP6CB(skb)->iif = skb->dev->ifindex; + IP6CB(skb)->flags = IP6SKB_FORWARDED; +@@ -641,6 +626,7 @@ nf_flow_offload_ipv6_hook(void *priv, st + + switch (tuplehash->tuple.xmit_type) { + case FLOW_OFFLOAD_XMIT_NEIGH: ++ rt = (struct rt6_info *)tuplehash->tuple.dst_cache; + outdev = rt->dst.dev; + skb->dev = outdev; + nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6); diff --git a/target/linux/generic/backport-5.15/610-v5.13-55-netfilter-conntrack-Introduce-tcp-offload-timeout-co.patch b/target/linux/generic/backport-5.15/610-v5.13-55-netfilter-conntrack-Introduce-tcp-offload-timeout-co.patch new file mode 100644 index 0000000000..e5a346cac4 --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-55-netfilter-conntrack-Introduce-tcp-offload-timeout-co.patch @@ -0,0 +1,94 @@ +From: Oz Shlomo <ozsh@nvidia.com> +Date: Thu, 3 Jun 2021 15:12:33 +0300 +Subject: [PATCH] netfilter: conntrack: Introduce tcp offload timeout + configuration + +TCP connections may be offloaded from nf conntrack to nf flow table. +Offloaded connections are aged after 30 seconds of inactivity. +Once aged, ownership is returned to conntrack with a hard coded pickup +time of 120 seconds, after which the connection may be deleted. +eted. The current aging intervals may be too aggressive for some users. + +Provide users with the ability to control the nf flow table offload +aging and pickup time intervals via sysctl parameter as a pre-step for +configuring the nf flow table GC timeout intervals. 
+ +Signed-off-by: Oz Shlomo <ozsh@nvidia.com> +Reviewed-by: Paul Blakey <paulb@nvidia.com> +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/net/netns/conntrack.h ++++ b/include/net/netns/conntrack.h +@@ -27,6 +27,10 @@ struct nf_tcp_net { + int tcp_loose; + int tcp_be_liberal; + int tcp_max_retrans; ++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) ++ unsigned int offload_timeout; ++ unsigned int offload_pickup; ++#endif + }; + + enum udp_conntrack { +--- a/net/netfilter/nf_conntrack_proto_tcp.c ++++ b/net/netfilter/nf_conntrack_proto_tcp.c +@@ -1438,6 +1438,11 @@ void nf_conntrack_tcp_init_net(struct ne + tn->tcp_loose = nf_ct_tcp_loose; + tn->tcp_be_liberal = nf_ct_tcp_be_liberal; + tn->tcp_max_retrans = nf_ct_tcp_max_retrans; ++ ++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) ++ tn->offload_timeout = 30 * HZ; ++ tn->offload_pickup = 120 * HZ; ++#endif + } + + const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp = +--- a/net/netfilter/nf_conntrack_standalone.c ++++ b/net/netfilter/nf_conntrack_standalone.c +@@ -567,6 +567,10 @@ enum nf_ct_sysctl_index { + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK, ++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) ++ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD, ++ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP, ++#endif + NF_SYSCTL_CT_PROTO_TCP_LOOSE, + NF_SYSCTL_CT_PROTO_TCP_LIBERAL, + NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS, +@@ -758,6 +762,20 @@ static struct ctl_table nf_ct_sysctl_tab + .mode = 0644, + .proc_handler = proc_dointvec_jiffies, + }, ++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) ++ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD] = { ++ .procname = "nf_flowtable_tcp_timeout", ++ .maxlen = sizeof(unsigned int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec_jiffies, ++ }, ++ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP] = { ++ .procname = "nf_flowtable_tcp_pickup", ++ .maxlen = sizeof(unsigned int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec_jiffies, ++ }, ++#endif + [NF_SYSCTL_CT_PROTO_TCP_LOOSE] = { + .procname = "nf_conntrack_tcp_loose", + .maxlen = sizeof(int), +@@ -967,6 +985,12 @@ static void nf_conntrack_standalone_init + XASSIGN(LIBERAL, &tn->tcp_be_liberal); + XASSIGN(MAX_RETRANS, &tn->tcp_max_retrans); + #undef XASSIGN ++ ++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) ++ table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD].data = &tn->offload_timeout; ++ table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP].data = &tn->offload_pickup; ++#endif ++ + } + + static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net, diff --git a/target/linux/generic/backport-5.15/610-v5.13-56-netfilter-conntrack-Introduce-udp-offload-timeout-co.patch b/target/linux/generic/backport-5.15/610-v5.13-56-netfilter-conntrack-Introduce-udp-offload-timeout-co.patch new file mode 100644 index 0000000000..416e20356d --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-56-netfilter-conntrack-Introduce-udp-offload-timeout-co.patch @@ -0,0 +1,92 @@ +From: Oz Shlomo <ozsh@nvidia.com> +Date: Thu, 3 Jun 2021 15:12:34 +0300 +Subject: [PATCH] netfilter: conntrack: Introduce udp offload timeout + configuration + +UDP connections may be offloaded from nf conntrack to nf flow table. +Offloaded connections are aged after 30 seconds of inactivity. +Once aged, ownership is returned to conntrack with a hard coded pickup +time of 30 seconds, after which the connection may be deleted. +eted. The current aging intervals may be too aggressive for some users. 
+ +Provide users with the ability to control the nf flow table offload +aging and pickup time intervals via sysctl parameter as a pre-step for +configuring the nf flow table GC timeout intervals. + +Signed-off-by: Oz Shlomo <ozsh@nvidia.com> +Reviewed-by: Paul Blakey <paulb@nvidia.com> +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/net/netns/conntrack.h ++++ b/include/net/netns/conntrack.h +@@ -41,6 +41,10 @@ enum udp_conntrack { + + struct nf_udp_net { + unsigned int timeouts[UDP_CT_MAX]; ++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) ++ unsigned int offload_timeout; ++ unsigned int offload_pickup; ++#endif + }; + + struct nf_icmp_net { +--- a/net/netfilter/nf_conntrack_proto_udp.c ++++ b/net/netfilter/nf_conntrack_proto_udp.c +@@ -273,6 +273,11 @@ void nf_conntrack_udp_init_net(struct ne + + for (i = 0; i < UDP_CT_MAX; i++) + un->timeouts[i] = udp_timeouts[i]; ++ ++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) ++ un->offload_timeout = 30 * HZ; ++ un->offload_pickup = 30 * HZ; ++#endif + } + + const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp = +--- a/net/netfilter/nf_conntrack_standalone.c ++++ b/net/netfilter/nf_conntrack_standalone.c +@@ -576,6 +576,10 @@ enum nf_ct_sysctl_index { + NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS, + NF_SYSCTL_CT_PROTO_TIMEOUT_UDP, + NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM, ++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) ++ NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD, ++ NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP, ++#endif + NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP, + NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6, + #ifdef CONFIG_NF_CT_PROTO_SCTP +@@ -810,6 +814,20 @@ static struct ctl_table nf_ct_sysctl_tab + .mode = 0644, + .proc_handler = proc_dointvec_jiffies, + }, ++#if IS_ENABLED(CONFIG_NFT_FLOW_OFFLOAD) ++ [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD] = { ++ .procname = "nf_flowtable_udp_timeout", ++ .maxlen = sizeof(unsigned int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec_jiffies, ++ }, ++ [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP] = { ++ .procname = "nf_flowtable_udp_pickup", ++ .maxlen = sizeof(unsigned int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec_jiffies, ++ }, ++#endif + [NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP] = { + .procname = "nf_conntrack_icmp_timeout", + .maxlen = sizeof(unsigned int), +@@ -1078,6 +1096,10 @@ static int nf_conntrack_standalone_init_ + table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout; + table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP].data = &un->timeouts[UDP_CT_UNREPLIED]; + table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM].data = &un->timeouts[UDP_CT_REPLIED]; ++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) ++ table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD].data = &un->offload_timeout; ++ table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP].data = &un->offload_pickup; ++#endif + + nf_conntrack_standalone_init_tcp_sysctl(net, table); + nf_conntrack_standalone_init_sctp_sysctl(net, table); diff --git a/target/linux/generic/backport-5.15/610-v5.13-57-netfilter-flowtable-Set-offload-timeouts-according-t.patch b/target/linux/generic/backport-5.15/610-v5.13-57-netfilter-flowtable-Set-offload-timeouts-according-t.patch new file mode 100644 index 0000000000..1e82308eaa --- /dev/null +++ b/target/linux/generic/backport-5.15/610-v5.13-57-netfilter-flowtable-Set-offload-timeouts-according-t.patch @@ -0,0 +1,134 @@ +From: Oz Shlomo <ozsh@nvidia.com> +Date: Thu, 3 Jun 2021 15:12:35 +0300 +Subject: [PATCH] netfilter: flowtable: Set offload timeouts according to proto + values + +Currently the aging period for tcp/udp connections is hard 
coded to +30 seconds. Aged tcp/udp connections configure a hard coded 120/30 +seconds pickup timeout for conntrack. +This configuration may be too aggressive or permissive for some users. + +Dynamically configure the nf flow table GC timeout intervals according +to the user defined values. + +Signed-off-by: Oz Shlomo <ozsh@nvidia.com> +Reviewed-by: Paul Blakey <paulb@nvidia.com> +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + +--- a/include/net/netfilter/nf_flow_table.h ++++ b/include/net/netfilter/nf_flow_table.h +@@ -174,6 +174,8 @@ struct flow_offload { + #define NF_FLOW_TIMEOUT (30 * HZ) + #define nf_flowtable_time_stamp (u32)jiffies + ++unsigned long flow_offload_get_timeout(struct flow_offload *flow); ++ + static inline __s32 nf_flow_timeout_delta(unsigned int timeout) + { + return (__s32)(timeout - nf_flowtable_time_stamp); +--- a/net/netfilter/nf_flow_table_core.c ++++ b/net/netfilter/nf_flow_table_core.c +@@ -175,12 +175,10 @@ static void flow_offload_fixup_tcp(struc + tcp->seen[1].td_maxwin = 0; + } + +-#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ) +-#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ) +- + static void flow_offload_fixup_ct_timeout(struct nf_conn *ct) + { + const struct nf_conntrack_l4proto *l4proto; ++ struct net *net = nf_ct_net(ct); + int l4num = nf_ct_protonum(ct); + unsigned int timeout; + +@@ -188,12 +186,17 @@ static void flow_offload_fixup_ct_timeou + if (!l4proto) + return; + +- if (l4num == IPPROTO_TCP) +- timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT; +- else if (l4num == IPPROTO_UDP) +- timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT; +- else ++ if (l4num == IPPROTO_TCP) { ++ struct nf_tcp_net *tn = nf_tcp_pernet(net); ++ ++ timeout = tn->offload_pickup; ++ } else if (l4num == IPPROTO_UDP) { ++ struct nf_udp_net *tn = nf_udp_pernet(net); ++ ++ timeout = tn->offload_pickup; ++ } else { + return; ++ } + + if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout) + WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout); +@@ -265,11 +268,35 @@ static const struct rhashtable_params nf + .automatic_shrinking = true, + }; + ++unsigned long flow_offload_get_timeout(struct flow_offload *flow) ++{ ++ const struct nf_conntrack_l4proto *l4proto; ++ unsigned long timeout = NF_FLOW_TIMEOUT; ++ struct net *net = nf_ct_net(flow->ct); ++ int l4num = nf_ct_protonum(flow->ct); ++ ++ l4proto = nf_ct_l4proto_find(l4num); ++ if (!l4proto) ++ return timeout; ++ ++ if (l4num == IPPROTO_TCP) { ++ struct nf_tcp_net *tn = nf_tcp_pernet(net); ++ ++ timeout = tn->offload_timeout; ++ } else if (l4num == IPPROTO_UDP) { ++ struct nf_udp_net *tn = nf_udp_pernet(net); ++ ++ timeout = tn->offload_timeout; ++ } ++ ++ return timeout; ++} ++ + int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow) + { + int err; + +- flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT; ++ flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow); + + err = rhashtable_insert_fast(&flow_table->rhashtable, + &flow->tuplehash[0].node, +@@ -301,7 +328,7 @@ EXPORT_SYMBOL_GPL(flow_offload_add); + void flow_offload_refresh(struct nf_flowtable *flow_table, + struct flow_offload *flow) + { +- flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT; ++ flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow); + + if (likely(!nf_flowtable_hw_offload(flow_table))) + return; +--- a/net/netfilter/nf_flow_table_offload.c ++++ b/net/netfilter/nf_flow_table_offload.c +@@ -885,7 +885,7 @@ static void flow_offload_work_stats(stru + + lastused = 
max_t(u64, stats[0].lastused, stats[1].lastused); + offload->flow->timeout = max_t(u64, offload->flow->timeout, +- lastused + NF_FLOW_TIMEOUT); ++ lastused + flow_offload_get_timeout(offload->flow)); + + if (offload->flowtable->flags & NF_FLOWTABLE_COUNTER) { + if (stats[0].pkts) +@@ -989,7 +989,7 @@ void nf_flow_offload_stats(struct nf_flo + __s32 delta; + + delta = nf_flow_timeout_delta(flow->timeout); +- if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10)) ++ if ((delta >= (9 * flow_offload_get_timeout(flow)) / 10)) + return; + + offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_STATS); diff --git a/target/linux/generic/backport-5.15/611-v5.12-net-ethernet-mediatek-support-setting-MTU.patch b/target/linux/generic/backport-5.15/611-v5.12-net-ethernet-mediatek-support-setting-MTU.patch new file mode 100644 index 0000000000..289d140f34 --- /dev/null +++ b/target/linux/generic/backport-5.15/611-v5.12-net-ethernet-mediatek-support-setting-MTU.patch @@ -0,0 +1,138 @@ +From 4fd59792097a6b2fb949d41264386a7ecade469e Mon Sep 17 00:00:00 2001 +From: DENG Qingfang <dqfext@gmail.com> +Date: Mon, 25 Jan 2021 12:20:46 +0800 +Subject: [PATCH] net: ethernet: mediatek: support setting MTU + +MT762x HW, except for MT7628, supports frame length up to 2048 +(maximum length on GDM), so allow setting MTU up to 2030. + +Also set the default frame length to the hardware default 1518. + +Signed-off-by: DENG Qingfang <dqfext@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Link: https://lore.kernel.org/r/20210125042046.5599-1-dqfext@gmail.com +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 43 ++++++++++++++++++--- + drivers/net/ethernet/mediatek/mtk_eth_soc.h | 12 ++++-- + 2 files changed, 47 insertions(+), 8 deletions(-) + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -355,7 +355,7 @@ static void mtk_mac_config(struct phylin + /* Setup gmac */ + mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); + mcr_new = mcr_cur; +- mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE | ++ mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE | + MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK; + + /* Only update control register when needed! 
*/ +@@ -782,8 +782,8 @@ static void mtk_get_stats64(struct net_d + static inline int mtk_max_frag_size(int mtu) + { + /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */ +- if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH) +- mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; ++ if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K) ++ mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; + + return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +@@ -794,7 +794,7 @@ static inline int mtk_max_buf_size(int f + int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + +- WARN_ON(buf_size < MTK_MAX_RX_LENGTH); ++ WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K); + + return buf_size; + } +@@ -2606,6 +2606,35 @@ static void mtk_uninit(struct net_device + mtk_rx_irq_disable(eth, ~0); + } + ++static int mtk_change_mtu(struct net_device *dev, int new_mtu) ++{ ++ int length = new_mtu + MTK_RX_ETH_HLEN; ++ struct mtk_mac *mac = netdev_priv(dev); ++ struct mtk_eth *eth = mac->hw; ++ u32 mcr_cur, mcr_new; ++ ++ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { ++ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); ++ mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK; ++ ++ if (length <= 1518) ++ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518); ++ else if (length <= 1536) ++ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536); ++ else if (length <= 1552) ++ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552); ++ else ++ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048); ++ ++ if (mcr_new != mcr_cur) ++ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); ++ } ++ ++ dev->mtu = new_mtu; ++ ++ return 0; ++} ++ + static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + { + struct mtk_mac *mac = netdev_priv(dev); +@@ -2902,6 +2931,7 @@ static const struct net_device_ops mtk_n + .ndo_set_mac_address = mtk_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = mtk_do_ioctl, ++ .ndo_change_mtu = mtk_change_mtu, + .ndo_tx_timeout = mtk_tx_timeout, + .ndo_get_stats64 = mtk_get_stats64, + .ndo_fix_features = mtk_fix_features, +@@ -3004,7 +3034,10 @@ static int mtk_add_mac(struct mtk_eth *e + eth->netdev[id]->irq = eth->irq[0]; + eth->netdev[id]->dev.of_node = np; + +- eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; ++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) ++ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; ++ else ++ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; + + return 0; + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -20,12 +20,13 @@ + #include "mtk_ppe.h" + + #define MTK_QDMA_PAGE_SIZE 2048 +-#define MTK_MAX_RX_LENGTH 1536 ++#define MTK_MAX_RX_LENGTH 1536 ++#define MTK_MAX_RX_LENGTH_2K 2048 + #define MTK_TX_DMA_BUF_LEN 0x3fff + #define MTK_DMA_SIZE 512 + #define MTK_NAPI_WEIGHT 64 + #define MTK_MAC_COUNT 2 +-#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) ++#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN) + #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN) + #define MTK_DMA_DUMMY_DESC 0xffffffff + #define MTK_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \ +@@ -352,7 +353,12 @@ + + /* Mac control registers */ + #define MTK_MAC_MCR(x) (0x10100 + (x * 0x100)) +-#define MAC_MCR_MAX_RX_1536 BIT(24) ++#define MAC_MCR_MAX_RX_MASK GENMASK(25, 24) ++#define MAC_MCR_MAX_RX(_x) (MAC_MCR_MAX_RX_MASK & ((_x) << 24)) ++#define MAC_MCR_MAX_RX_1518 0x0 ++#define MAC_MCR_MAX_RX_1536 0x1 ++#define 
MAC_MCR_MAX_RX_1552 0x2 ++#define MAC_MCR_MAX_RX_2048 0x3 + #define MAC_MCR_IPG_CFG (BIT(18) | BIT(16)) + #define MAC_MCR_FORCE_MODE BIT(15) + #define MAC_MCR_TX_EN BIT(14) diff --git a/target/linux/generic/backport-5.15/705-net-phy-at803x-select-correct-page-on-config-init.patch b/target/linux/generic/backport-5.15/705-net-phy-at803x-select-correct-page-on-config-init.patch new file mode 100644 index 0000000000..00be403299 --- /dev/null +++ b/target/linux/generic/backport-5.15/705-net-phy-at803x-select-correct-page-on-config-init.patch @@ -0,0 +1,108 @@ +From c329e5afb42ff0a88285eb4d8a391a18793e4777 Mon Sep 17 00:00:00 2001 +From: David Bauer <mail@david-bauer.net> +Date: Thu, 15 Apr 2021 03:26:50 +0200 +Subject: [PATCH] net: phy: at803x: select correct page on config init + +The Atheros AR8031 and AR8033 expose different registers for SGMII/Fiber +as well as the copper side of the PHY depending on the BT_BX_REG_SEL bit +in the chip configure register. + +The driver assumes the copper side is selected on probe, but this might +not be the case depending which page was last selected by the +bootloader. Notably, Ubiquiti UniFi bootloaders show this behavior. + +Select the copper page when probing to circumvent this. + +Signed-off-by: David Bauer <mail@david-bauer.net> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/phy/at803x.c | 50 +++++++++++++++++++++++++++++++++++++++- + 1 file changed, 49 insertions(+), 1 deletion(-) + +--- a/drivers/net/phy/at803x.c ++++ b/drivers/net/phy/at803x.c +@@ -139,6 +139,9 @@ + #define ATH8035_PHY_ID 0x004dd072 + #define AT8030_PHY_ID_MASK 0xffffffef + ++#define AT803X_PAGE_FIBER 0 ++#define AT803X_PAGE_COPPER 1 ++ + MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver"); + MODULE_AUTHOR("Matus Ujhelyi"); + MODULE_LICENSE("GPL"); +@@ -190,6 +193,35 @@ static int at803x_debug_reg_mask(struct + return phy_write(phydev, AT803X_DEBUG_DATA, val); + } + ++static int at803x_write_page(struct phy_device *phydev, int page) ++{ ++ int mask; ++ int set; ++ ++ if (page == AT803X_PAGE_COPPER) { ++ set = AT803X_BT_BX_REG_SEL; ++ mask = 0; ++ } else { ++ set = 0; ++ mask = AT803X_BT_BX_REG_SEL; ++ } ++ ++ return __phy_modify(phydev, AT803X_REG_CHIP_CONFIG, mask, set); ++} ++ ++static int at803x_read_page(struct phy_device *phydev) ++{ ++ int ccr = __phy_read(phydev, AT803X_REG_CHIP_CONFIG); ++ ++ if (ccr < 0) ++ return ccr; ++ ++ if (ccr & AT803X_BT_BX_REG_SEL) ++ return AT803X_PAGE_COPPER; ++ ++ return AT803X_PAGE_FIBER; ++} ++ + static int at803x_enable_rx_delay(struct phy_device *phydev) + { + return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, 0, +@@ -508,6 +540,7 @@ static int at803x_probe(struct phy_devic + { + struct device *dev = &phydev->mdio.dev; + struct at803x_priv *priv; ++ int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) +@@ -515,7 +548,20 @@ static int at803x_probe(struct phy_devic + + phydev->priv = priv; + +- return at803x_parse_dt(phydev); ++ ret = at803x_parse_dt(phydev); ++ if (ret) ++ return ret; ++ ++ /* Some bootloaders leave the fiber page selected. ++ * Switch to the copper page, as otherwise we read ++ * the PHY capabilities from the fiber side. 
++ */ ++ if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) { ++ ret = phy_select_page(phydev, AT803X_PAGE_COPPER); ++ ret = phy_restore_page(phydev, AT803X_PAGE_COPPER, ret); ++ } ++ ++ return ret; + } + + static void at803x_remove(struct phy_device *phydev) +@@ -1097,6 +1143,8 @@ static struct phy_driver at803x_driver[] + .get_wol = at803x_get_wol, + .suspend = at803x_suspend, + .resume = at803x_resume, ++ .read_page = at803x_read_page, ++ .write_page = at803x_write_page, + /* PHY_GBIT_FEATURES */ + .read_status = at803x_read_status, + .aneg_done = at803x_aneg_done, diff --git a/target/linux/generic/backport-5.15/706-net-phy-at803x-fix-probe-error-if-copper-page-is-sel.patch b/target/linux/generic/backport-5.15/706-net-phy-at803x-fix-probe-error-if-copper-page-is-sel.patch new file mode 100644 index 0000000000..d6ec7450e8 --- /dev/null +++ b/target/linux/generic/backport-5.15/706-net-phy-at803x-fix-probe-error-if-copper-page-is-sel.patch @@ -0,0 +1,73 @@ +From 8f7e876273e294b732b42af2e5e6bba91d798954 Mon Sep 17 00:00:00 2001 +From: Michael Walle <michael@walle.cc> +Date: Tue, 20 Apr 2021 12:29:29 +0200 +Subject: [PATCH] net: phy: at803x: fix probe error if copper page is selected + +The commit c329e5afb42f ("net: phy: at803x: select correct page on +config init") selects the copper page during probe. This fails if the +copper page was already selected. In this case, the value of the copper +page (which is 1) is propagated through phy_restore_page() and is +finally returned for at803x_probe(). Fix it, by just using the +at803x_page_write() directly. + +Also in case of an error, the regulator is not disabled and leads to a +WARN_ON() when the probe fails. This couldn't happen before, because +at803x_parse_dt() was the last call in at803x_probe(). It is hard to +see, that the parse_dt() actually enables the regulator. Thus move the +regulator_enable() to the probe function and undo it in case of an +error. + +Fixes: c329e5afb42f ("net: phy: at803x: select correct page on config init") +Signed-off-by: Michael Walle <michael@walle.cc> +Reviewed-by: David Bauer <mail@david-bauer.net> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/phy/at803x.c | 23 +++++++++++++++++------ + 1 file changed, 17 insertions(+), 6 deletions(-) + +--- a/drivers/net/phy/at803x.c ++++ b/drivers/net/phy/at803x.c +@@ -527,10 +527,6 @@ static int at803x_parse_dt(struct phy_de + phydev_err(phydev, "failed to get VDDIO regulator\n"); + return PTR_ERR(priv->vddio); + } +- +- ret = regulator_enable(priv->vddio); +- if (ret < 0) +- return ret; + } + + return 0; +@@ -552,15 +548,30 @@ static int at803x_probe(struct phy_devic + if (ret) + return ret; + ++ if (priv->vddio) { ++ ret = regulator_enable(priv->vddio); ++ if (ret < 0) ++ return ret; ++ } ++ + /* Some bootloaders leave the fiber page selected. + * Switch to the copper page, as otherwise we read + * the PHY capabilities from the fiber side. 
+ */ + if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) { +- ret = phy_select_page(phydev, AT803X_PAGE_COPPER); +- ret = phy_restore_page(phydev, AT803X_PAGE_COPPER, ret); ++ phy_lock_mdio_bus(phydev); ++ ret = at803x_write_page(phydev, AT803X_PAGE_COPPER); ++ phy_unlock_mdio_bus(phydev); ++ if (ret) ++ goto err; + } + ++ return 0; ++ ++err: ++ if (priv->vddio) ++ regulator_disable(priv->vddio); ++ + return ret; + } + diff --git a/target/linux/generic/backport-5.15/710-v5.12-net-phy-Add-100-base-x-mode.patch b/target/linux/generic/backport-5.15/710-v5.12-net-phy-Add-100-base-x-mode.patch new file mode 100644 index 0000000000..5c7f97ea90 --- /dev/null +++ b/target/linux/generic/backport-5.15/710-v5.12-net-phy-Add-100-base-x-mode.patch @@ -0,0 +1,56 @@ +From b1ae3587d16a8c8fc9453e147c8708d6f006ffbb Mon Sep 17 00:00:00 2001 +From: Bjarni Jonasson <bjarni.jonasson@microchip.com> +Date: Wed, 13 Jan 2021 12:56:25 +0100 +Subject: [PATCH] net: phy: Add 100 base-x mode + +Sparx-5 supports this mode and it is missing in the PHY core. + +Signed-off-by: Bjarni Jonasson <bjarni.jonasson@microchip.com> +Reviewed-by: Russell King <rmk+kernel@armlinux.org.uk> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + Documentation/networking/phy.rst | 5 +++++ + include/linux/phy.h | 4 ++++ + 2 files changed, 9 insertions(+) + +--- a/Documentation/networking/phy.rst ++++ b/Documentation/networking/phy.rst +@@ -286,6 +286,11 @@ Some of the interface modes are describe + Note: due to legacy usage, some 10GBASE-R usage incorrectly makes + use of this definition. + ++``PHY_INTERFACE_MODE_100BASEX`` ++ This defines IEEE 802.3 Clause 24. The link operates at a fixed data ++ rate of 125Mpbs using a 4B/5B encoding scheme, resulting in an underlying ++ data rate of 100Mpbs. ++ + Pause frames / flow control + =========================== + +--- a/include/linux/phy.h ++++ b/include/linux/phy.h +@@ -104,6 +104,7 @@ extern const int phy_10gbit_features_arr + * @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax + * @PHY_INTERFACE_MODE_QSGMII: Quad SGMII + * @PHY_INTERFACE_MODE_TRGMII: Turbo RGMII ++ * @PHY_INTERFACE_MODE_100BASEX: 100 BaseX + * @PHY_INTERFACE_MODE_1000BASEX: 1000 BaseX + * @PHY_INTERFACE_MODE_2500BASEX: 2500 BaseX + * @PHY_INTERFACE_MODE_RXAUI: Reduced XAUI +@@ -135,6 +136,7 @@ typedef enum { + PHY_INTERFACE_MODE_MOCA, + PHY_INTERFACE_MODE_QSGMII, + PHY_INTERFACE_MODE_TRGMII, ++ PHY_INTERFACE_MODE_100BASEX, + PHY_INTERFACE_MODE_1000BASEX, + PHY_INTERFACE_MODE_2500BASEX, + PHY_INTERFACE_MODE_RXAUI, +@@ -217,6 +219,8 @@ static inline const char *phy_modes(phy_ + return "usxgmii"; + case PHY_INTERFACE_MODE_10GKR: + return "10gbase-kr"; ++ case PHY_INTERFACE_MODE_100BASEX: ++ return "100base-x"; + default: + return "unknown"; + } diff --git a/target/linux/generic/backport-5.15/711-v5.12-sfp-add-support-for-100-base-x-SFPs.patch b/target/linux/generic/backport-5.15/711-v5.12-sfp-add-support-for-100-base-x-SFPs.patch new file mode 100644 index 0000000000..7d06c235d9 --- /dev/null +++ b/target/linux/generic/backport-5.15/711-v5.12-sfp-add-support-for-100-base-x-SFPs.patch @@ -0,0 +1,40 @@ +From 6e12f35cef6b8a458d7ecf507ae330e0bffaad8c Mon Sep 17 00:00:00 2001 +From: Bjarni Jonasson <bjarni.jonasson@microchip.com> +Date: Wed, 13 Jan 2021 12:56:26 +0100 +Subject: [PATCH] sfp: add support for 100 base-x SFPs + +Add support for 100Base-FX, 100Base-LX, 100Base-PX and 100Base-BX10 modules +This is needed for Sparx-5 switch. 
+ +Signed-off-by: Bjarni Jonasson <bjarni.jonasson@microchip.com> +Reviewed-by: Russell King <rmk+kernel@armlinux.org.uk> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/phy/sfp-bus.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +--- a/drivers/net/phy/sfp-bus.c ++++ b/drivers/net/phy/sfp-bus.c +@@ -280,6 +280,12 @@ void sfp_parse_support(struct sfp_bus *b + br_min <= 1300 && br_max >= 1200) + phylink_set(modes, 1000baseX_Full); + ++ /* 100Base-FX, 100Base-LX, 100Base-PX, 100Base-BX10 */ ++ if (id->base.e100_base_fx || id->base.e100_base_lx) ++ phylink_set(modes, 100baseFX_Full); ++ if ((id->base.e_base_px || id->base.e_base_bx10) && br_nom == 100) ++ phylink_set(modes, 100baseFX_Full); ++ + /* For active or passive cables, select the link modes + * based on the bit rates and the cable compliance bytes. + */ +@@ -399,6 +405,9 @@ phy_interface_t sfp_select_interface(str + if (phylink_test(link_modes, 1000baseX_Full)) + return PHY_INTERFACE_MODE_1000BASEX; + ++ if (phylink_test(link_modes, 100baseFX_Full)) ++ return PHY_INTERFACE_MODE_100BASEX; ++ + dev_warn(bus->sfp_dev, "Unable to ascertain link mode\n"); + + return PHY_INTERFACE_MODE_NA; diff --git a/target/linux/generic/backport-5.15/712-v5.13-net-phy-marvell-refactor-HWMON-OOP-style.patch b/target/linux/generic/backport-5.15/712-v5.13-net-phy-marvell-refactor-HWMON-OOP-style.patch new file mode 100644 index 0000000000..278df46313 --- /dev/null +++ b/target/linux/generic/backport-5.15/712-v5.13-net-phy-marvell-refactor-HWMON-OOP-style.patch @@ -0,0 +1,549 @@ +From 41d26bf4aba070dfd2ab48866cc27a48ee6228c7 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Marek=20Beh=C3=BAn?= <kabel@kernel.org> +Date: Tue, 20 Apr 2021 09:53:59 +0200 +Subject: [PATCH] net: phy: marvell: refactor HWMON OOP style +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Use a structure of Marvell PHY specific HWMON methods to reduce code +duplication. Store a pointer to this structure into the PHY driver's +driver_data member. + +Signed-off-by: Marek Behún <kabel@kernel.org> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/phy/marvell.c | 369 +++++++++++++------------------------- + 1 file changed, 125 insertions(+), 244 deletions(-) + +--- a/drivers/net/phy/marvell.c ++++ b/drivers/net/phy/marvell.c +@@ -2141,6 +2141,19 @@ static int marvell_vct7_cable_test_get_s + } + + #ifdef CONFIG_HWMON ++struct marvell_hwmon_ops { ++ int (*get_temp)(struct phy_device *phydev, long *temp); ++ int (*get_temp_critical)(struct phy_device *phydev, long *temp); ++ int (*set_temp_critical)(struct phy_device *phydev, long temp); ++ int (*get_temp_alarm)(struct phy_device *phydev, long *alarm); ++}; ++ ++static const struct marvell_hwmon_ops * ++to_marvell_hwmon_ops(const struct phy_device *phydev) ++{ ++ return phydev->drv->driver_data; ++} ++ + static int m88e1121_get_temp(struct phy_device *phydev, long *temp) + { + int oldpage; +@@ -2184,75 +2197,6 @@ error: + return phy_restore_page(phydev, oldpage, ret); + } + +-static int m88e1121_hwmon_read(struct device *dev, +- enum hwmon_sensor_types type, +- u32 attr, int channel, long *temp) +-{ +- struct phy_device *phydev = dev_get_drvdata(dev); +- int err; +- +- switch (attr) { +- case hwmon_temp_input: +- err = m88e1121_get_temp(phydev, temp); +- break; +- default: +- return -EOPNOTSUPP; +- } +- +- return err; +-} +- +-static umode_t m88e1121_hwmon_is_visible(const void *data, +- enum hwmon_sensor_types type, +- u32 attr, int channel) +-{ +- if (type != hwmon_temp) +- return 0; +- +- switch (attr) { +- case hwmon_temp_input: +- return 0444; +- default: +- return 0; +- } +-} +- +-static u32 m88e1121_hwmon_chip_config[] = { +- HWMON_C_REGISTER_TZ, +- 0 +-}; +- +-static const struct hwmon_channel_info m88e1121_hwmon_chip = { +- .type = hwmon_chip, +- .config = m88e1121_hwmon_chip_config, +-}; +- +-static u32 m88e1121_hwmon_temp_config[] = { +- HWMON_T_INPUT, +- 0 +-}; +- +-static const struct hwmon_channel_info m88e1121_hwmon_temp = { +- .type = hwmon_temp, +- .config = m88e1121_hwmon_temp_config, +-}; +- +-static const struct hwmon_channel_info *m88e1121_hwmon_info[] = { +- &m88e1121_hwmon_chip, +- &m88e1121_hwmon_temp, +- NULL +-}; +- +-static const struct hwmon_ops m88e1121_hwmon_hwmon_ops = { +- .is_visible = m88e1121_hwmon_is_visible, +- .read = m88e1121_hwmon_read, +-}; +- +-static const struct hwmon_chip_info m88e1121_hwmon_chip_info = { +- .ops = &m88e1121_hwmon_hwmon_ops, +- .info = m88e1121_hwmon_info, +-}; +- + static int m88e1510_get_temp(struct phy_device *phydev, long *temp) + { + int ret; +@@ -2315,92 +2259,6 @@ static int m88e1510_get_temp_alarm(struc + return 0; + } + +-static int m88e1510_hwmon_read(struct device *dev, +- enum hwmon_sensor_types type, +- u32 attr, int channel, long *temp) +-{ +- struct phy_device *phydev = dev_get_drvdata(dev); +- int err; +- +- switch (attr) { +- case hwmon_temp_input: +- err = m88e1510_get_temp(phydev, temp); +- break; +- case hwmon_temp_crit: +- err = m88e1510_get_temp_critical(phydev, temp); +- break; +- case hwmon_temp_max_alarm: +- err = m88e1510_get_temp_alarm(phydev, temp); +- break; +- default: +- return -EOPNOTSUPP; +- } +- +- return err; +-} +- +-static int m88e1510_hwmon_write(struct device *dev, +- enum hwmon_sensor_types type, +- u32 attr, int channel, long temp) +-{ +- struct phy_device *phydev = dev_get_drvdata(dev); +- int err; +- +- switch (attr) { +- case hwmon_temp_crit: +- err = m88e1510_set_temp_critical(phydev, temp); +- break; +- default: +- return -EOPNOTSUPP; +- } +- return err; +-} +- +-static umode_t m88e1510_hwmon_is_visible(const void *data, +- enum 
hwmon_sensor_types type, +- u32 attr, int channel) +-{ +- if (type != hwmon_temp) +- return 0; +- +- switch (attr) { +- case hwmon_temp_input: +- case hwmon_temp_max_alarm: +- return 0444; +- case hwmon_temp_crit: +- return 0644; +- default: +- return 0; +- } +-} +- +-static u32 m88e1510_hwmon_temp_config[] = { +- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM, +- 0 +-}; +- +-static const struct hwmon_channel_info m88e1510_hwmon_temp = { +- .type = hwmon_temp, +- .config = m88e1510_hwmon_temp_config, +-}; +- +-static const struct hwmon_channel_info *m88e1510_hwmon_info[] = { +- &m88e1121_hwmon_chip, +- &m88e1510_hwmon_temp, +- NULL +-}; +- +-static const struct hwmon_ops m88e1510_hwmon_hwmon_ops = { +- .is_visible = m88e1510_hwmon_is_visible, +- .read = m88e1510_hwmon_read, +- .write = m88e1510_hwmon_write, +-}; +- +-static const struct hwmon_chip_info m88e1510_hwmon_chip_info = { +- .ops = &m88e1510_hwmon_hwmon_ops, +- .info = m88e1510_hwmon_info, +-}; +- + static int m88e6390_get_temp(struct phy_device *phydev, long *temp) + { + int sum = 0; +@@ -2459,63 +2317,112 @@ error: + return ret; + } + +-static int m88e6390_hwmon_read(struct device *dev, +- enum hwmon_sensor_types type, +- u32 attr, int channel, long *temp) ++static int marvell_hwmon_read(struct device *dev, enum hwmon_sensor_types type, ++ u32 attr, int channel, long *temp) + { + struct phy_device *phydev = dev_get_drvdata(dev); +- int err; ++ const struct marvell_hwmon_ops *ops = to_marvell_hwmon_ops(phydev); ++ int err = -EOPNOTSUPP; + + switch (attr) { + case hwmon_temp_input: +- err = m88e6390_get_temp(phydev, temp); ++ if (ops->get_temp) ++ err = ops->get_temp(phydev, temp); ++ break; ++ case hwmon_temp_crit: ++ if (ops->get_temp_critical) ++ err = ops->get_temp_critical(phydev, temp); ++ break; ++ case hwmon_temp_max_alarm: ++ if (ops->get_temp_alarm) ++ err = ops->get_temp_alarm(phydev, temp); ++ break; ++ } ++ ++ return err; ++} ++ ++static int marvell_hwmon_write(struct device *dev, enum hwmon_sensor_types type, ++ u32 attr, int channel, long temp) ++{ ++ struct phy_device *phydev = dev_get_drvdata(dev); ++ const struct marvell_hwmon_ops *ops = to_marvell_hwmon_ops(phydev); ++ int err = -EOPNOTSUPP; ++ ++ switch (attr) { ++ case hwmon_temp_crit: ++ if (ops->set_temp_critical) ++ err = ops->set_temp_critical(phydev, temp); + break; + default: +- return -EOPNOTSUPP; ++ fallthrough; + } + + return err; + } + +-static umode_t m88e6390_hwmon_is_visible(const void *data, +- enum hwmon_sensor_types type, +- u32 attr, int channel) ++static umode_t marvell_hwmon_is_visible(const void *data, ++ enum hwmon_sensor_types type, ++ u32 attr, int channel) + { ++ const struct phy_device *phydev = data; ++ const struct marvell_hwmon_ops *ops = to_marvell_hwmon_ops(phydev); ++ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: +- return 0444; ++ return ops->get_temp ? 0444 : 0; ++ case hwmon_temp_max_alarm: ++ return ops->get_temp_alarm ? 0444 : 0; ++ case hwmon_temp_crit: ++ return (ops->get_temp_critical ? 0444 : 0) | ++ (ops->set_temp_critical ? 
0200 : 0); + default: + return 0; + } + } + +-static u32 m88e6390_hwmon_temp_config[] = { +- HWMON_T_INPUT, ++static u32 marvell_hwmon_chip_config[] = { ++ HWMON_C_REGISTER_TZ, + 0 + }; + +-static const struct hwmon_channel_info m88e6390_hwmon_temp = { ++static const struct hwmon_channel_info marvell_hwmon_chip = { ++ .type = hwmon_chip, ++ .config = marvell_hwmon_chip_config, ++}; ++ ++/* we can define HWMON_T_CRIT and HWMON_T_MAX_ALARM even though these are not ++ * defined for all PHYs, because the hwmon code checks whether the attributes ++ * exists via the .is_visible method ++ */ ++static u32 marvell_hwmon_temp_config[] = { ++ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM, ++ 0 ++}; ++ ++static const struct hwmon_channel_info marvell_hwmon_temp = { + .type = hwmon_temp, +- .config = m88e6390_hwmon_temp_config, ++ .config = marvell_hwmon_temp_config, + }; + +-static const struct hwmon_channel_info *m88e6390_hwmon_info[] = { +- &m88e1121_hwmon_chip, +- &m88e6390_hwmon_temp, ++static const struct hwmon_channel_info *marvell_hwmon_info[] = { ++ &marvell_hwmon_chip, ++ &marvell_hwmon_temp, + NULL + }; + +-static const struct hwmon_ops m88e6390_hwmon_hwmon_ops = { +- .is_visible = m88e6390_hwmon_is_visible, +- .read = m88e6390_hwmon_read, ++static const struct hwmon_ops marvell_hwmon_hwmon_ops = { ++ .is_visible = marvell_hwmon_is_visible, ++ .read = marvell_hwmon_read, ++ .write = marvell_hwmon_write, + }; + +-static const struct hwmon_chip_info m88e6390_hwmon_chip_info = { +- .ops = &m88e6390_hwmon_hwmon_ops, +- .info = m88e6390_hwmon_info, ++static const struct hwmon_chip_info marvell_hwmon_chip_info = { ++ .ops = &marvell_hwmon_hwmon_ops, ++ .info = marvell_hwmon_info, + }; + + static int marvell_hwmon_name(struct phy_device *phydev) +@@ -2538,49 +2445,48 @@ static int marvell_hwmon_name(struct phy + return 0; + } + +-static int marvell_hwmon_probe(struct phy_device *phydev, +- const struct hwmon_chip_info *chip) ++static int marvell_hwmon_probe(struct phy_device *phydev) + { ++ const struct marvell_hwmon_ops *ops = to_marvell_hwmon_ops(phydev); + struct marvell_priv *priv = phydev->priv; + struct device *dev = &phydev->mdio.dev; + int err; + ++ if (!ops) ++ return 0; ++ + err = marvell_hwmon_name(phydev); + if (err) + return err; + + priv->hwmon_dev = devm_hwmon_device_register_with_info( +- dev, priv->hwmon_name, phydev, chip, NULL); ++ dev, priv->hwmon_name, phydev, &marvell_hwmon_chip_info, NULL); + + return PTR_ERR_OR_ZERO(priv->hwmon_dev); + } + +-static int m88e1121_hwmon_probe(struct phy_device *phydev) +-{ +- return marvell_hwmon_probe(phydev, &m88e1121_hwmon_chip_info); +-} ++static const struct marvell_hwmon_ops m88e1121_hwmon_ops = { ++ .get_temp = m88e1121_get_temp, ++}; + +-static int m88e1510_hwmon_probe(struct phy_device *phydev) +-{ +- return marvell_hwmon_probe(phydev, &m88e1510_hwmon_chip_info); +-} ++static const struct marvell_hwmon_ops m88e1510_hwmon_ops = { ++ .get_temp = m88e1510_get_temp, ++ .get_temp_critical = m88e1510_get_temp_critical, ++ .set_temp_critical = m88e1510_set_temp_critical, ++ .get_temp_alarm = m88e1510_get_temp_alarm, ++}; ++ ++static const struct marvell_hwmon_ops m88e6390_hwmon_ops = { ++ .get_temp = m88e6390_get_temp, ++}; ++ ++#define DEF_MARVELL_HWMON_OPS(s) (&(s)) + +-static int m88e6390_hwmon_probe(struct phy_device *phydev) +-{ +- return marvell_hwmon_probe(phydev, &m88e6390_hwmon_chip_info); +-} + #else +-static int m88e1121_hwmon_probe(struct phy_device *phydev) +-{ +- return 0; +-} + +-static int m88e1510_hwmon_probe(struct 
phy_device *phydev) +-{ +- return 0; +-} ++#define DEF_MARVELL_HWMON_OPS(s) NULL + +-static int m88e6390_hwmon_probe(struct phy_device *phydev) ++static int marvell_hwmon_probe(struct phy_device *phydev) + { + return 0; + } +@@ -2596,40 +2502,7 @@ static int marvell_probe(struct phy_devi + + phydev->priv = priv; + +- return 0; +-} +- +-static int m88e1121_probe(struct phy_device *phydev) +-{ +- int err; +- +- err = marvell_probe(phydev); +- if (err) +- return err; +- +- return m88e1121_hwmon_probe(phydev); +-} +- +-static int m88e1510_probe(struct phy_device *phydev) +-{ +- int err; +- +- err = marvell_probe(phydev); +- if (err) +- return err; +- +- return m88e1510_hwmon_probe(phydev); +-} +- +-static int m88e6390_probe(struct phy_device *phydev) +-{ +- int err; +- +- err = marvell_probe(phydev); +- if (err) +- return err; +- +- return m88e6390_hwmon_probe(phydev); ++ return marvell_hwmon_probe(phydev); + } + + static struct phy_driver marvell_drivers[] = { +@@ -2714,8 +2587,9 @@ static struct phy_driver marvell_drivers + .phy_id = MARVELL_PHY_ID_88E1121R, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1121R", ++ .driver_data = DEF_MARVELL_HWMON_OPS(m88e1121_hwmon_ops), + /* PHY_GBIT_FEATURES */ +- .probe = m88e1121_probe, ++ .probe = marvell_probe, + .config_init = marvell_config_init, + .config_aneg = m88e1121_config_aneg, + .read_status = marvell_read_status, +@@ -2834,9 +2708,10 @@ static struct phy_driver marvell_drivers + .phy_id = MARVELL_PHY_ID_88E1510, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1510", ++ .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops), + .features = PHY_GBIT_FIBRE_FEATURES, + .flags = PHY_POLL_CABLE_TEST, +- .probe = m88e1510_probe, ++ .probe = marvell_probe, + .config_init = m88e1510_config_init, + .config_aneg = m88e1510_config_aneg, + .read_status = marvell_read_status, +@@ -2863,9 +2738,10 @@ static struct phy_driver marvell_drivers + .phy_id = MARVELL_PHY_ID_88E1540, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1540", ++ .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops), + /* PHY_GBIT_FEATURES */ + .flags = PHY_POLL_CABLE_TEST, +- .probe = m88e1510_probe, ++ .probe = marvell_probe, + .config_init = marvell_config_init, + .config_aneg = m88e1510_config_aneg, + .read_status = marvell_read_status, +@@ -2889,7 +2765,8 @@ static struct phy_driver marvell_drivers + .phy_id = MARVELL_PHY_ID_88E1545, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1545", +- .probe = m88e1510_probe, ++ .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops), ++ .probe = marvell_probe, + /* PHY_GBIT_FEATURES */ + .flags = PHY_POLL_CABLE_TEST, + .config_init = marvell_config_init, +@@ -2935,9 +2812,10 @@ static struct phy_driver marvell_drivers + .phy_id = MARVELL_PHY_ID_88E6341_FAMILY, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E6341 Family", ++ .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops), + /* PHY_GBIT_FEATURES */ + .flags = PHY_POLL_CABLE_TEST, +- .probe = m88e1510_probe, ++ .probe = marvell_probe, + .config_init = marvell_config_init, + .config_aneg = m88e6390_config_aneg, + .read_status = marvell_read_status, +@@ -2961,9 +2839,10 @@ static struct phy_driver marvell_drivers + .phy_id = MARVELL_PHY_ID_88E6390_FAMILY, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E6390 Family", ++ .driver_data = DEF_MARVELL_HWMON_OPS(m88e6390_hwmon_ops), + /* PHY_GBIT_FEATURES */ + .flags = PHY_POLL_CABLE_TEST, +- .probe = m88e6390_probe, ++ .probe = marvell_probe, + .config_init = 
marvell_config_init, + .config_aneg = m88e6390_config_aneg, + .read_status = marvell_read_status, +@@ -2987,7 +2866,8 @@ static struct phy_driver marvell_drivers + .phy_id = MARVELL_PHY_ID_88E1340S, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1340S", +- .probe = m88e1510_probe, ++ .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops), ++ .probe = marvell_probe, + /* PHY_GBIT_FEATURES */ + .config_init = marvell_config_init, + .config_aneg = m88e1510_config_aneg, +@@ -3009,7 +2889,8 @@ static struct phy_driver marvell_drivers + .phy_id = MARVELL_PHY_ID_88E1548P, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1548P", +- .probe = m88e1510_probe, ++ .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops), ++ .probe = marvell_probe, + .features = PHY_GBIT_FIBRE_FEATURES, + .config_init = marvell_config_init, + .config_aneg = m88e1510_config_aneg, diff --git a/target/linux/generic/backport-5.15/713-v5.15-net-phy-marvell-add-SFP-support-for-88E1510.patch b/target/linux/generic/backport-5.15/713-v5.15-net-phy-marvell-add-SFP-support-for-88E1510.patch new file mode 100644 index 0000000000..b86e9bf640 --- /dev/null +++ b/target/linux/generic/backport-5.15/713-v5.15-net-phy-marvell-add-SFP-support-for-88E1510.patch @@ -0,0 +1,161 @@ +From b697d9d38a5a5ab405d7cc4743d39fe2c5d7517c Mon Sep 17 00:00:00 2001 +From: Ivan Bornyakov <i.bornyakov@metrotek.ru> +Date: Thu, 12 Aug 2021 16:42:56 +0300 +Subject: [PATCH] net: phy: marvell: add SFP support for 88E1510 + +Add support for SFP cages connected to the Marvell 88E1512 transceiver. +88E1512 supports for SGMII/1000Base-X/100Base-FX media type with RGMII +on system interface. Configure PHY to appropriate mode depending on the +type of SFP inserted. On SFP removal configure PHY to the RGMII-copper +mode so RJ-45 port can still work. 
+ +Signed-off-by: Ivan Bornyakov <i.bornyakov@metrotek.ru> +Link: https://lore.kernel.org/r/20210812134256.2436-1-i.bornyakov@metrotek.ru +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/phy/marvell.c | 105 +++++++++++++++++++++++++++++++++++++- + 1 file changed, 104 insertions(+), 1 deletion(-) + +--- a/drivers/net/phy/marvell.c ++++ b/drivers/net/phy/marvell.c +@@ -32,6 +32,7 @@ + #include <linux/marvell_phy.h> + #include <linux/bitfield.h> + #include <linux/of.h> ++#include <linux/sfp.h> + + #include <linux/io.h> + #include <asm/irq.h> +@@ -46,6 +47,7 @@ + #define MII_MARVELL_MISC_TEST_PAGE 0x06 + #define MII_MARVELL_VCT7_PAGE 0x07 + #define MII_MARVELL_WOL_PAGE 0x11 ++#define MII_MARVELL_MODE_PAGE 0x12 + + #define MII_M1011_IEVENT 0x13 + #define MII_M1011_IEVENT_CLEAR 0x0000 +@@ -162,7 +164,14 @@ + + #define MII_88E1510_GEN_CTRL_REG_1 0x14 + #define MII_88E1510_GEN_CTRL_REG_1_MODE_MASK 0x7 ++#define MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII 0x0 /* RGMII to copper */ + #define MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII 0x1 /* SGMII to copper */ ++/* RGMII to 1000BASE-X */ ++#define MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_1000X 0x2 ++/* RGMII to 100BASE-FX */ ++#define MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_100FX 0x3 ++/* RGMII to SGMII */ ++#define MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_SGMII 0x4 + #define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */ + + #define MII_VCT5_TX_RX_MDI0_COUPLING 0x10 +@@ -2505,6 +2514,100 @@ static int marvell_probe(struct phy_devi + return marvell_hwmon_probe(phydev); + } + ++static int m88e1510_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) ++{ ++ struct phy_device *phydev = upstream; ++ phy_interface_t interface; ++ struct device *dev; ++ int oldpage; ++ int ret = 0; ++ u16 mode; ++ ++ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; ++ ++ dev = &phydev->mdio.dev; ++ ++ sfp_parse_support(phydev->sfp_bus, id, supported); ++ interface = sfp_select_interface(phydev->sfp_bus, supported); ++ ++ dev_info(dev, "%s SFP module inserted\n", phy_modes(interface)); ++ ++ switch (interface) { ++ case PHY_INTERFACE_MODE_1000BASEX: ++ mode = MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_1000X; ++ ++ break; ++ case PHY_INTERFACE_MODE_100BASEX: ++ mode = MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_100FX; ++ ++ break; ++ case PHY_INTERFACE_MODE_SGMII: ++ mode = MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_SGMII; ++ ++ break; ++ default: ++ dev_err(dev, "Incompatible SFP module inserted\n"); ++ ++ return -EINVAL; ++ } ++ ++ oldpage = phy_select_page(phydev, MII_MARVELL_MODE_PAGE); ++ if (oldpage < 0) ++ goto error; ++ ++ ret = __phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1, ++ MII_88E1510_GEN_CTRL_REG_1_MODE_MASK, mode); ++ if (ret < 0) ++ goto error; ++ ++ ret = __phy_set_bits(phydev, MII_88E1510_GEN_CTRL_REG_1, ++ MII_88E1510_GEN_CTRL_REG_1_RESET); ++ ++error: ++ return phy_restore_page(phydev, oldpage, ret); ++} ++ ++static void m88e1510_sfp_remove(void *upstream) ++{ ++ struct phy_device *phydev = upstream; ++ int oldpage; ++ int ret = 0; ++ ++ oldpage = phy_select_page(phydev, MII_MARVELL_MODE_PAGE); ++ if (oldpage < 0) ++ goto error; ++ ++ ret = __phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1, ++ MII_88E1510_GEN_CTRL_REG_1_MODE_MASK, ++ MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII); ++ if (ret < 0) ++ goto error; ++ ++ ret = __phy_set_bits(phydev, MII_88E1510_GEN_CTRL_REG_1, ++ MII_88E1510_GEN_CTRL_REG_1_RESET); ++ ++error: ++ phy_restore_page(phydev, oldpage, ret); ++} ++ ++static const struct sfp_upstream_ops m88e1510_sfp_ops = { ++ .module_insert = 
m88e1510_sfp_insert, ++ .module_remove = m88e1510_sfp_remove, ++ .attach = phy_sfp_attach, ++ .detach = phy_sfp_detach, ++}; ++ ++static int m88e1510_probe(struct phy_device *phydev) ++{ ++ int err; ++ ++ err = marvell_probe(phydev); ++ if (err) ++ return err; ++ ++ return phy_sfp_probe(phydev, &m88e1510_sfp_ops); ++} ++ + static struct phy_driver marvell_drivers[] = { + { + .phy_id = MARVELL_PHY_ID_88E1101, +@@ -2711,7 +2814,7 @@ static struct phy_driver marvell_drivers + .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops), + .features = PHY_GBIT_FIBRE_FEATURES, + .flags = PHY_POLL_CABLE_TEST, +- .probe = marvell_probe, ++ .probe = m88e1510_probe, + .config_init = m88e1510_config_init, + .config_aneg = m88e1510_config_aneg, + .read_status = marvell_read_status, diff --git a/target/linux/generic/backport-5.15/719-v5.12-net-dsa-automatically-bring-up-DSA-master-when-openi.patch b/target/linux/generic/backport-5.15/719-v5.12-net-dsa-automatically-bring-up-DSA-master-when-openi.patch new file mode 100644 index 0000000000..3b630377f9 --- /dev/null +++ b/target/linux/generic/backport-5.15/719-v5.12-net-dsa-automatically-bring-up-DSA-master-when-openi.patch @@ -0,0 +1,85 @@ +From 9d5ef190e5615a7b63af89f88c4106a5bc127974 Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean <vladimir.oltean@nxp.com> +Date: Fri, 5 Feb 2021 15:37:10 +0200 +Subject: [PATCH] net: dsa: automatically bring up DSA master when opening user + port + +DSA wants the master interface to be open before the user port is due to +historical reasons. The promiscuity of interfaces that are down used to +have issues, as referenced Lennert Buytenhek in commit df02c6ff2e39 +("dsa: fix master interface allmulti/promisc handling"). + +The bugfix mentioned there, commit b6c40d68ff64 ("net: only invoke +dev->change_rx_flags when device is UP"), was basically a "don't do +that" approach to working around the promiscuity while down issue. + +Further work done by Vlad Yasevich in commit d2615bf45069 ("net: core: +Always propagate flag changes to interfaces") has resolved the +underlying issue, and it is strictly up to the DSA and 8021q drivers +now, it is no longer mandated by the networking core that the master +interface must be up when changing its promiscuity. + +From DSA's point of view, deciding to error out in dsa_slave_open +because the master isn't up is +(a) a bad user experience and +(b) knocking at an open door. +Even if there still was an issue with promiscuity while down, DSA could +still just open the master and avoid it. + +Doing it this way has the additional benefit that user space can now +remove DSA-specific workarounds, like systemd-networkd with BindCarrier: +https://github.com/systemd/systemd/issues/7478 + +And we can finally remove one of the 2 bullets in the "Common pitfalls +using DSA setups" chapter. 
+ +Tested with two cascaded DSA switches: + +$ ip link set sw0p2 up +fsl_enetc 0000:00:00.2 eno2: configuring for fixed/internal link mode +fsl_enetc 0000:00:00.2 eno2: Link is Up - 1Gbps/Full - flow control rx/tx +mscc_felix 0000:00:00.5 swp0: configuring for fixed/sgmii link mode +mscc_felix 0000:00:00.5 swp0: Link is Up - 1Gbps/Full - flow control off +8021q: adding VLAN 0 to HW filter on device swp0 +sja1105 spi2.0 sw0p2: configuring for phy/rgmii-id link mode +IPv6: ADDRCONF(NETDEV_CHANGE): eno2: link becomes ready +IPv6: ADDRCONF(NETDEV_CHANGE): swp0: link becomes ready + +Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + Documentation/networking/dsa/dsa.rst | 4 ---- + net/dsa/slave.c | 7 +++++-- + 2 files changed, 5 insertions(+), 6 deletions(-) + +--- a/Documentation/networking/dsa/dsa.rst ++++ b/Documentation/networking/dsa/dsa.rst +@@ -273,10 +273,6 @@ will not make us go through the switch t + the Ethernet switch on the other end, expecting a tag will typically drop this + frame. + +-Slave network devices check that the master network device is UP before allowing +-you to administratively bring UP these slave network devices. A common +-configuration mistake is forgetting to bring UP the master network device first. +- + Interactions with other subsystems + ================================== + +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -68,8 +68,11 @@ static int dsa_slave_open(struct net_dev + struct dsa_port *dp = dsa_slave_to_port(dev); + int err; + +- if (!(master->flags & IFF_UP)) +- return -ENETDOWN; ++ err = dev_open(master, NULL); ++ if (err < 0) { ++ netdev_err(dev, "failed to open master %s\n", master->name); ++ goto out; ++ } + + if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) { + err = dev_uc_add(master, dev->dev_addr); diff --git a/target/linux/generic/backport-5.15/720-v5.12-net-bridge-notify-switchdev-of-disappearance-of-old-.patch b/target/linux/generic/backport-5.15/720-v5.12-net-bridge-notify-switchdev-of-disappearance-of-old-.patch new file mode 100644 index 0000000000..c43cb4d1f2 --- /dev/null +++ b/target/linux/generic/backport-5.15/720-v5.12-net-bridge-notify-switchdev-of-disappearance-of-old-.patch @@ -0,0 +1,126 @@ +From 90dc8fd36078a536671adae884d0b929cce6480a Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean <vladimir.oltean@nxp.com> +Date: Wed, 6 Jan 2021 11:51:30 +0200 +Subject: [PATCH] net: bridge: notify switchdev of disappearance of old FDB + entry upon migration + +Currently the bridge emits atomic switchdev notifications for +dynamically learnt FDB entries. Monitoring these notifications works +wonders for switchdev drivers that want to keep their hardware FDB in +sync with the bridge's FDB. 
+ +For example station A wants to talk to station B in the diagram below, +and we are concerned with the behavior of the bridge on the DUT device: + + DUT + +-------------------------------------+ + | br0 | + | +------+ +------+ +------+ +------+ | + | | | | | | | | | | + | | swp0 | | swp1 | | swp2 | | eth0 | | + +-------------------------------------+ + | | | + Station A | | + | | + +--+------+--+ +--+------+--+ + | | | | | | | | + | | swp0 | | | | swp0 | | + Another | +------+ | | +------+ | Another + switch | br0 | | br0 | switch + | +------+ | | +------+ | + | | | | | | | | + | | swp1 | | | | swp1 | | + +--+------+--+ +--+------+--+ + | + Station B + +Interfaces swp0, swp1, swp2 are handled by a switchdev driver that has +the following property: frames injected from its control interface bypass +the internal address analyzer logic, and therefore, this hardware does +not learn from the source address of packets transmitted by the network +stack through it. So, since bridging between eth0 (where Station B is +attached) and swp0 (where Station A is attached) is done in software, +the switchdev hardware will never learn the source address of Station B. +So the traffic towards that destination will be treated as unknown, i.e. +flooded. + +This is where the bridge notifications come in handy. When br0 on the +DUT sees frames with Station B's MAC address on eth0, the switchdev +driver gets these notifications and can install a rule to send frames +towards Station B's address that are incoming from swp0, swp1, swp2, +only towards the control interface. This is all switchdev driver private +business, which the notification makes possible. + +All is fine until someone unplugs Station B's cable and moves it to the +other switch: + + DUT + +-------------------------------------+ + | br0 | + | +------+ +------+ +------+ +------+ | + | | | | | | | | | | + | | swp0 | | swp1 | | swp2 | | eth0 | | + +-------------------------------------+ + | | | + Station A | | + | | + +--+------+--+ +--+------+--+ + | | | | | | | | + | | swp0 | | | | swp0 | | + Another | +------+ | | +------+ | Another + switch | br0 | | br0 | switch + | +------+ | | +------+ | + | | | | | | | | + | | swp1 | | | | swp1 | | + +--+------+--+ +--+------+--+ + | + Station B + +Luckily for the use cases we care about, Station B is noisy enough that +the DUT hears it (on swp1 this time). swp1 receives the frames and +delivers them to the bridge, who enters the unlikely path in br_fdb_update +of updating an existing entry. It moves the entry in the software bridge +to swp1 and emits an addition notification towards that. + +As far as the switchdev driver is concerned, all that it needs to ensure +is that traffic between Station A and Station B is not forever broken. +If it does nothing, then the stale rule to send frames for Station B +towards the control interface remains in place. But Station B is no +longer reachable via the control interface, but via a port that can +offload the bridge port learning attribute. It's just that the port is +prevented from learning this address, since the rule overrides FDB +updates. So the rule needs to go. The question is via what mechanism. + +It sure would be possible for this switchdev driver to keep track of all +addresses which are sent to the control interface, and then also listen +for bridge notifier events on its own ports, searching for the ones that +have a MAC address which was previously sent to the control interface. +But this is cumbersome and inefficient. 
Instead, with one small change, +the bridge could notify of the address deletion from the old port, in a +symmetrical manner with how it did for the insertion. Then the switchdev +driver would not be required to monitor learn/forget events for its own +ports. It could just delete the rule towards the control interface upon +bridge entry migration. This would make hardware address learning be +possible again. Then it would take a few more packets until the hardware +and software FDB would be in sync again. + +Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com> +Acked-by: Nikolay Aleksandrov <nikolay@nvidia.com> +Reviewed-by: Ido Schimmel <idosch@nvidia.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + net/bridge/br_fdb.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/net/bridge/br_fdb.c ++++ b/net/bridge/br_fdb.c +@@ -602,6 +602,7 @@ void br_fdb_update(struct net_bridge *br + /* fastpath: update of existing entry */ + if (unlikely(source != fdb->dst && + !test_bit(BR_FDB_STICKY, &fdb->flags))) { ++ br_switchdev_fdb_notify(fdb, RTM_DELNEIGH); + fdb->dst = source; + fdb_modified = true; + /* Take over HW learned entry */ diff --git a/target/linux/generic/backport-5.15/721-v5.12-net-dsa-be-louder-when-a-non-legacy-FDB-operation-fa.patch b/target/linux/generic/backport-5.15/721-v5.12-net-dsa-be-louder-when-a-non-legacy-FDB-operation-fa.patch new file mode 100644 index 0000000000..f9337590f7 --- /dev/null +++ b/target/linux/generic/backport-5.15/721-v5.12-net-dsa-be-louder-when-a-non-legacy-FDB-operation-fa.patch @@ -0,0 +1,52 @@ +From 2fd186501b1cff155cc4a755c210793cfc0dffb5 Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean <vladimir.oltean@nxp.com> +Date: Wed, 6 Jan 2021 11:51:31 +0200 +Subject: [PATCH] net: dsa: be louder when a non-legacy FDB operation fails + +The dev_close() call was added in commit c9eb3e0f8701 ("net: dsa: Add +support for learning FDB through notification") "to indicate inconsistent +situation" when we could not delete an FDB entry from the port. + +bridge fdb del d8:58:d7:00:ca:6d dev swp0 self master + +It is a bit drastic and at the same time not helpful if the above fails +to only print with netdev_dbg log level, but on the other hand to bring +the interface down. + +So increase the verbosity of the error message, and drop dev_close(). 
+ +Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + net/dsa/slave.c | 10 +++++++--- + 1 file changed, 7 insertions(+), 3 deletions(-) + +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -2112,7 +2112,9 @@ static void dsa_slave_switchdev_event_wo + + err = dsa_port_fdb_add(dp, fdb_info->addr, fdb_info->vid); + if (err) { +- netdev_dbg(dev, "fdb add failed err=%d\n", err); ++ netdev_err(dev, ++ "failed to add %pM vid %d to fdb: %d\n", ++ fdb_info->addr, fdb_info->vid, err); + break; + } + fdb_info->offloaded = true; +@@ -2127,9 +2129,11 @@ static void dsa_slave_switchdev_event_wo + + err = dsa_port_fdb_del(dp, fdb_info->addr, fdb_info->vid); + if (err) { +- netdev_dbg(dev, "fdb del failed err=%d\n", err); +- dev_close(dev); ++ netdev_err(dev, ++ "failed to delete %pM vid %d from fdb: %d\n", ++ fdb_info->addr, fdb_info->vid, err); + } ++ + break; + } + rtnl_unlock(); diff --git a/target/linux/generic/backport-5.15/722-v5.12-net-dsa-don-t-use-switchdev_notifier_fdb_info-in-dsa.patch b/target/linux/generic/backport-5.15/722-v5.12-net-dsa-don-t-use-switchdev_notifier_fdb_info-in-dsa.patch new file mode 100644 index 0000000000..c1aa8fda82 --- /dev/null +++ b/target/linux/generic/backport-5.15/722-v5.12-net-dsa-don-t-use-switchdev_notifier_fdb_info-in-dsa.patch @@ -0,0 +1,226 @@ +From c4bb76a9a0ef87c4cc1f636defed5f12deb9f5a7 Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean <vladimir.oltean@nxp.com> +Date: Wed, 6 Jan 2021 11:51:32 +0200 +Subject: [PATCH] net: dsa: don't use switchdev_notifier_fdb_info in + dsa_switchdev_event_work + +Currently DSA doesn't add FDB entries on the CPU port, because it only +does so through switchdev, which is associated with a net_device, and +there are none of those for the CPU port. + +But actually FDB addresses on the CPU port have some use cases of their +own, if the switchdev operations are initiated from within the DSA +layer. There is just one problem with the existing code: it passes a +structure in dsa_switchdev_event_work which was retrieved directly from +switchdev, so it contains a net_device. We need to generalize the +contents to something that covers the CPU port as well: the "ds, port" +tuple is fine for that. + +Note that the new procedure for notifying the successful FDB offload is +inspired from the rocker model. + +Also, nothing was being done if added_by_user was false. Let's check for +that a lot earlier, and don't actually bother to schedule the worker +for nothing. 
+ +Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + net/dsa/dsa_priv.h | 12 +++++ + net/dsa/slave.c | 106 ++++++++++++++++++++++----------------------- + 2 files changed, 65 insertions(+), 53 deletions(-) + +--- a/net/dsa/dsa_priv.h ++++ b/net/dsa/dsa_priv.h +@@ -73,6 +73,18 @@ struct dsa_notifier_mtu_info { + int mtu; + }; + ++struct dsa_switchdev_event_work { ++ struct dsa_switch *ds; ++ int port; ++ struct work_struct work; ++ unsigned long event; ++ /* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and ++ * SWITCHDEV_FDB_DEL_TO_DEVICE ++ */ ++ unsigned char addr[ETH_ALEN]; ++ u16 vid; ++}; ++ + struct dsa_slave_priv { + /* Copy of CPU port xmit for faster access in slave transmit hot path */ + struct sk_buff * (*xmit)(struct sk_buff *skb, +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -2087,76 +2087,66 @@ static int dsa_slave_netdevice_event(str + return NOTIFY_DONE; + } + +-struct dsa_switchdev_event_work { +- struct work_struct work; +- struct switchdev_notifier_fdb_info fdb_info; +- struct net_device *dev; +- unsigned long event; +-}; ++static void ++dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work) ++{ ++ struct dsa_switch *ds = switchdev_work->ds; ++ struct switchdev_notifier_fdb_info info; ++ struct dsa_port *dp; ++ ++ if (!dsa_is_user_port(ds, switchdev_work->port)) ++ return; ++ ++ info.addr = switchdev_work->addr; ++ info.vid = switchdev_work->vid; ++ info.offloaded = true; ++ dp = dsa_to_port(ds, switchdev_work->port); ++ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, ++ dp->slave, &info.info, NULL); ++} + + static void dsa_slave_switchdev_event_work(struct work_struct *work) + { + struct dsa_switchdev_event_work *switchdev_work = + container_of(work, struct dsa_switchdev_event_work, work); +- struct net_device *dev = switchdev_work->dev; +- struct switchdev_notifier_fdb_info *fdb_info; +- struct dsa_port *dp = dsa_slave_to_port(dev); ++ struct dsa_switch *ds = switchdev_work->ds; ++ struct dsa_port *dp; + int err; + ++ dp = dsa_to_port(ds, switchdev_work->port); ++ + rtnl_lock(); + switch (switchdev_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: +- fdb_info = &switchdev_work->fdb_info; +- if (!fdb_info->added_by_user) +- break; +- +- err = dsa_port_fdb_add(dp, fdb_info->addr, fdb_info->vid); ++ err = dsa_port_fdb_add(dp, switchdev_work->addr, ++ switchdev_work->vid); + if (err) { +- netdev_err(dev, +- "failed to add %pM vid %d to fdb: %d\n", +- fdb_info->addr, fdb_info->vid, err); ++ dev_err(ds->dev, ++ "port %d failed to add %pM vid %d to fdb: %d\n", ++ dp->index, switchdev_work->addr, ++ switchdev_work->vid, err); + break; + } +- fdb_info->offloaded = true; +- call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev, +- &fdb_info->info, NULL); ++ dsa_fdb_offload_notify(switchdev_work); + break; + + case SWITCHDEV_FDB_DEL_TO_DEVICE: +- fdb_info = &switchdev_work->fdb_info; +- if (!fdb_info->added_by_user) +- break; +- +- err = dsa_port_fdb_del(dp, fdb_info->addr, fdb_info->vid); ++ err = dsa_port_fdb_del(dp, switchdev_work->addr, ++ switchdev_work->vid); + if (err) { +- netdev_err(dev, +- "failed to delete %pM vid %d from fdb: %d\n", +- fdb_info->addr, fdb_info->vid, err); ++ dev_err(ds->dev, ++ "port %d failed to delete %pM vid %d from fdb: %d\n", ++ dp->index, switchdev_work->addr, ++ switchdev_work->vid, err); + } + + break; + } + rtnl_unlock(); + +- kfree(switchdev_work->fdb_info.addr); + kfree(switchdev_work); +- dev_put(dev); +-} 
+- +-static int +-dsa_slave_switchdev_fdb_work_init(struct dsa_switchdev_event_work * +- switchdev_work, +- const struct switchdev_notifier_fdb_info * +- fdb_info) +-{ +- memcpy(&switchdev_work->fdb_info, fdb_info, +- sizeof(switchdev_work->fdb_info)); +- switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); +- if (!switchdev_work->fdb_info.addr) +- return -ENOMEM; +- ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, +- fdb_info->addr); +- return 0; ++ if (dsa_is_user_port(ds, dp->index)) ++ dev_put(dp->slave); + } + + /* Called under rcu_read_lock() */ +@@ -2164,7 +2154,9 @@ static int dsa_slave_switchdev_event(str + unsigned long event, void *ptr) + { + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); ++ const struct switchdev_notifier_fdb_info *fdb_info; + struct dsa_switchdev_event_work *switchdev_work; ++ struct dsa_port *dp; + int err; + + if (event == SWITCHDEV_PORT_ATTR_SET) { +@@ -2177,20 +2169,32 @@ static int dsa_slave_switchdev_event(str + if (!dsa_slave_dev_check(dev)) + return NOTIFY_DONE; + ++ dp = dsa_slave_to_port(dev); ++ + switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); + if (!switchdev_work) + return NOTIFY_BAD; + + INIT_WORK(&switchdev_work->work, + dsa_slave_switchdev_event_work); +- switchdev_work->dev = dev; ++ switchdev_work->ds = dp->ds; ++ switchdev_work->port = dp->index; + switchdev_work->event = event; + + switch (event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: +- if (dsa_slave_switchdev_fdb_work_init(switchdev_work, ptr)) +- goto err_fdb_work_init; ++ fdb_info = ptr; ++ ++ if (!fdb_info->added_by_user) { ++ kfree(switchdev_work); ++ return NOTIFY_OK; ++ } ++ ++ ether_addr_copy(switchdev_work->addr, ++ fdb_info->addr); ++ switchdev_work->vid = fdb_info->vid; ++ + dev_hold(dev); + break; + default: +@@ -2200,10 +2204,6 @@ static int dsa_slave_switchdev_event(str + + dsa_schedule_work(&switchdev_work->work); + return NOTIFY_OK; +- +-err_fdb_work_init: +- kfree(switchdev_work); +- return NOTIFY_BAD; + } + + static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused, diff --git a/target/linux/generic/backport-5.15/723-v5.12-net-dsa-move-switchdev-event-implementation-under-th.patch b/target/linux/generic/backport-5.15/723-v5.12-net-dsa-move-switchdev-event-implementation-under-th.patch new file mode 100644 index 0000000000..9131df70d3 --- /dev/null +++ b/target/linux/generic/backport-5.15/723-v5.12-net-dsa-move-switchdev-event-implementation-under-th.patch @@ -0,0 +1,85 @@ +From 447d290a58bd335d68f665713842365d3d6447df Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean <vladimir.oltean@nxp.com> +Date: Wed, 6 Jan 2021 11:51:33 +0200 +Subject: [PATCH] net: dsa: move switchdev event implementation under the same + switch/case statement + +We'll need to start listening to SWITCHDEV_FDB_{ADD,DEL}_TO_DEVICE +events even for interfaces where dsa_slave_dev_check returns false, so +we need that check inside the switch-case statement for SWITCHDEV_FDB_*. + +This movement also avoids a useless allocation / free of switchdev_work +on the untreated "default event" case. 
+ +Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + net/dsa/slave.c | 35 ++++++++++++++++------------------- + 1 file changed, 16 insertions(+), 19 deletions(-) + +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -2159,31 +2159,29 @@ static int dsa_slave_switchdev_event(str + struct dsa_port *dp; + int err; + +- if (event == SWITCHDEV_PORT_ATTR_SET) { ++ switch (event) { ++ case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + dsa_slave_dev_check, + dsa_slave_port_attr_set); + return notifier_from_errno(err); +- } +- +- if (!dsa_slave_dev_check(dev)) +- return NOTIFY_DONE; ++ case SWITCHDEV_FDB_ADD_TO_DEVICE: ++ case SWITCHDEV_FDB_DEL_TO_DEVICE: ++ if (!dsa_slave_dev_check(dev)) ++ return NOTIFY_DONE; + +- dp = dsa_slave_to_port(dev); ++ dp = dsa_slave_to_port(dev); + +- switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); +- if (!switchdev_work) +- return NOTIFY_BAD; +- +- INIT_WORK(&switchdev_work->work, +- dsa_slave_switchdev_event_work); +- switchdev_work->ds = dp->ds; +- switchdev_work->port = dp->index; +- switchdev_work->event = event; ++ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); ++ if (!switchdev_work) ++ return NOTIFY_BAD; ++ ++ INIT_WORK(&switchdev_work->work, ++ dsa_slave_switchdev_event_work); ++ switchdev_work->ds = dp->ds; ++ switchdev_work->port = dp->index; ++ switchdev_work->event = event; + +- switch (event) { +- case SWITCHDEV_FDB_ADD_TO_DEVICE: +- case SWITCHDEV_FDB_DEL_TO_DEVICE: + fdb_info = ptr; + + if (!fdb_info->added_by_user) { +@@ -2196,13 +2194,12 @@ static int dsa_slave_switchdev_event(str + switchdev_work->vid = fdb_info->vid; + + dev_hold(dev); ++ dsa_schedule_work(&switchdev_work->work); + break; + default: +- kfree(switchdev_work); + return NOTIFY_DONE; + } + +- dsa_schedule_work(&switchdev_work->work); + return NOTIFY_OK; + } + diff --git a/target/linux/generic/backport-5.15/724-v5.12-net-dsa-exit-early-in-dsa_slave_switchdev_event-if-w.patch b/target/linux/generic/backport-5.15/724-v5.12-net-dsa-exit-early-in-dsa_slave_switchdev_event-if-w.patch new file mode 100644 index 0000000000..b7b6ebe461 --- /dev/null +++ b/target/linux/generic/backport-5.15/724-v5.12-net-dsa-exit-early-in-dsa_slave_switchdev_event-if-w.patch @@ -0,0 +1,42 @@ +From 5fb4a451a87d8ed3363d28b63a3295399373d6c4 Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean <vladimir.oltean@nxp.com> +Date: Wed, 6 Jan 2021 11:51:34 +0200 +Subject: [PATCH] net: dsa: exit early in dsa_slave_switchdev_event if we can't + program the FDB + +Right now, the following would happen for a switch driver that does not +implement .port_fdb_add or .port_fdb_del. + +dsa_slave_switchdev_event returns NOTIFY_OK and schedules: +-> dsa_slave_switchdev_event_work + -> dsa_port_fdb_add + -> dsa_port_notify(DSA_NOTIFIER_FDB_ADD) + -> dsa_switch_fdb_add + -> if (!ds->ops->port_fdb_add) return -EOPNOTSUPP; + -> an error is printed with dev_dbg, and + dsa_fdb_offload_notify(switchdev_work) is not called. + +We can avoid scheduling the worker for nothing and say NOTIFY_DONE. +Because we don't call dsa_fdb_offload_notify, the static FDB entry will +remain just in the software bridge. 
+ +Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + net/dsa/slave.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -2172,6 +2172,9 @@ static int dsa_slave_switchdev_event(str + + dp = dsa_slave_to_port(dev); + ++ if (!dp->ds->ops->port_fdb_add || !dp->ds->ops->port_fdb_del) ++ return NOTIFY_DONE; ++ + switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); + if (!switchdev_work) + return NOTIFY_BAD; diff --git a/target/linux/generic/backport-5.15/725-v5.12-net-dsa-listen-for-SWITCHDEV_-FDB-DEL-_ADD_TO_DEVICE.patch b/target/linux/generic/backport-5.15/725-v5.12-net-dsa-listen-for-SWITCHDEV_-FDB-DEL-_ADD_TO_DEVICE.patch new file mode 100644 index 0000000000..e7b9af1951 --- /dev/null +++ b/target/linux/generic/backport-5.15/725-v5.12-net-dsa-listen-for-SWITCHDEV_-FDB-DEL-_ADD_TO_DEVICE.patch @@ -0,0 +1,264 @@ +From d5f19486cee79d04c054427577ac96ed123706db Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean <vladimir.oltean@nxp.com> +Date: Wed, 6 Jan 2021 11:51:35 +0200 +Subject: [PATCH] net: dsa: listen for SWITCHDEV_{FDB,DEL}_ADD_TO_DEVICE on + foreign bridge neighbors + +Some DSA switches (and not only) cannot learn source MAC addresses from +packets injected from the CPU. They only perform hardware address +learning from inbound traffic. + +This can be problematic when we have a bridge spanning some DSA switch +ports and some non-DSA ports (which we'll call "foreign interfaces" from +DSA's perspective). + +There are 2 classes of problems created by the lack of learning on +CPU-injected traffic: +- excessive flooding, due to the fact that DSA treats those addresses as + unknown +- the risk of stale routes, which can lead to temporary packet loss + +To illustrate the second class, consider the following situation, which +is common in production equipment (wireless access points, where there +is a WLAN interface and an Ethernet switch, and these form a single +bridging domain). + + AP 1: + +------------------------------------------------------------------------+ + | br0 | + +------------------------------------------------------------------------+ + +------------+ +------------+ +------------+ +------------+ +------------+ + | swp0 | | swp1 | | swp2 | | swp3 | | wlan0 | + +------------+ +------------+ +------------+ +------------+ +------------+ + | ^ ^ + | | | + | | | + | Client A Client B + | + | + | + +------------+ +------------+ +------------+ +------------+ +------------+ + | swp0 | | swp1 | | swp2 | | swp3 | | wlan0 | + +------------+ +------------+ +------------+ +------------+ +------------+ + +------------------------------------------------------------------------+ + | br0 | + +------------------------------------------------------------------------+ + AP 2 + +- br0 of AP 1 will know that Clients A and B are reachable via wlan0 +- the hardware fdb of a DSA switch driver today is not kept in sync with + the software entries on other bridge ports, so it will not know that + clients A and B are reachable via the CPU port UNLESS the hardware + switch itself performs SA learning from traffic injected from the CPU. + Nonetheless, a substantial number of switches don't. +- the hardware fdb of the DSA switch on AP 2 may autonomously learn that + Client A and B are reachable through swp0. Therefore, the software br0 + of AP 2 also may or may not learn this. 
In the example we're + illustrating, some Ethernet traffic has been going on, and br0 from AP + 2 has indeed learnt that it can reach Client B through swp0. + +One of the wireless clients, say Client B, disconnects from AP 1 and +roams to AP 2. The topology now looks like this: + + AP 1: + +------------------------------------------------------------------------+ + | br0 | + +------------------------------------------------------------------------+ + +------------+ +------------+ +------------+ +------------+ +------------+ + | swp0 | | swp1 | | swp2 | | swp3 | | wlan0 | + +------------+ +------------+ +------------+ +------------+ +------------+ + | ^ + | | + | Client A + | + | + | Client B + | | + | v + +------------+ +------------+ +------------+ +------------+ +------------+ + | swp0 | | swp1 | | swp2 | | swp3 | | wlan0 | + +------------+ +------------+ +------------+ +------------+ +------------+ + +------------------------------------------------------------------------+ + | br0 | + +------------------------------------------------------------------------+ + AP 2 + +- br0 of AP 1 still knows that Client A is reachable via wlan0 (no change) +- br0 of AP 1 will (possibly) know that Client B has left wlan0. There + are cases where it might never find out though. Either way, DSA today + does not process that notification in any way. +- the hardware FDB of the DSA switch on AP 1 may learn autonomously that + Client B can be reached via swp0, if it receives any packet with + Client 1's source MAC address over Ethernet. +- the hardware FDB of the DSA switch on AP 2 still thinks that Client B + can be reached via swp0. It does not know that it has roamed to wlan0, + because it doesn't perform SA learning from the CPU port. + +Now Client A contacts Client B. +AP 1 routes the packet fine towards swp0 and delivers it on the Ethernet +segment. +AP 2 sees a frame on swp0 and its fdb says that the destination is swp0. +Hairpinning is disabled => drop. + +This problem comes from the fact that these switches have a 'blind spot' +for addresses coming from software bridging. The generic solution is not +to assume that hardware learning can be enabled somehow, but to listen +to more bridge learning events. It turns out that the bridge driver does +learn in software from all inbound frames, in __br_handle_local_finish. +A proper SWITCHDEV_FDB_ADD_TO_DEVICE notification is emitted for the +addresses serviced by the bridge on 'foreign' interfaces. The software +bridge also does the right thing on migration, by notifying that the old +entry is deleted, so that does not need to be special-cased in DSA. When +it is deleted, we just need to delete our static FDB entry towards the +CPU too, and wait. + +The problem is that DSA currently only cares about SWITCHDEV_FDB_ADD_TO_DEVICE +events received on its own interfaces, such as static FDB entries. + +Luckily we can change that, and DSA can listen to all switchdev FDB +add/del events in the system and figure out if those events were emitted +by a bridge that spans at least one of DSA's own ports. In case that is +true, DSA will also offload that address towards its own CPU port, in +the eventuality that there might be bridge clients attached to the DSA +switch who want to talk to the station connected to the foreign +interface. + +In terms of implementation, we need to keep the fdb_info->added_by_user +check for the case where the switchdev event was targeted directly at a +DSA switch port. But we don't need to look at that flag for snooped +events. 
So the check is currently too late, we need to move it earlier. +This also simplifies the code a bit, since we avoid uselessly allocating +and freeing switchdev_work. + +We could probably do some improvements in the future. For example, +multi-bridge support is rudimentary at the moment. If there are two +bridges spanning a DSA switch's ports, and both of them need to service +the same MAC address, then what will happen is that the migration of one +of those stations will trigger the deletion of the FDB entry from the +CPU port while it is still used by other bridge. That could be improved +with reference counting but is left for another time. + +This behavior needs to be enabled at driver level by setting +ds->assisted_learning_on_cpu_port = true. This is because we don't want +to inflict a potential performance penalty (accesses through +MDIO/I2C/SPI are expensive) to hardware that really doesn't need it +because address learning on the CPU port works there. + +Reported-by: DENG Qingfang <dqfext@gmail.com> +Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + include/net/dsa.h | 5 +++++ + net/dsa/slave.c | 66 +++++++++++++++++++++++++++++++++++++++++++++---------- + 2 files changed, 60 insertions(+), 11 deletions(-) + +--- a/include/net/dsa.h ++++ b/include/net/dsa.h +@@ -317,6 +317,11 @@ struct dsa_switch { + */ + bool untag_bridge_pvid; + ++ /* Let DSA manage the FDB entries towards the CPU, based on the ++ * software bridge database. ++ */ ++ bool assisted_learning_on_cpu_port; ++ + /* In case vlan_filtering_is_global is set, the VLAN awareness state + * should be retrieved from here and not from the per-port settings. 
+ */ +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -2149,6 +2149,28 @@ static void dsa_slave_switchdev_event_wo + dev_put(dp->slave); + } + ++static int dsa_lower_dev_walk(struct net_device *lower_dev, ++ struct netdev_nested_priv *priv) ++{ ++ if (dsa_slave_dev_check(lower_dev)) { ++ priv->data = (void *)netdev_priv(lower_dev); ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static struct dsa_slave_priv *dsa_slave_dev_lower_find(struct net_device *dev) ++{ ++ struct netdev_nested_priv priv = { ++ .data = NULL, ++ }; ++ ++ netdev_walk_all_lower_dev_rcu(dev, dsa_lower_dev_walk, &priv); ++ ++ return (struct dsa_slave_priv *)priv.data; ++} ++ + /* Called under rcu_read_lock() */ + static int dsa_slave_switchdev_event(struct notifier_block *unused, + unsigned long event, void *ptr) +@@ -2167,10 +2189,37 @@ static int dsa_slave_switchdev_event(str + return notifier_from_errno(err); + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: +- if (!dsa_slave_dev_check(dev)) +- return NOTIFY_DONE; ++ fdb_info = ptr; ++ ++ if (dsa_slave_dev_check(dev)) { ++ if (!fdb_info->added_by_user) ++ return NOTIFY_OK; ++ ++ dp = dsa_slave_to_port(dev); ++ } else { ++ /* Snoop addresses learnt on foreign interfaces ++ * bridged with us, for switches that don't ++ * automatically learn SA from CPU-injected traffic ++ */ ++ struct net_device *br_dev; ++ struct dsa_slave_priv *p; ++ ++ br_dev = netdev_master_upper_dev_get_rcu(dev); ++ if (!br_dev) ++ return NOTIFY_DONE; ++ ++ if (!netif_is_bridge_master(br_dev)) ++ return NOTIFY_DONE; ++ ++ p = dsa_slave_dev_lower_find(br_dev); ++ if (!p) ++ return NOTIFY_DONE; + +- dp = dsa_slave_to_port(dev); ++ dp = p->dp->cpu_dp; ++ ++ if (!dp->ds->assisted_learning_on_cpu_port) ++ return NOTIFY_DONE; ++ } + + if (!dp->ds->ops->port_fdb_add || !dp->ds->ops->port_fdb_del) + return NOTIFY_DONE; +@@ -2185,18 +2234,13 @@ static int dsa_slave_switchdev_event(str + switchdev_work->port = dp->index; + switchdev_work->event = event; + +- fdb_info = ptr; +- +- if (!fdb_info->added_by_user) { +- kfree(switchdev_work); +- return NOTIFY_OK; +- } +- + ether_addr_copy(switchdev_work->addr, + fdb_info->addr); + switchdev_work->vid = fdb_info->vid; + +- dev_hold(dev); ++ /* Hold a reference on the slave for dsa_fdb_offload_notify */ ++ if (dsa_is_user_port(dp->ds, dp->index)) ++ dev_hold(dev); + dsa_schedule_work(&switchdev_work->work); + break; + default: diff --git a/target/linux/generic/backport-5.15/730-net-dsa-mt7530-setup-core-clock-even-in-TRGMII-mode.patch b/target/linux/generic/backport-5.15/730-net-dsa-mt7530-setup-core-clock-even-in-TRGMII-mode.patch new file mode 100644 index 0000000000..f3a6f948ad --- /dev/null +++ b/target/linux/generic/backport-5.15/730-net-dsa-mt7530-setup-core-clock-even-in-TRGMII-mode.patch @@ -0,0 +1,84 @@ +From c3b8e07909dbe67b0d580416c1a5257643a73be7 Mon Sep 17 00:00:00 2001 +From: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Date: Fri, 12 Mar 2021 00:07:03 -0800 +Subject: [PATCH] net: dsa: mt7530: setup core clock even in TRGMII mode + +A recent change to MIPS ralink reset logic made it so mt7530 actually +resets the switch on platforms such as mt7621 (where bit 2 is the reset +line for the switch). That exposed an issue where the switch would not +function properly in TRGMII mode after a reset. + +Reconfigure core clock in TRGMII mode to fix the issue. + +Tested on Ubiquiti ER-X (MT7621) with TRGMII mode enabled. 
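As a hedged aside: the fix below boils down to dropping the !trgint guard so the core PLL is always reprogrammed, in TRGMII mode as well as RGMII. The following is only a condensed, non-compilable sketch of that sequence, using the helpers and register macros that appear verbatim in the hunk underneath; the wrapper function name is invented for illustration, and in the driver this code runs inline in mt7530_pad_clk_setup():

    /* Illustrative sketch only -- the real code is inline in
     * mt7530_pad_clk_setup() and now runs unconditionally.
     */
    static void mt7530_core_pll_bringup_sketch(struct mt7530_priv *priv)
    {
            /* Gate the core clock while the PLL is reprogrammed */
            core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);

            /* Disable the PLL via the driver's indirect MMD write helper */
            core_write_mmd_indirect(priv, CORE_GSWPLL_GRP1, MDIO_MMD_VEND2, 0);

            /* Program the PLL for a 500 MHz core clock */
            core_write(priv, CORE_GSWPLL_GRP2,
                       RG_GSWPLL_POSDIV_500M(1) | RG_GSWPLL_FBKDIV_500M(25));

            /* Re-enable the PLL */
            core_write(priv, CORE_GSWPLL_GRP1,
                       RG_GSWPLL_EN_PRE | RG_GSWPLL_POSDIV_200M(2) |
                       RG_GSWPLL_FBKDIV_200M(32));

            /* Ungate the core clock again */
            core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
    }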
+ +Fixes: 3f9ef7785a9c ("MIPS: ralink: manage low reset lines") +Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/mt7530.c | 52 +++++++++++++++++++--------------------- + 1 file changed, 25 insertions(+), 27 deletions(-) + +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -436,34 +436,32 @@ mt7530_pad_clk_setup(struct dsa_switch * + TD_DM_DRVP(8) | TD_DM_DRVN(8)); + + /* Setup core clock for MT7530 */ +- if (!trgint) { +- /* Disable MT7530 core clock */ +- core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); +- +- /* Disable PLL, since phy_device has not yet been created +- * provided for phy_[read,write]_mmd_indirect is called, we +- * provide our own core_write_mmd_indirect to complete this +- * function. +- */ +- core_write_mmd_indirect(priv, +- CORE_GSWPLL_GRP1, +- MDIO_MMD_VEND2, +- 0); +- +- /* Set core clock into 500Mhz */ +- core_write(priv, CORE_GSWPLL_GRP2, +- RG_GSWPLL_POSDIV_500M(1) | +- RG_GSWPLL_FBKDIV_500M(25)); +- +- /* Enable PLL */ +- core_write(priv, CORE_GSWPLL_GRP1, +- RG_GSWPLL_EN_PRE | +- RG_GSWPLL_POSDIV_200M(2) | +- RG_GSWPLL_FBKDIV_200M(32)); +- +- /* Enable MT7530 core clock */ +- core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); +- } ++ /* Disable MT7530 core clock */ ++ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); ++ ++ /* Disable PLL, since phy_device has not yet been created ++ * provided for phy_[read,write]_mmd_indirect is called, we ++ * provide our own core_write_mmd_indirect to complete this ++ * function. ++ */ ++ core_write_mmd_indirect(priv, ++ CORE_GSWPLL_GRP1, ++ MDIO_MMD_VEND2, ++ 0); ++ ++ /* Set core clock into 500Mhz */ ++ core_write(priv, CORE_GSWPLL_GRP2, ++ RG_GSWPLL_POSDIV_500M(1) | ++ RG_GSWPLL_FBKDIV_500M(25)); ++ ++ /* Enable PLL */ ++ core_write(priv, CORE_GSWPLL_GRP1, ++ RG_GSWPLL_EN_PRE | ++ RG_GSWPLL_POSDIV_200M(2) | ++ RG_GSWPLL_FBKDIV_200M(32)); ++ ++ /* Enable MT7530 core clock */ ++ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); + + /* Setup the MT7530 TRGMII Tx Clock */ + core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); diff --git a/target/linux/generic/backport-5.15/731-v5.12-net-dsa-mt7530-MT7530-optional-GPIO-support.patch b/target/linux/generic/backport-5.15/731-v5.12-net-dsa-mt7530-MT7530-optional-GPIO-support.patch new file mode 100644 index 0000000000..9e5047a16b --- /dev/null +++ b/target/linux/generic/backport-5.15/731-v5.12-net-dsa-mt7530-MT7530-optional-GPIO-support.patch @@ -0,0 +1,181 @@ +From 429a0edeefd88cbfca5c417dfb8561047bb50769 Mon Sep 17 00:00:00 2001 +From: DENG Qingfang <dqfext@gmail.com> +Date: Mon, 25 Jan 2021 12:43:22 +0800 +Subject: [PATCH] net: dsa: mt7530: MT7530 optional GPIO support + +MT7530's LED controller can drive up to 15 LED/GPIOs. + +Add support for GPIO control and allow users to use its GPIOs by +setting gpio-controller property in device tree. 
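The 15 lines come from three LED pins on each of the five ports, and the register layout documented further down leaves one unused bit between each group of three (bits 3, 7, 11 and 15 are skipped). A small standalone sketch of that offset-to-bit mapping, mirroring the mt7530_gpio_to_bit() helper added in the diff below (illustrative only, not driver code):

    #include <stdio.h>

    /* Same arithmetic as the new mt7530_gpio_to_bit() helper, except that
     * this returns the register bit index rather than the BIT() mask.
     */
    static unsigned int gpio_offset_to_reg_bit(unsigned int offset)
    {
            return offset + offset / 3;
    }

    int main(void)
    {
            unsigned int offset;

            for (offset = 0; offset < 15; offset++)
                    printf("GPIO %2u -> port %u LED %u -> register bit %u\n",
                           offset, offset / 3, offset % 3,
                           gpio_offset_to_reg_bit(offset));

            return 0;
    }

For example, GPIO offset 3 lands on register bit 4, matching the "[ 6: 4] port 1 LED 0..2 as GPIO 3..5" comment in the patch.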
+ +Signed-off-by: DENG Qingfang <dqfext@gmail.com> +Reviewed-by: Linus Walleij <linus.walleij@linaro.org> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/dsa/mt7530.c | 110 +++++++++++++++++++++++++++++++++++++++ + drivers/net/dsa/mt7530.h | 20 +++++++ + 2 files changed, 130 insertions(+) + +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -18,6 +18,7 @@ + #include <linux/regulator/consumer.h> + #include <linux/reset.h> + #include <linux/gpio/consumer.h> ++#include <linux/gpio/driver.h> + #include <net/dsa.h> + + #include "mt7530.h" +@@ -1534,6 +1535,109 @@ mtk_get_tag_protocol(struct dsa_switch * + } + } + ++static inline u32 ++mt7530_gpio_to_bit(unsigned int offset) ++{ ++ /* Map GPIO offset to register bit ++ * [ 2: 0] port 0 LED 0..2 as GPIO 0..2 ++ * [ 6: 4] port 1 LED 0..2 as GPIO 3..5 ++ * [10: 8] port 2 LED 0..2 as GPIO 6..8 ++ * [14:12] port 3 LED 0..2 as GPIO 9..11 ++ * [18:16] port 4 LED 0..2 as GPIO 12..14 ++ */ ++ return BIT(offset + offset / 3); ++} ++ ++static int ++mt7530_gpio_get(struct gpio_chip *gc, unsigned int offset) ++{ ++ struct mt7530_priv *priv = gpiochip_get_data(gc); ++ u32 bit = mt7530_gpio_to_bit(offset); ++ ++ return !!(mt7530_read(priv, MT7530_LED_GPIO_DATA) & bit); ++} ++ ++static void ++mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) ++{ ++ struct mt7530_priv *priv = gpiochip_get_data(gc); ++ u32 bit = mt7530_gpio_to_bit(offset); ++ ++ if (value) ++ mt7530_set(priv, MT7530_LED_GPIO_DATA, bit); ++ else ++ mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit); ++} ++ ++static int ++mt7530_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) ++{ ++ struct mt7530_priv *priv = gpiochip_get_data(gc); ++ u32 bit = mt7530_gpio_to_bit(offset); ++ ++ return (mt7530_read(priv, MT7530_LED_GPIO_DIR) & bit) ? 
++ GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN; ++} ++ ++static int ++mt7530_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) ++{ ++ struct mt7530_priv *priv = gpiochip_get_data(gc); ++ u32 bit = mt7530_gpio_to_bit(offset); ++ ++ mt7530_clear(priv, MT7530_LED_GPIO_OE, bit); ++ mt7530_clear(priv, MT7530_LED_GPIO_DIR, bit); ++ ++ return 0; ++} ++ ++static int ++mt7530_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value) ++{ ++ struct mt7530_priv *priv = gpiochip_get_data(gc); ++ u32 bit = mt7530_gpio_to_bit(offset); ++ ++ mt7530_set(priv, MT7530_LED_GPIO_DIR, bit); ++ ++ if (value) ++ mt7530_set(priv, MT7530_LED_GPIO_DATA, bit); ++ else ++ mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit); ++ ++ mt7530_set(priv, MT7530_LED_GPIO_OE, bit); ++ ++ return 0; ++} ++ ++static int ++mt7530_setup_gpio(struct mt7530_priv *priv) ++{ ++ struct device *dev = priv->dev; ++ struct gpio_chip *gc; ++ ++ gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL); ++ if (!gc) ++ return -ENOMEM; ++ ++ mt7530_write(priv, MT7530_LED_GPIO_OE, 0); ++ mt7530_write(priv, MT7530_LED_GPIO_DIR, 0); ++ mt7530_write(priv, MT7530_LED_IO_MODE, 0); ++ ++ gc->label = "mt7530"; ++ gc->parent = dev; ++ gc->owner = THIS_MODULE; ++ gc->get_direction = mt7530_gpio_get_direction; ++ gc->direction_input = mt7530_gpio_direction_input; ++ gc->direction_output = mt7530_gpio_direction_output; ++ gc->get = mt7530_gpio_get; ++ gc->set = mt7530_gpio_set; ++ gc->base = -1; ++ gc->ngpio = 15; ++ gc->can_sleep = true; ++ ++ return devm_gpiochip_add_data(dev, gc, priv); ++} ++ + static int + mt7530_setup(struct dsa_switch *ds) + { +@@ -1675,6 +1779,12 @@ mt7530_setup(struct dsa_switch *ds) + } + } + ++ if (of_property_read_bool(priv->dev->of_node, "gpio-controller")) { ++ ret = mt7530_setup_gpio(priv); ++ if (ret) ++ return ret; ++ } ++ + mt7530_setup_port5(ds, interface); + + /* Flush the FDB table */ +--- a/drivers/net/dsa/mt7530.h ++++ b/drivers/net/dsa/mt7530.h +@@ -529,6 +529,26 @@ enum mt7531_clk_skew { + #define MT7531_GPIO12_RG_RXD3_MASK GENMASK(19, 16) + #define MT7531_EXT_P_MDIO_12 (2 << 16) + ++/* Registers for LED GPIO control (MT7530 only) ++ * All registers follow this pattern: ++ * [ 2: 0] port 0 ++ * [ 6: 4] port 1 ++ * [10: 8] port 2 ++ * [14:12] port 3 ++ * [18:16] port 4 ++ */ ++ ++/* LED enable, 0: Disable, 1: Enable (Default) */ ++#define MT7530_LED_EN 0x7d00 ++/* LED mode, 0: GPIO mode, 1: PHY mode (Default) */ ++#define MT7530_LED_IO_MODE 0x7d04 ++/* GPIO direction, 0: Input, 1: Output */ ++#define MT7530_LED_GPIO_DIR 0x7d10 ++/* GPIO output enable, 0: Disable, 1: Enable */ ++#define MT7530_LED_GPIO_OE 0x7d14 ++/* GPIO value, 0: Low, 1: High */ ++#define MT7530_LED_GPIO_DATA 0x7d18 ++ + #define MT7530_CREV 0x7ffc + #define CHIP_NAME_SHIFT 16 + #define MT7530_ID 0x7530 diff --git a/target/linux/generic/backport-5.15/731-v5.13-net-dsa-mt7530-Add-support-for-EEE-features.patch b/target/linux/generic/backport-5.15/731-v5.13-net-dsa-mt7530-Add-support-for-EEE-features.patch new file mode 100644 index 0000000000..38dd8a7296 --- /dev/null +++ b/target/linux/generic/backport-5.15/731-v5.13-net-dsa-mt7530-Add-support-for-EEE-features.patch @@ -0,0 +1,120 @@ +From 40b5d2f15c091fa9c854acde91ad2acb504027d7 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Ren=C3=A9=20van=20Dorst?= <opensource@vdorst.com> +Date: Mon, 12 Apr 2021 08:50:31 +0200 +Subject: [PATCH] net: dsa: mt7530: Add support for EEE features +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This patch 
adds EEE support. + +Signed-off-by: René van Dorst <opensource@vdorst.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/mt7530.c | 43 ++++++++++++++++++++++++++++++++++++++++ + drivers/net/dsa/mt7530.h | 14 ++++++++++++- + 2 files changed, 56 insertions(+), 1 deletion(-) + +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -2371,6 +2371,17 @@ static void mt753x_phylink_mac_link_up(s + mcr |= PMCR_RX_FC_EN; + } + ++ if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, 0) >= 0) { ++ switch (speed) { ++ case SPEED_1000: ++ mcr |= PMCR_FORCE_EEE1G; ++ break; ++ case SPEED_100: ++ mcr |= PMCR_FORCE_EEE100; ++ break; ++ } ++ } ++ + mt7530_set(priv, MT7530_PMCR_P(port), mcr); + } + +@@ -2601,6 +2612,36 @@ mt753x_phy_write(struct dsa_switch *ds, + return priv->info->phy_write(ds, port, regnum, val); + } + ++static int mt753x_get_mac_eee(struct dsa_switch *ds, int port, ++ struct ethtool_eee *e) ++{ ++ struct mt7530_priv *priv = ds->priv; ++ u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port)); ++ ++ e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN); ++ e->tx_lpi_timer = GET_LPI_THRESH(eeecr); ++ ++ return 0; ++} ++ ++static int mt753x_set_mac_eee(struct dsa_switch *ds, int port, ++ struct ethtool_eee *e) ++{ ++ struct mt7530_priv *priv = ds->priv; ++ u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN; ++ ++ if (e->tx_lpi_timer > 0xFFF) ++ return -EINVAL; ++ ++ set = SET_LPI_THRESH(e->tx_lpi_timer); ++ if (!e->tx_lpi_enabled) ++ /* Force LPI Mode without a delay */ ++ set |= LPI_MODE_EN; ++ mt7530_rmw(priv, MT7530_PMEEECR_P(port), mask, set); ++ ++ return 0; ++} ++ + static const struct dsa_switch_ops mt7530_switch_ops = { + .get_tag_protocol = mtk_get_tag_protocol, + .setup = mt753x_setup, +@@ -2629,6 +2670,8 @@ static const struct dsa_switch_ops mt753 + .phylink_mac_an_restart = mt753x_phylink_mac_an_restart, + .phylink_mac_link_down = mt753x_phylink_mac_link_down, + .phylink_mac_link_up = mt753x_phylink_mac_link_up, ++ .get_mac_eee = mt753x_get_mac_eee, ++ .set_mac_eee = mt753x_set_mac_eee, + }; + + static const struct mt753x_info mt753x_table[] = { +--- a/drivers/net/dsa/mt7530.h ++++ b/drivers/net/dsa/mt7530.h +@@ -240,6 +240,8 @@ enum mt7530_vlan_port_attr { + #define PMCR_RX_EN BIT(13) + #define PMCR_BACKOFF_EN BIT(9) + #define PMCR_BACKPR_EN BIT(8) ++#define PMCR_FORCE_EEE1G BIT(7) ++#define PMCR_FORCE_EEE100 BIT(6) + #define PMCR_TX_FC_EN BIT(5) + #define PMCR_RX_FC_EN BIT(4) + #define PMCR_FORCE_SPEED_1000 BIT(3) +@@ -264,7 +266,8 @@ enum mt7530_vlan_port_attr { + #define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \ + PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \ + PMCR_TX_FC_EN | PMCR_RX_FC_EN | \ +- PMCR_FORCE_FDX | PMCR_FORCE_LNK) ++ PMCR_FORCE_FDX | PMCR_FORCE_LNK | \ ++ PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100) + #define PMCR_CPU_PORT_SETTING(id) (PMCR_FORCE_MODE_ID((id)) | \ + PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \ + PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \ +@@ -273,6 +276,15 @@ enum mt7530_vlan_port_attr { + PMCR_FORCE_SPEED_1000 | \ + PMCR_FORCE_FDX | PMCR_FORCE_LNK) + ++#define MT7530_PMEEECR_P(x) (0x3004 + (x) * 0x100) ++#define WAKEUP_TIME_1000(x) (((x) & 0xFF) << 24) ++#define WAKEUP_TIME_100(x) (((x) & 0xFF) << 16) ++#define LPI_THRESH_MASK GENMASK(15, 4) ++#define LPI_THRESH_SHT 4 ++#define SET_LPI_THRESH(x) (((x) << LPI_THRESH_SHT) & LPI_THRESH_MASK) ++#define GET_LPI_THRESH(x) (((x) & LPI_THRESH_MASK) >> LPI_THRESH_SHT) ++#define LPI_MODE_EN BIT(0) ++ + #define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100) + #define PMSR_EEE1G BIT(7) + 
#define PMSR_EEE100M BIT(6) diff --git a/target/linux/generic/backport-5.15/732-net-next-1-of-net-pass-the-dst-buffer-to-of_get_mac_address.patch b/target/linux/generic/backport-5.15/732-net-next-1-of-net-pass-the-dst-buffer-to-of_get_mac_address.patch new file mode 100644 index 0000000000..60b07e5a3e --- /dev/null +++ b/target/linux/generic/backport-5.15/732-net-next-1-of-net-pass-the-dst-buffer-to-of_get_mac_address.patch @@ -0,0 +1,1935 @@ +From 83216e3988cd196183542937c9bd58b279f946af Mon Sep 17 00:00:00 2001 +From: Michael Walle <michael@walle.cc> +Date: Mon, 12 Apr 2021 19:47:17 +0200 +Subject: of: net: pass the dst buffer to of_get_mac_address() + +of_get_mac_address() returns a "const void*" pointer to a MAC address. +Lately, support to fetch the MAC address by an NVMEM provider was added. +But this will only work with platform devices. It will not work with +PCI devices (e.g. of an integrated root complex) and esp. not with DSA +ports. + +There is an of_* variant of the nvmem binding which works without +devices. The returned data of a nvmem_cell_read() has to be freed after +use. On the other hand the return of_get_mac_address() points to some +static data without a lifetime. The trick for now, was to allocate a +device resource managed buffer which is then returned. This will only +work if we have an actual device. + +Change it, so that the caller of of_get_mac_address() has to supply a +buffer where the MAC address is written to. Unfortunately, this will +touch all drivers which use the of_get_mac_address(). + +Usually the code looks like: + + const char *addr; + addr = of_get_mac_address(np); + if (!IS_ERR(addr)) + ether_addr_copy(ndev->dev_addr, addr); + +This can then be simply rewritten as: + + of_get_mac_address(np, ndev->dev_addr); + +Sometimes is_valid_ether_addr() is used to test the MAC address. +of_get_mac_address() already makes sure, it just returns a valid MAC +address. Thus we can just test its return code. But we have to be +careful if there are still other sources for the MAC address before the +of_get_mac_address(). In this case we have to keep the +is_valid_ether_addr() call. + +The following coccinelle patch was used to convert common cases to the +new style. Afterwards, I've manually gone over the drivers and fixed the +return code variable: either used a new one or if one was already +available use that. Mansour Moufid, thanks for that coccinelle patch! + +<spml> +@a@ +identifier x; +expression y, z; +@@ +- x = of_get_mac_address(y); ++ x = of_get_mac_address(y, z); + <... +- ether_addr_copy(z, x); + ...> + +@@ +identifier a.x; +@@ +- if (<+... x ...+>) {} + +@@ +identifier a.x; +@@ + if (<+... x ...+>) { + ... + } +- else {} + +@@ +identifier a.x; +expression e; +@@ +- if (<+... x ...+>@e) +- {} +- else ++ if (!(e)) + {...} + +@@ +expression x, y, z; +@@ +- x = of_get_mac_address(y, z); ++ of_get_mac_address(y, z); + ... when != x +</spml> + +All drivers, except drivers/net/ethernet/aeroflex/greth.c, were +compile-time tested. + +Suggested-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: Michael Walle <michael@walle.cc> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + arch/arm/mach-mvebu/kirkwood.c | 3 +- + arch/powerpc/sysdev/tsi108_dev.c | 5 +- + drivers/net/ethernet/aeroflex/greth.c | 6 +-- + drivers/net/ethernet/allwinner/sun4i-emac.c | 10 ++-- + drivers/net/ethernet/altera/altera_tse_main.c | 7 +-- + drivers/net/ethernet/arc/emac_main.c | 8 +-- + drivers/net/ethernet/atheros/ag71xx.c | 7 +-- + drivers/net/ethernet/broadcom/bcm4908_enet.c | 7 +-- + drivers/net/ethernet/broadcom/bcmsysport.c | 7 +-- + drivers/net/ethernet/broadcom/bgmac-bcma.c | 10 ++-- + drivers/net/ethernet/broadcom/bgmac-platform.c | 11 ++-- + drivers/net/ethernet/cadence/macb_main.c | 11 ++-- + drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 8 +-- + drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 5 +- + drivers/net/ethernet/davicom/dm9000.c | 10 ++-- + drivers/net/ethernet/ethoc.c | 6 +-- + drivers/net/ethernet/ezchip/nps_enet.c | 7 +-- + drivers/net/ethernet/freescale/fec_main.c | 7 +-- + drivers/net/ethernet/freescale/fec_mpc52xx.c | 7 +-- + drivers/net/ethernet/freescale/fman/mac.c | 9 ++-- + .../net/ethernet/freescale/fs_enet/fs_enet-main.c | 5 +- + drivers/net/ethernet/freescale/gianfar.c | 8 +-- + drivers/net/ethernet/freescale/ucc_geth.c | 5 +- + drivers/net/ethernet/hisilicon/hisi_femac.c | 7 +-- + drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 7 +-- + drivers/net/ethernet/lantiq_xrx200.c | 7 +-- + drivers/net/ethernet/marvell/mv643xx_eth.c | 5 +- + drivers/net/ethernet/marvell/mvneta.c | 6 +-- + .../net/ethernet/marvell/prestera/prestera_main.c | 11 ++-- + drivers/net/ethernet/marvell/pxa168_eth.c | 9 +--- + drivers/net/ethernet/marvell/sky2.c | 8 ++- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 11 ++-- + drivers/net/ethernet/micrel/ks8851_common.c | 7 ++- + drivers/net/ethernet/microchip/lan743x_main.c | 5 +- + drivers/net/ethernet/nxp/lpc_eth.c | 4 +- + drivers/net/ethernet/qualcomm/qca_spi.c | 10 ++-- + drivers/net/ethernet/qualcomm/qca_uart.c | 9 +--- + drivers/net/ethernet/renesas/ravb_main.c | 12 +++-- + drivers/net/ethernet/renesas/sh_eth.c | 5 +- + .../net/ethernet/samsung/sxgbe/sxgbe_platform.c | 13 ++--- + drivers/net/ethernet/socionext/sni_ave.c | 10 ++-- + .../net/ethernet/stmicro/stmmac/dwmac-anarion.c | 2 +- + .../ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 2 +- + .../net/ethernet/stmicro/stmmac/dwmac-generic.c | 2 +- + drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 2 +- + .../net/ethernet/stmicro/stmmac/dwmac-intel-plat.c | 2 +- + .../net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 2 +- + .../net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c | 2 +- + .../net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 2 +- + drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c | 2 +- + .../net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 2 +- + drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c | 2 +- + .../ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 2 +- + drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 2 +- + .../net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 2 +- + drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 2 +- + drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 2 +- + drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 2 +- + drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 2 +- + .../net/ethernet/stmicro/stmmac/dwmac-visconti.c | 2 +- + drivers/net/ethernet/stmicro/stmmac/stmmac.h | 2 +- + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2 +- + .../net/ethernet/stmicro/stmmac/stmmac_platform.c | 14 ++--- + .../net/ethernet/stmicro/stmmac/stmmac_platform.h | 2 +- + drivers/net/ethernet/ti/am65-cpsw-nuss.c | 19 
++++--- + drivers/net/ethernet/ti/cpsw.c | 7 +-- + drivers/net/ethernet/ti/cpsw_new.c | 7 +-- + drivers/net/ethernet/ti/davinci_emac.c | 8 +-- + drivers/net/ethernet/ti/netcp_core.c | 7 +-- + drivers/net/ethernet/wiznet/w5100-spi.c | 8 ++- + drivers/net/ethernet/wiznet/w5100.c | 2 +- + drivers/net/ethernet/xilinx/ll_temac_main.c | 8 +-- + drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 15 +++--- + drivers/net/ethernet/xilinx/xilinx_emaclite.c | 8 +-- + drivers/net/wireless/ath/ath9k/init.c | 5 +- + drivers/net/wireless/mediatek/mt76/eeprom.c | 9 +--- + drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 6 +-- + drivers/of/of_net.c | 60 ++++++++++------------ + drivers/staging/octeon/ethernet.c | 10 ++-- + drivers/staging/wfx/main.c | 7 ++- + include/linux/of_net.h | 6 +-- + include/net/dsa.h | 2 +- + net/dsa/dsa2.c | 2 +- + net/dsa/slave.c | 2 +- + net/ethernet/eth.c | 11 ++-- + 85 files changed, 218 insertions(+), 364 deletions(-) + +--- a/arch/arm/mach-mvebu/kirkwood.c ++++ b/arch/arm/mach-mvebu/kirkwood.c +@@ -84,6 +84,7 @@ static void __init kirkwood_dt_eth_fixup + struct device_node *pnp = of_get_parent(np); + struct clk *clk; + struct property *pmac; ++ u8 tmpmac[ETH_ALEN]; + void __iomem *io; + u8 *macaddr; + u32 reg; +@@ -93,7 +94,7 @@ static void __init kirkwood_dt_eth_fixup + + /* skip disabled nodes or nodes with valid MAC address*/ + if (!of_device_is_available(pnp) || +- !IS_ERR(of_get_mac_address(np))) ++ !of_get_mac_address(np, tmpmac)) + goto eth_fixup_skip; + + clk = of_clk_get(pnp, 0); +--- a/arch/powerpc/sysdev/tsi108_dev.c ++++ b/arch/powerpc/sysdev/tsi108_dev.c +@@ -73,7 +73,6 @@ static int __init tsi108_eth_of_init(voi + struct device_node *phy, *mdio; + hw_info tsi_eth_data; + const unsigned int *phy_id; +- const void *mac_addr; + const phandle *ph; + + memset(r, 0, sizeof(r)); +@@ -101,9 +100,7 @@ static int __init tsi108_eth_of_init(voi + goto err; + } + +- mac_addr = of_get_mac_address(np); +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(tsi_eth_data.mac_addr, mac_addr); ++ of_get_mac_address(np, tsi_eth_data.mac_addr); + + ph = of_get_property(np, "mdio-handle", NULL); + mdio = of_find_node_by_phandle(*ph); +--- a/drivers/net/ethernet/aeroflex/greth.c ++++ b/drivers/net/ethernet/aeroflex/greth.c +@@ -1449,10 +1449,10 @@ static int greth_of_probe(struct platfor + break; + } + if (i == 6) { +- const u8 *addr; ++ u8 addr[ETH_ALEN]; + +- addr = of_get_mac_address(ofdev->dev.of_node); +- if (!IS_ERR(addr)) { ++ err = of_get_mac_address(ofdev->dev.of_node, addr); ++ if (!err) { + for (i = 0; i < 6; i++) + macaddr[i] = (unsigned int) addr[i]; + } else { +--- a/drivers/net/ethernet/allwinner/sun4i-emac.c ++++ b/drivers/net/ethernet/allwinner/sun4i-emac.c +@@ -790,7 +790,6 @@ static int emac_probe(struct platform_de + struct emac_board_info *db; + struct net_device *ndev; + int ret = 0; +- const char *mac_addr; + + ndev = alloc_etherdev(sizeof(struct emac_board_info)); + if (!ndev) { +@@ -853,12 +852,9 @@ static int emac_probe(struct platform_de + } + + /* Read MAC-address from DT */ +- mac_addr = of_get_mac_address(np); +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(ndev->dev_addr, mac_addr); +- +- /* Check if the MAC address is valid, if not get a random one */ +- if (!is_valid_ether_addr(ndev->dev_addr)) { ++ ret = of_get_mac_address(np, ndev->dev_addr); ++ if (ret) { ++ /* if the MAC address is invalid get a random one */ + eth_hw_addr_random(ndev); + dev_warn(&pdev->dev, "using random MAC address %pM\n", + ndev->dev_addr); +--- 
a/drivers/net/ethernet/altera/altera_tse_main.c ++++ b/drivers/net/ethernet/altera/altera_tse_main.c +@@ -1351,7 +1351,6 @@ static int altera_tse_probe(struct platf + struct resource *control_port; + struct resource *dma_res; + struct altera_tse_private *priv; +- const unsigned char *macaddr; + void __iomem *descmap; + const struct of_device_id *of_id = NULL; + +@@ -1528,10 +1527,8 @@ static int altera_tse_probe(struct platf + priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE; + + /* get default MAC address from device tree */ +- macaddr = of_get_mac_address(pdev->dev.of_node); +- if (!IS_ERR(macaddr)) +- ether_addr_copy(ndev->dev_addr, macaddr); +- else ++ ret = of_get_mac_address(pdev->dev.of_node, ndev->dev_addr); ++ if (ret) + eth_hw_addr_random(ndev); + + /* get phy addr and create mdio */ +--- a/drivers/net/ethernet/arc/emac_main.c ++++ b/drivers/net/ethernet/arc/emac_main.c +@@ -857,7 +857,6 @@ int arc_emac_probe(struct net_device *nd + struct device_node *phy_node; + struct phy_device *phydev = NULL; + struct arc_emac_priv *priv; +- const char *mac_addr; + unsigned int id, clock_frequency, irq; + int err; + +@@ -942,11 +941,8 @@ int arc_emac_probe(struct net_device *nd + } + + /* Get MAC address from device tree */ +- mac_addr = of_get_mac_address(dev->of_node); +- +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(ndev->dev_addr, mac_addr); +- else ++ err = of_get_mac_address(dev->of_node, ndev->dev_addr); ++ if (err) + eth_hw_addr_random(ndev); + + arc_emac_set_address_internal(ndev); +--- a/drivers/net/ethernet/atheros/ag71xx.c ++++ b/drivers/net/ethernet/atheros/ag71xx.c +@@ -1856,7 +1856,6 @@ static int ag71xx_probe(struct platform_ + const struct ag71xx_dcfg *dcfg; + struct net_device *ndev; + struct resource *res; +- const void *mac_addr; + int tx_size, err, i; + struct ag71xx *ag; + +@@ -1952,10 +1951,8 @@ static int ag71xx_probe(struct platform_ + ag->stop_desc->ctrl = 0; + ag->stop_desc->next = (u32)ag->stop_desc_dma; + +- mac_addr = of_get_mac_address(np); +- if (!IS_ERR(mac_addr)) +- memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); +- if (IS_ERR(mac_addr) || !is_valid_ether_addr(ndev->dev_addr)) { ++ err = of_get_mac_address(np, ndev->dev_addr); ++ if (err) { + netif_err(ag, probe, ndev, "invalid MAC address, using random address\n"); + eth_random_addr(ndev->dev_addr); + } +--- a/drivers/net/ethernet/broadcom/bcmsysport.c ++++ b/drivers/net/ethernet/broadcom/bcmsysport.c +@@ -2468,7 +2468,6 @@ static int bcm_sysport_probe(struct plat + struct bcm_sysport_priv *priv; + struct device_node *dn; + struct net_device *dev; +- const void *macaddr; + u32 txq, rxq; + int ret; + +@@ -2563,12 +2562,10 @@ static int bcm_sysport_probe(struct plat + } + + /* Initialize netdevice members */ +- macaddr = of_get_mac_address(dn); +- if (IS_ERR(macaddr)) { ++ ret = of_get_mac_address(dn, dev->dev_addr); ++ if (ret) { + dev_warn(&pdev->dev, "using random Ethernet MAC\n"); + eth_hw_addr_random(dev); +- } else { +- ether_addr_copy(dev->dev_addr, macaddr); + } + + SET_NETDEV_DEV(dev, &pdev->dev); +--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c ++++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c +@@ -115,7 +115,7 @@ static int bgmac_probe(struct bcma_devic + struct ssb_sprom *sprom = &core->bus->sprom; + struct mii_bus *mii_bus; + struct bgmac *bgmac; +- const u8 *mac = NULL; ++ const u8 *mac; + int err; + + bgmac = bgmac_alloc(&core->dev); +@@ -128,11 +128,10 @@ static int bgmac_probe(struct bcma_devic + + bcma_set_drvdata(core, bgmac); + +- if (bgmac->dev->of_node) +- mac = 
of_get_mac_address(bgmac->dev->of_node); ++ err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr); + + /* If no MAC address assigned via device tree, check SPROM */ +- if (IS_ERR_OR_NULL(mac)) { ++ if (err) { + switch (core->core_unit) { + case 0: + mac = sprom->et0mac; +@@ -149,10 +148,9 @@ static int bgmac_probe(struct bcma_devic + err = -ENOTSUPP; + goto err; + } ++ ether_addr_copy(bgmac->net_dev->dev_addr, mac); + } + +- ether_addr_copy(bgmac->net_dev->dev_addr, mac); +- + /* On BCM4706 we need common core to access PHY */ + if (core->id.id == BCMA_CORE_4706_MAC_GBIT && + !core->bus->drv_gmac_cmn.core) { +--- a/drivers/net/ethernet/broadcom/bgmac-platform.c ++++ b/drivers/net/ethernet/broadcom/bgmac-platform.c +@@ -173,7 +173,7 @@ static int bgmac_probe(struct platform_d + struct device_node *np = pdev->dev.of_node; + struct bgmac *bgmac; + struct resource *regs; +- const u8 *mac_addr; ++ int ret; + + bgmac = bgmac_alloc(&pdev->dev); + if (!bgmac) +@@ -192,11 +192,10 @@ static int bgmac_probe(struct platform_d + bgmac->dev = &pdev->dev; + bgmac->dma_dev = &pdev->dev; + +- mac_addr = of_get_mac_address(np); +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(bgmac->net_dev->dev_addr, mac_addr); +- else +- dev_warn(&pdev->dev, "MAC address not present in device tree\n"); ++ ret = of_get_mac_address(np, bgmac->net_dev->dev_addr); ++ if (ret) ++ dev_warn(&pdev->dev, ++ "MAC address not present in device tree\n"); + + bgmac->irq = platform_get_irq(pdev, 0); + if (bgmac->irq < 0) +--- a/drivers/net/ethernet/cadence/macb_main.c ++++ b/drivers/net/ethernet/cadence/macb_main.c +@@ -4479,7 +4479,6 @@ static int macb_probe(struct platform_de + struct net_device *dev; + struct resource *regs; + void __iomem *mem; +- const char *mac; + struct macb *bp; + int err, val; + +@@ -4592,15 +4591,11 @@ static int macb_probe(struct platform_de + if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) + bp->rx_intr_mask |= MACB_BIT(RXUBR); + +- mac = of_get_mac_address(np); +- if (PTR_ERR(mac) == -EPROBE_DEFER) { +- err = -EPROBE_DEFER; ++ err = of_get_mac_address(np, bp->dev->dev_addr); ++ if (err == -EPROBE_DEFER) + goto err_out_free_netdev; +- } else if (!IS_ERR_OR_NULL(mac)) { +- ether_addr_copy(bp->dev->dev_addr, mac); +- } else { ++ else if (err) + macb_get_hwaddr(bp); +- } + + err = of_get_phy_mode(np, &interface); + if (err) +--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c ++++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +@@ -1385,7 +1385,6 @@ static int octeon_mgmt_probe(struct plat + struct net_device *netdev; + struct octeon_mgmt *p; + const __be32 *data; +- const u8 *mac; + struct resource *res_mix; + struct resource *res_agl; + struct resource *res_agl_prt_ctl; +@@ -1502,11 +1501,8 @@ static int octeon_mgmt_probe(struct plat + netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM; + netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN; + +- mac = of_get_mac_address(pdev->dev.of_node); +- +- if (!IS_ERR(mac)) +- ether_addr_copy(netdev->dev_addr, mac); +- else ++ result = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr); ++ if (result) + eth_hw_addr_random(netdev); + + p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); +--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c ++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +@@ -1474,7 +1474,6 @@ static int bgx_init_of_phy(struct bgx *b + device_for_each_child_node(&bgx->pdev->dev, fwn) { + struct phy_device *pd; + struct device_node *phy_np; +- const char *mac; + + /* Should always be an OF node. 
But if it is not, we + * cannot handle it, so exit the loop. +@@ -1483,9 +1482,7 @@ static int bgx_init_of_phy(struct bgx *b + if (!node) + break; + +- mac = of_get_mac_address(node); +- if (!IS_ERR(mac)) +- ether_addr_copy(bgx->lmac[lmac].mac, mac); ++ of_get_mac_address(node, bgx->lmac[lmac].mac); + + SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev); + bgx->lmac[lmac].lmacid = lmac; +--- a/drivers/net/ethernet/davicom/dm9000.c ++++ b/drivers/net/ethernet/davicom/dm9000.c +@@ -1388,7 +1388,7 @@ static struct dm9000_plat_data *dm9000_p + { + struct dm9000_plat_data *pdata; + struct device_node *np = dev->of_node; +- const void *mac_addr; ++ int ret; + + if (!IS_ENABLED(CONFIG_OF) || !np) + return ERR_PTR(-ENXIO); +@@ -1402,11 +1402,9 @@ static struct dm9000_plat_data *dm9000_p + if (of_find_property(np, "davicom,no-eeprom", NULL)) + pdata->flags |= DM9000_PLATF_NO_EEPROM; + +- mac_addr = of_get_mac_address(np); +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(pdata->dev_addr, mac_addr); +- else if (PTR_ERR(mac_addr) == -EPROBE_DEFER) +- return ERR_CAST(mac_addr); ++ ret = of_get_mac_address(np, pdata->dev_addr); ++ if (ret == -EPROBE_DEFER) ++ return ERR_PTR(ret); + + return pdata; + } +--- a/drivers/net/ethernet/ethoc.c ++++ b/drivers/net/ethernet/ethoc.c +@@ -1151,11 +1151,7 @@ static int ethoc_probe(struct platform_d + ether_addr_copy(netdev->dev_addr, pdata->hwaddr); + priv->phy_id = pdata->phy_id; + } else { +- const void *mac; +- +- mac = of_get_mac_address(pdev->dev.of_node); +- if (!IS_ERR(mac)) +- ether_addr_copy(netdev->dev_addr, mac); ++ of_get_mac_address(pdev->dev.of_node, netdev->dev_addr); + priv->phy_id = -1; + } + +--- a/drivers/net/ethernet/ezchip/nps_enet.c ++++ b/drivers/net/ethernet/ezchip/nps_enet.c +@@ -575,7 +575,6 @@ static s32 nps_enet_probe(struct platfor + struct net_device *ndev; + struct nps_enet_priv *priv; + s32 err = 0; +- const char *mac_addr; + + if (!dev->of_node) + return -ENODEV; +@@ -602,10 +601,8 @@ static s32 nps_enet_probe(struct platfor + dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs_base); + + /* set kernel MAC address to dev */ +- mac_addr = of_get_mac_address(dev->of_node); +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(ndev->dev_addr, mac_addr); +- else ++ err = of_get_mac_address(dev->of_node, ndev->dev_addr); ++ if (err) + eth_hw_addr_random(ndev); + + /* Get IRQ number */ +--- a/drivers/net/ethernet/freescale/fec_main.c ++++ b/drivers/net/ethernet/freescale/fec_main.c +@@ -1666,6 +1666,7 @@ static void fec_get_mac(struct net_devic + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); + unsigned char *iap, tmpaddr[ETH_ALEN]; ++ int ret; + + /* + * try to get mac address in following order: +@@ -1681,9 +1682,9 @@ static void fec_get_mac(struct net_devic + if (!is_valid_ether_addr(iap)) { + struct device_node *np = fep->pdev->dev.of_node; + if (np) { +- const char *mac = of_get_mac_address(np); +- if (!IS_ERR(mac)) +- iap = (unsigned char *) mac; ++ ret = of_get_mac_address(np, tmpaddr); ++ if (!ret) ++ iap = tmpaddr; + } + } + +--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c ++++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c +@@ -813,7 +813,6 @@ static int mpc52xx_fec_probe(struct plat + const u32 *prop; + int prop_size; + struct device_node *np = op->dev.of_node; +- const char *mac_addr; + + phys_addr_t rx_fifo; + phys_addr_t tx_fifo; +@@ -891,10 +890,8 @@ static int mpc52xx_fec_probe(struct plat + * + * First try to read MAC address from DT + */ +- 
mac_addr = of_get_mac_address(np); +- if (!IS_ERR(mac_addr)) { +- ether_addr_copy(ndev->dev_addr, mac_addr); +- } else { ++ rv = of_get_mac_address(np, ndev->dev_addr); ++ if (rv) { + struct mpc52xx_fec __iomem *fec = priv->fec; + + /* +--- a/drivers/net/ethernet/freescale/fman/mac.c ++++ b/drivers/net/ethernet/freescale/fman/mac.c +@@ -616,7 +616,6 @@ static int mac_probe(struct platform_dev + struct platform_device *of_dev; + struct resource res; + struct mac_priv_s *priv; +- const u8 *mac_addr; + u32 val; + u8 fman_id; + phy_interface_t phy_if; +@@ -734,11 +733,9 @@ static int mac_probe(struct platform_dev + priv->cell_index = (u8)val; + + /* Get the MAC address */ +- mac_addr = of_get_mac_address(mac_node); +- if (IS_ERR(mac_addr)) ++ err = of_get_mac_address(mac_node, mac_dev->addr); ++ if (err) + dev_warn(dev, "of_get_mac_address(%pOF) failed\n", mac_node); +- else +- ether_addr_copy(mac_dev->addr, mac_addr); + + /* Get the port handles */ + nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL); +@@ -864,7 +861,7 @@ static int mac_probe(struct platform_dev + if (err < 0) + dev_err(dev, "fman_set_mac_active_pause() = %d\n", err); + +- if (!IS_ERR(mac_addr)) ++ if (!is_zero_ether_addr(mac_dev->addr)) + dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr); + + priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev); +--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c ++++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +@@ -918,7 +918,6 @@ static int fs_enet_probe(struct platform + const u32 *data; + struct clk *clk; + int err; +- const u8 *mac_addr; + const char *phy_connection_type; + int privsize, len, ret = -ENODEV; + +@@ -1006,9 +1005,7 @@ static int fs_enet_probe(struct platform + spin_lock_init(&fep->lock); + spin_lock_init(&fep->tx_lock); + +- mac_addr = of_get_mac_address(ofdev->dev.of_node); +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(ndev->dev_addr, mac_addr); ++ of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr); + + ret = fep->ops->allocate_bd(ndev); + if (ret) +--- a/drivers/net/ethernet/freescale/gianfar.c ++++ b/drivers/net/ethernet/freescale/gianfar.c +@@ -641,7 +641,6 @@ static phy_interface_t gfar_get_interfac + static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) + { + const char *model; +- const void *mac_addr; + int err = 0, i; + phy_interface_t interface; + struct net_device *dev = NULL; +@@ -783,11 +782,8 @@ static int gfar_of_init(struct platform_ + if (stash_len || stash_idx) + priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; + +- mac_addr = of_get_mac_address(np); +- +- if (!IS_ERR(mac_addr)) { +- ether_addr_copy(dev->dev_addr, mac_addr); +- } else { ++ err = of_get_mac_address(np, dev->dev_addr); ++ if (err) { + eth_hw_addr_random(dev); + dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr); + } +--- a/drivers/net/ethernet/freescale/ucc_geth.c ++++ b/drivers/net/ethernet/freescale/ucc_geth.c +@@ -3696,7 +3696,6 @@ static int ucc_geth_probe(struct platfor + int err, ucc_num, max_speed = 0; + const unsigned int *prop; + const char *sprop; +- const void *mac_addr; + phy_interface_t phy_interface; + static const int enet_to_speed[] = { + SPEED_10, SPEED_10, SPEED_10, +@@ -3906,9 +3905,7 @@ static int ucc_geth_probe(struct platfor + goto err_free_netdev; + } + +- mac_addr = of_get_mac_address(np); +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(dev->dev_addr, mac_addr); ++ of_get_mac_address(np, dev->dev_addr); + + ugeth->ug_info = ug_info; + ugeth->dev = device; +--- 
a/drivers/net/ethernet/hisilicon/hisi_femac.c ++++ b/drivers/net/ethernet/hisilicon/hisi_femac.c +@@ -772,7 +772,6 @@ static int hisi_femac_drv_probe(struct p + struct net_device *ndev; + struct hisi_femac_priv *priv; + struct phy_device *phy; +- const char *mac_addr; + int ret; + + ndev = alloc_etherdev(sizeof(*priv)); +@@ -842,10 +841,8 @@ static int hisi_femac_drv_probe(struct p + (unsigned long)phy->phy_id, + phy_modes(phy->interface)); + +- mac_addr = of_get_mac_address(node); +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(ndev->dev_addr, mac_addr); +- if (!is_valid_ether_addr(ndev->dev_addr)) { ++ ret = of_get_mac_address(node, ndev->dev_addr); ++ if (ret) { + eth_hw_addr_random(ndev); + dev_warn(dev, "using random MAC address %pM\n", + ndev->dev_addr); +--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c ++++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +@@ -1098,7 +1098,6 @@ static int hix5hd2_dev_probe(struct plat + struct net_device *ndev; + struct hix5hd2_priv *priv; + struct mii_bus *bus; +- const char *mac_addr; + int ret; + + ndev = alloc_etherdev(sizeof(struct hix5hd2_priv)); +@@ -1220,10 +1219,8 @@ static int hix5hd2_dev_probe(struct plat + goto out_phy_node; + } + +- mac_addr = of_get_mac_address(node); +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(ndev->dev_addr, mac_addr); +- if (!is_valid_ether_addr(ndev->dev_addr)) { ++ ret = of_get_mac_address(node, ndev->dev_addr); ++ if (ret) { + eth_hw_addr_random(ndev); + netdev_warn(ndev, "using random MAC address %pM\n", + ndev->dev_addr); +--- a/drivers/net/ethernet/lantiq_xrx200.c ++++ b/drivers/net/ethernet/lantiq_xrx200.c +@@ -440,7 +440,6 @@ static int xrx200_probe(struct platform_ + struct resource *res; + struct xrx200_priv *priv; + struct net_device *net_dev; +- const u8 *mac; + int err; + + /* alloc the network device */ +@@ -484,10 +483,8 @@ static int xrx200_probe(struct platform_ + return PTR_ERR(priv->clk); + } + +- mac = of_get_mac_address(np); +- if (!IS_ERR(mac)) +- ether_addr_copy(net_dev->dev_addr, mac); +- else ++ err = of_get_mac_address(np, net_dev->dev_addr); ++ if (err) + eth_hw_addr_random(net_dev); + + /* bring up the dma engine and IP core */ +--- a/drivers/net/ethernet/marvell/mv643xx_eth.c ++++ b/drivers/net/ethernet/marvell/mv643xx_eth.c +@@ -2700,7 +2700,6 @@ static int mv643xx_eth_shared_of_add_por + struct platform_device *ppdev; + struct mv643xx_eth_platform_data ppd; + struct resource res; +- const char *mac_addr; + int ret; + int dev_num = 0; + +@@ -2731,9 +2730,7 @@ static int mv643xx_eth_shared_of_add_por + return -EINVAL; + } + +- mac_addr = of_get_mac_address(pnp); +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(ppd.mac_addr, mac_addr); ++ of_get_mac_address(pnp, ppd.mac_addr); + + mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); + mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr); +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -5062,7 +5062,6 @@ static int mvneta_probe(struct platform_ + struct net_device *dev; + struct phylink *phylink; + struct phy *comphy; +- const char *dt_mac_addr; + char hw_mac_addr[ETH_ALEN]; + phy_interface_t phy_mode; + const char *mac_from; +@@ -5158,10 +5157,9 @@ static int mvneta_probe(struct platform_ + goto err_free_ports; + } + +- dt_mac_addr = of_get_mac_address(dn); +- if (!IS_ERR(dt_mac_addr)) { ++ err = of_get_mac_address(dn, dev->dev_addr); ++ if (!err) { + mac_from = "device tree"; +- ether_addr_copy(dev->dev_addr, dt_mac_addr); + } else { + mvneta_get_mac_addr(pp, hw_mac_addr); + if 
(is_valid_ether_addr(hw_mac_addr)) { +--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c ++++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c +@@ -466,20 +466,17 @@ static int prestera_switch_set_base_mac_ + { + struct device_node *base_mac_np; + struct device_node *np; +- const char *base_mac; ++ int ret; + + np = of_find_compatible_node(NULL, NULL, "marvell,prestera"); + base_mac_np = of_parse_phandle(np, "base-mac-provider", 0); + +- base_mac = of_get_mac_address(base_mac_np); +- of_node_put(base_mac_np); +- if (!IS_ERR(base_mac)) +- ether_addr_copy(sw->base_mac, base_mac); +- +- if (!is_valid_ether_addr(sw->base_mac)) { ++ ret = of_get_mac_address(base_mac_np, sw->base_mac); ++ if (ret) { + eth_random_addr(sw->base_mac); + dev_info(prestera_dev(sw), "using random base mac address\n"); + } ++ of_node_put(base_mac_np); + + return prestera_hw_switch_mac_set(sw, sw->base_mac); + } +--- a/drivers/net/ethernet/marvell/pxa168_eth.c ++++ b/drivers/net/ethernet/marvell/pxa168_eth.c +@@ -1392,7 +1392,6 @@ static int pxa168_eth_probe(struct platf + struct resource *res; + struct clk *clk; + struct device_node *np; +- const unsigned char *mac_addr = NULL; + int err; + + printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n"); +@@ -1435,12 +1434,8 @@ static int pxa168_eth_probe(struct platf + + INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task); + +- if (pdev->dev.of_node) +- mac_addr = of_get_mac_address(pdev->dev.of_node); +- +- if (!IS_ERR_OR_NULL(mac_addr)) { +- ether_addr_copy(dev->dev_addr, mac_addr); +- } else { ++ err = of_get_mac_address(pdev->dev.of_node, dev->dev_addr); ++ if (err) { + /* try reading the mac address, if set by the bootloader */ + pxa168_eth_get_mac_address(dev, dev->dev_addr); + if (!is_valid_ether_addr(dev->dev_addr)) { +--- a/drivers/net/ethernet/marvell/sky2.c ++++ b/drivers/net/ethernet/marvell/sky2.c +@@ -4725,7 +4725,7 @@ static struct net_device *sky2_init_netd + { + struct sky2_port *sky2; + struct net_device *dev = alloc_etherdev(sizeof(*sky2)); +- const void *iap; ++ int ret; + + if (!dev) + return NULL; +@@ -4795,10 +4795,8 @@ static struct net_device *sky2_init_netd + * 1) from device tree data + * 2) from internal registers set by bootloader + */ +- iap = of_get_mac_address(hw->pdev->dev.of_node); +- if (!IS_ERR(iap)) +- ether_addr_copy(dev->dev_addr, iap); +- else ++ ret = of_get_mac_address(hw->pdev->dev.of_node, dev->dev_addr); ++ if (ret) + memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, + ETH_ALEN); + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -2580,14 +2580,11 @@ static int __init mtk_init(struct net_de + { + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; +- const char *mac_addr; ++ int ret; + +- mac_addr = of_get_mac_address(mac->of_node); +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(dev->dev_addr, mac_addr); +- +- /* If the mac address is invalid, use random mac address */ +- if (!is_valid_ether_addr(dev->dev_addr)) { ++ ret = of_get_mac_address(mac->of_node, dev->dev_addr); ++ if (ret) { ++ /* If the mac address is invalid, use random mac address */ + eth_hw_addr_random(dev); + dev_err(eth->dev, "generated random MAC address %pM\n", + dev->dev_addr); +--- a/drivers/net/ethernet/micrel/ks8851_common.c ++++ b/drivers/net/ethernet/micrel/ks8851_common.c +@@ -194,11 +194,10 @@ static void ks8851_read_mac_addr(struct + static void ks8851_init_mac(struct ks8851_net *ks, struct device_node *np) + { + struct net_device *dev = 
ks->netdev; +- const u8 *mac_addr; ++ int ret; + +- mac_addr = of_get_mac_address(np); +- if (!IS_ERR(mac_addr)) { +- ether_addr_copy(dev->dev_addr, mac_addr); ++ ret = of_get_mac_address(np, dev->dev_addr); ++ if (!ret) { + ks8851_write_mac_addr(dev); + return; + } +--- a/drivers/net/ethernet/microchip/lan743x_main.c ++++ b/drivers/net/ethernet/microchip/lan743x_main.c +@@ -2831,7 +2831,6 @@ static int lan743x_pcidev_probe(struct p + { + struct lan743x_adapter *adapter = NULL; + struct net_device *netdev = NULL; +- const void *mac_addr; + int ret = -ENODEV; + + netdev = devm_alloc_etherdev(&pdev->dev, +@@ -2848,9 +2847,7 @@ static int lan743x_pcidev_probe(struct p + NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED; + netdev->max_mtu = LAN743X_MAX_FRAME_SIZE; + +- mac_addr = of_get_mac_address(pdev->dev.of_node); +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(adapter->mac_address, mac_addr); ++ of_get_mac_address(pdev->dev.of_node, adapter->mac_address); + + ret = lan743x_pci_init(adapter, pdev); + if (ret) +--- a/drivers/net/ethernet/nxp/lpc_eth.c ++++ b/drivers/net/ethernet/nxp/lpc_eth.c +@@ -1347,9 +1347,7 @@ static int lpc_eth_drv_probe(struct plat + __lpc_get_mac(pldat, ndev->dev_addr); + + if (!is_valid_ether_addr(ndev->dev_addr)) { +- const char *macaddr = of_get_mac_address(np); +- if (!IS_ERR(macaddr)) +- ether_addr_copy(ndev->dev_addr, macaddr); ++ of_get_mac_address(np, ndev->dev_addr); + } + if (!is_valid_ether_addr(ndev->dev_addr)) + eth_hw_addr_random(ndev); +--- a/drivers/net/ethernet/qualcomm/qca_spi.c ++++ b/drivers/net/ethernet/qualcomm/qca_spi.c +@@ -885,7 +885,7 @@ qca_spi_probe(struct spi_device *spi) + struct net_device *qcaspi_devs = NULL; + u8 legacy_mode = 0; + u16 signature; +- const char *mac; ++ int ret; + + if (!spi->dev.of_node) { + dev_err(&spi->dev, "Missing device tree\n"); +@@ -962,12 +962,8 @@ qca_spi_probe(struct spi_device *spi) + + spi_set_drvdata(spi, qcaspi_devs); + +- mac = of_get_mac_address(spi->dev.of_node); +- +- if (!IS_ERR(mac)) +- ether_addr_copy(qca->net_dev->dev_addr, mac); +- +- if (!is_valid_ether_addr(qca->net_dev->dev_addr)) { ++ ret = of_get_mac_address(spi->dev.of_node, qca->net_dev->dev_addr); ++ if (ret) { + eth_hw_addr_random(qca->net_dev); + dev_info(&spi->dev, "Using random MAC address: %pM\n", + qca->net_dev->dev_addr); +--- a/drivers/net/ethernet/qualcomm/qca_uart.c ++++ b/drivers/net/ethernet/qualcomm/qca_uart.c +@@ -323,7 +323,6 @@ static int qca_uart_probe(struct serdev_ + { + struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart)); + struct qcauart *qca; +- const char *mac; + u32 speed = 115200; + int ret; + +@@ -348,12 +347,8 @@ static int qca_uart_probe(struct serdev_ + + of_property_read_u32(serdev->dev.of_node, "current-speed", &speed); + +- mac = of_get_mac_address(serdev->dev.of_node); +- +- if (!IS_ERR(mac)) +- ether_addr_copy(qca->net_dev->dev_addr, mac); +- +- if (!is_valid_ether_addr(qca->net_dev->dev_addr)) { ++ ret = of_get_mac_address(serdev->dev.of_node, qca->net_dev->dev_addr); ++ if (ret) { + eth_hw_addr_random(qca->net_dev); + dev_info(&serdev->dev, "Using random MAC address: %pM\n", + qca->net_dev->dev_addr); +--- a/drivers/net/ethernet/renesas/ravb_main.c ++++ b/drivers/net/ethernet/renesas/ravb_main.c +@@ -109,11 +109,13 @@ static void ravb_set_buffer_align(struct + * Ethernet AVB device doesn't have ROM for MAC address. + * This function gets the MAC address that was used by a bootloader. 
+ */ +-static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac) ++static void ravb_read_mac_address(struct device_node *np, ++ struct net_device *ndev) + { +- if (!IS_ERR(mac)) { +- ether_addr_copy(ndev->dev_addr, mac); +- } else { ++ int ret; ++ ++ ret = of_get_mac_address(np, ndev->dev_addr); ++ if (ret) { + u32 mahr = ravb_read(ndev, MAHR); + u32 malr = ravb_read(ndev, MALR); + +@@ -2189,7 +2191,7 @@ static int ravb_probe(struct platform_de + priv->msg_enable = RAVB_DEF_MSG_ENABLE; + + /* Read and set MAC address */ +- ravb_read_mac_address(ndev, of_get_mac_address(np)); ++ ravb_read_mac_address(np, ndev); + if (!is_valid_ether_addr(ndev->dev_addr)) { + dev_warn(&pdev->dev, + "no valid MAC address supplied, using a random one\n"); +--- a/drivers/net/ethernet/renesas/sh_eth.c ++++ b/drivers/net/ethernet/renesas/sh_eth.c +@@ -3145,7 +3145,6 @@ static struct sh_eth_plat_data *sh_eth_p + struct device_node *np = dev->of_node; + struct sh_eth_plat_data *pdata; + phy_interface_t interface; +- const char *mac_addr; + int ret; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); +@@ -3157,9 +3156,7 @@ static struct sh_eth_plat_data *sh_eth_p + return NULL; + pdata->phy_interface = interface; + +- mac_addr = of_get_mac_address(np); +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(pdata->mac_addr, mac_addr); ++ of_get_mac_address(np, pdata->mac_addr); + + pdata->no_ether_link = + of_property_read_bool(np, "renesas,no-ether-link"); +--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c ++++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +@@ -25,8 +25,7 @@ + + #ifdef CONFIG_OF + static int sxgbe_probe_config_dt(struct platform_device *pdev, +- struct sxgbe_plat_data *plat, +- const char **mac) ++ struct sxgbe_plat_data *plat) + { + struct device_node *np = pdev->dev.of_node; + struct sxgbe_dma_cfg *dma_cfg; +@@ -35,7 +34,6 @@ static int sxgbe_probe_config_dt(struct + if (!np) + return -ENODEV; + +- *mac = of_get_mac_address(np); + err = of_get_phy_mode(np, &plat->interface); + if (err && err != -ENODEV) + return err; +@@ -63,8 +61,7 @@ static int sxgbe_probe_config_dt(struct + } + #else + static int sxgbe_probe_config_dt(struct platform_device *pdev, +- struct sxgbe_plat_data *plat, +- const char **mac) ++ struct sxgbe_plat_data *plat) + { + return -ENOSYS; + } +@@ -85,7 +82,6 @@ static int sxgbe_platform_probe(struct p + void __iomem *addr; + struct sxgbe_priv_data *priv = NULL; + struct sxgbe_plat_data *plat_dat = NULL; +- const char *mac = NULL; + struct net_device *ndev = platform_get_drvdata(pdev); + struct device_node *node = dev->of_node; + +@@ -101,7 +97,7 @@ static int sxgbe_platform_probe(struct p + if (!plat_dat) + return -ENOMEM; + +- ret = sxgbe_probe_config_dt(pdev, plat_dat, &mac); ++ ret = sxgbe_probe_config_dt(pdev, plat_dat); + if (ret) { + pr_err("%s: main dt probe failed\n", __func__); + return ret; +@@ -122,8 +118,7 @@ static int sxgbe_platform_probe(struct p + } + + /* Get MAC address if available (DT) */ +- if (!IS_ERR_OR_NULL(mac)) +- ether_addr_copy(priv->dev->dev_addr, mac); ++ of_get_mac_address(node, priv->dev->dev_addr); + + /* Get the TX/RX IRQ numbers */ + for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) { +--- a/drivers/net/ethernet/socionext/sni_ave.c ++++ b/drivers/net/ethernet/socionext/sni_ave.c +@@ -1559,7 +1559,6 @@ static int ave_probe(struct platform_dev + struct ave_private *priv; + struct net_device *ndev; + struct device_node *np; +- const void *mac_addr; + void __iomem *base; + const char *name; + int i, irq, ret; +@@ -1600,12 
+1599,9 @@ static int ave_probe(struct platform_dev + + ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN); + +- mac_addr = of_get_mac_address(np); +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(ndev->dev_addr, mac_addr); +- +- /* if the mac address is invalid, use random mac address */ +- if (!is_valid_ether_addr(ndev->dev_addr)) { ++ ret = of_get_mac_address(np, ndev->dev_addr); ++ if (ret) { ++ /* if the mac address is invalid, use random mac address */ + eth_hw_addr_random(ndev); + dev_warn(dev, "Using random MAC address: %pM\n", + ndev->dev_addr); +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c +@@ -115,7 +115,7 @@ static int anarion_dwmac_probe(struct pl + if (IS_ERR(gmac)) + return PTR_ERR(gmac); + +- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +@@ -444,7 +444,7 @@ static int dwc_eth_dwmac_probe(struct pl + if (IS_ERR(stmmac_res.addr)) + return PTR_ERR(stmmac_res.addr); + +- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +@@ -27,7 +27,7 @@ static int dwmac_generic_probe(struct pl + return ret; + + if (pdev->dev.of_node) { +- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) { + dev_err(&pdev->dev, "dt configuration failed\n"); + return PTR_ERR(plat_dat); +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +@@ -226,7 +226,7 @@ static int imx_dwmac_probe(struct platfo + if (!dwmac) + return -ENOMEM; + +- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c +@@ -88,7 +88,7 @@ static int intel_eth_plat_probe(struct p + if (ret) + return ret; + +- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) { + dev_err(&pdev->dev, "dt configuration failed\n"); + return PTR_ERR(plat_dat); +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +@@ -255,7 +255,7 @@ static int ipq806x_gmac_probe(struct pla + if (val) + return val; + +- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c +@@ -37,7 +37,7 @@ static int lpc18xx_dwmac_probe(struct pl + if (ret) + return ret; + +- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c +@@ 
-407,7 +407,7 @@ static int mediatek_dwmac_probe(struct p + if (ret) + return ret; + +- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c +@@ -52,7 +52,7 @@ static int meson6_dwmac_probe(struct pla + if (ret) + return ret; + +- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +@@ -372,7 +372,7 @@ static int meson8b_dwmac_probe(struct pl + if (ret) + return ret; + +- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c +@@ -118,7 +118,7 @@ static int oxnas_dwmac_probe(struct plat + if (ret) + return ret; + +- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +@@ -461,7 +461,7 @@ static int qcom_ethqos_probe(struct plat + if (ret) + return ret; + +- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) { + dev_err(&pdev->dev, "dt configuration failed\n"); + return PTR_ERR(plat_dat); +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +@@ -1392,7 +1392,7 @@ static int rk_gmac_probe(struct platform + if (ret) + return ret; + +- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +@@ -398,7 +398,7 @@ static int socfpga_dwmac_probe(struct pl + if (ret) + return ret; + +- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +@@ -325,7 +325,7 @@ static int sti_dwmac_probe(struct platfo + if (ret) + return ret; + +- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +@@ -371,7 +371,7 @@ static int stm32_dwmac_probe(struct plat + if (ret) + return ret; + +- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +@@ -1202,7 +1202,7 @@ static int sun8i_dwmac_probe(struct plat + if (ret) + return -EINVAL; + 
+- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +@@ -108,7 +108,7 @@ static int sun7i_gmac_probe(struct platf + if (ret) + return ret; + +- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h +@@ -25,7 +25,7 @@ + + struct stmmac_resources { + void __iomem *addr; +- const char *mac; ++ u8 mac[ETH_ALEN]; + int wol_irq; + int lpi_irq; + int irq; +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -4988,7 +4988,7 @@ int stmmac_dvr_probe(struct device *devi + priv->wol_irq = res->wol_irq; + priv->lpi_irq = res->lpi_irq; + +- if (!IS_ERR_OR_NULL(res->mac)) ++ if (!is_zero_ether_addr(res->mac)) + memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); + + dev_set_drvdata(device, priv->dev); +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +@@ -395,7 +395,7 @@ static int stmmac_of_get_mac_mode(struct + * set some private fields that will be used by the main at runtime. + */ + struct plat_stmmacenet_data * +-stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) ++stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) + { + struct device_node *np = pdev->dev.of_node; + struct plat_stmmacenet_data *plat; +@@ -407,12 +407,12 @@ stmmac_probe_config_dt(struct platform_d + if (!plat) + return ERR_PTR(-ENOMEM); + +- *mac = of_get_mac_address(np); +- if (IS_ERR(*mac)) { +- if (PTR_ERR(*mac) == -EPROBE_DEFER) +- return ERR_CAST(*mac); ++ rc = of_get_mac_address(np, mac); ++ if (rc) { ++ if (rc == -EPROBE_DEFER) ++ return ERR_PTR(rc); + +- *mac = NULL; ++ eth_zero_addr(mac); + } + + phy_mode = device_get_phy_mode(&pdev->dev); +@@ -644,7 +644,7 @@ void stmmac_remove_config_dt(struct plat + } + #else + struct plat_stmmacenet_data * +-stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) ++stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) + { + return ERR_PTR(-EINVAL); + } +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +@@ -12,7 +12,7 @@ + #include "stmmac.h" + + struct plat_stmmacenet_data * +-stmmac_probe_config_dt(struct platform_device *pdev, const char **mac); ++stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac); + void stmmac_remove_config_dt(struct platform_device *pdev, + struct plat_stmmacenet_data *plat); + +--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c ++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c +@@ -1741,7 +1741,6 @@ static int am65_cpsw_nuss_init_slave_por + + for_each_child_of_node(node, port_np) { + struct am65_cpsw_port *port; +- const void *mac_addr; + u32 port_id; + + /* it is not a slave port node, continue */ +@@ -1820,15 +1819,15 @@ static int am65_cpsw_nuss_init_slave_por + return ret; + } + +- mac_addr = of_get_mac_address(port_np); +- if (!IS_ERR(mac_addr)) { +- ether_addr_copy(port->slave.mac_addr, mac_addr); +- } else if (am65_cpsw_am654_get_efuse_macid(port_np, +- port->port_id, +- port->slave.mac_addr) || +- !is_valid_ether_addr(port->slave.mac_addr)) { +- 
random_ether_addr(port->slave.mac_addr); +- dev_err(dev, "Use random MAC address\n"); ++ ret = of_get_mac_address(port_np, port->slave.mac_addr); ++ if (ret) { ++ am65_cpsw_am654_get_efuse_macid(port_np, ++ port->port_id, ++ port->slave.mac_addr); ++ if (!is_valid_ether_addr(port->slave.mac_addr)) { ++ random_ether_addr(port->slave.mac_addr); ++ dev_err(dev, "Use random MAC address\n"); ++ } + } + } + of_node_put(node); +--- a/drivers/net/ethernet/ti/cpsw.c ++++ b/drivers/net/ethernet/ti/cpsw.c +@@ -1306,7 +1306,6 @@ static int cpsw_probe_dt(struct cpsw_pla + + for_each_available_child_of_node(node, slave_node) { + struct cpsw_slave_data *slave_data = data->slave_data + i; +- const void *mac_addr = NULL; + int lenp; + const __be32 *parp; + +@@ -1378,10 +1377,8 @@ static int cpsw_probe_dt(struct cpsw_pla + } + + no_phy_slave: +- mac_addr = of_get_mac_address(slave_node); +- if (!IS_ERR(mac_addr)) { +- ether_addr_copy(slave_data->mac_addr, mac_addr); +- } else { ++ ret = of_get_mac_address(slave_node, slave_data->mac_addr); ++ if (ret) { + ret = ti_cm_get_macid(&pdev->dev, i, + slave_data->mac_addr); + if (ret) +--- a/drivers/net/ethernet/ti/cpsw_new.c ++++ b/drivers/net/ethernet/ti/cpsw_new.c +@@ -1267,7 +1267,6 @@ static int cpsw_probe_dt(struct cpsw_com + + for_each_child_of_node(tmp_node, port_np) { + struct cpsw_slave_data *slave_data; +- const void *mac_addr; + u32 port_id; + + ret = of_property_read_u32(port_np, "reg", &port_id); +@@ -1326,10 +1325,8 @@ static int cpsw_probe_dt(struct cpsw_com + goto err_node_put; + } + +- mac_addr = of_get_mac_address(port_np); +- if (!IS_ERR(mac_addr)) { +- ether_addr_copy(slave_data->mac_addr, mac_addr); +- } else { ++ ret = of_get_mac_address(port_np, slave_data->mac_addr); ++ if (ret) { + ret = ti_cm_get_macid(dev, port_id - 1, + slave_data->mac_addr); + if (ret) +--- a/drivers/net/ethernet/ti/davinci_emac.c ++++ b/drivers/net/ethernet/ti/davinci_emac.c +@@ -1699,7 +1699,6 @@ davinci_emac_of_get_pdata(struct platfor + const struct of_device_id *match; + const struct emac_platform_data *auxdata; + struct emac_platform_data *pdata = NULL; +- const u8 *mac_addr; + + if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node) + return dev_get_platdata(&pdev->dev); +@@ -1711,11 +1710,8 @@ davinci_emac_of_get_pdata(struct platfor + np = pdev->dev.of_node; + pdata->version = EMAC_VERSION_2; + +- if (!is_valid_ether_addr(pdata->mac_addr)) { +- mac_addr = of_get_mac_address(np); +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(pdata->mac_addr, mac_addr); +- } ++ if (!is_valid_ether_addr(pdata->mac_addr)) ++ of_get_mac_address(np, pdata->mac_addr); + + of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", + &pdata->ctrl_reg_offset); +--- a/drivers/net/ethernet/ti/netcp_core.c ++++ b/drivers/net/ethernet/ti/netcp_core.c +@@ -1966,7 +1966,6 @@ static int netcp_create_interface(struct + struct resource res; + void __iomem *efuse = NULL; + u32 efuse_mac = 0; +- const void *mac_addr; + u8 efuse_mac_addr[6]; + u32 temp[2]; + int ret = 0; +@@ -2036,10 +2035,8 @@ static int netcp_create_interface(struct + devm_iounmap(dev, efuse); + devm_release_mem_region(dev, res.start, size); + } else { +- mac_addr = of_get_mac_address(node_interface); +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(ndev->dev_addr, mac_addr); +- else ++ ret = of_get_mac_address(node_interface, ndev->dev_addr); ++ if (ret) + eth_random_addr(ndev->dev_addr); + } + +--- a/drivers/net/ethernet/wiznet/w5100-spi.c ++++ b/drivers/net/ethernet/wiznet/w5100-spi.c +@@ -423,8 +423,14 @@ static int w5100_spi_probe(struct 
spi_de + const struct of_device_id *of_id; + const struct w5100_ops *ops; + kernel_ulong_t driver_data; ++ const void *mac = NULL; ++ u8 tmpmac[ETH_ALEN]; + int priv_size; +- const void *mac = of_get_mac_address(spi->dev.of_node); ++ int ret; ++ ++ ret = of_get_mac_address(spi->dev.of_node, tmpmac); ++ if (!ret) ++ mac = tmpmac; + + if (spi->dev.of_node) { + of_id = of_match_device(w5100_of_match, &spi->dev); +--- a/drivers/net/ethernet/wiznet/w5100.c ++++ b/drivers/net/ethernet/wiznet/w5100.c +@@ -1159,7 +1159,7 @@ int w5100_probe(struct device *dev, cons + INIT_WORK(&priv->setrx_work, w5100_setrx_work); + INIT_WORK(&priv->restart_work, w5100_restart_work); + +- if (!IS_ERR_OR_NULL(mac_addr)) ++ if (mac_addr) + memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + else + eth_hw_addr_random(ndev); +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c +@@ -438,7 +438,7 @@ static void temac_do_set_mac_address(str + + static int temac_init_mac_address(struct net_device *ndev, const void *address) + { +- ether_addr_copy(ndev->dev_addr, address); ++ memcpy(ndev->dev_addr, address, ETH_ALEN); + if (!is_valid_ether_addr(ndev->dev_addr)) + eth_hw_addr_random(ndev); + temac_do_set_mac_address(ndev); +@@ -1370,7 +1370,7 @@ static int temac_probe(struct platform_d + struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np; + struct temac_local *lp; + struct net_device *ndev; +- const void *addr; ++ u8 addr[ETH_ALEN]; + __be32 *p; + bool little_endian; + int rc = 0; +@@ -1563,8 +1563,8 @@ static int temac_probe(struct platform_d + + if (temac_np) { + /* Retrieve the MAC address */ +- addr = of_get_mac_address(temac_np); +- if (IS_ERR(addr)) { ++ rc = of_get_mac_address(temac_np, addr); ++ if (rc) { + dev_err(&pdev->dev, "could not find MAC address\n"); + return -ENODEV; + } +--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +@@ -1831,8 +1831,8 @@ static int axienet_probe(struct platform + struct device_node *np; + struct axienet_local *lp; + struct net_device *ndev; +- const void *mac_addr; + struct resource *ethres; ++ u8 mac_addr[ETH_ALEN]; + int addr_width = 32; + u32 value; + +@@ -2032,13 +2032,14 @@ static int axienet_probe(struct platform + dev_info(&pdev->dev, "Ethernet core IRQ not defined\n"); + + /* Retrieve the MAC address */ +- mac_addr = of_get_mac_address(pdev->dev.of_node); +- if (IS_ERR(mac_addr)) { +- dev_warn(&pdev->dev, "could not find MAC address property: %ld\n", +- PTR_ERR(mac_addr)); +- mac_addr = NULL; ++ ret = of_get_mac_address(pdev->dev.of_node, mac_addr); ++ if (!ret) { ++ axienet_set_mac_address(ndev, mac_addr); ++ } else { ++ dev_warn(&pdev->dev, "could not find MAC address property: %d\n", ++ ret); ++ axienet_set_mac_address(ndev, NULL); + } +- axienet_set_mac_address(ndev, mac_addr); + + lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; + lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; +--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c ++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c +@@ -1113,7 +1113,6 @@ static int xemaclite_of_probe(struct pla + struct net_device *ndev = NULL; + struct net_local *lp = NULL; + struct device *dev = &ofdev->dev; +- const void *mac_address; + + int rc = 0; + +@@ -1155,12 +1154,9 @@ static int xemaclite_of_probe(struct pla + lp->next_rx_buf_to_use = 0x0; + lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong"); + lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong"); +- mac_address = 
of_get_mac_address(ofdev->dev.of_node); + +- if (!IS_ERR(mac_address)) { +- /* Set the MAC address. */ +- ether_addr_copy(ndev->dev_addr, mac_address); +- } else { ++ rc = of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr); ++ if (rc) { + dev_warn(dev, "No MAC address found, using random\n"); + eth_hw_addr_random(ndev); + } +--- a/drivers/net/wireless/ath/ath9k/init.c ++++ b/drivers/net/wireless/ath/ath9k/init.c +@@ -618,7 +618,6 @@ static int ath9k_of_init(struct ath_soft + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + enum ath_bus_type bus_type = common->bus_ops->ath_bus_type; +- const char *mac; + char eeprom_name[100]; + int ret; + +@@ -641,9 +640,7 @@ static int ath9k_of_init(struct ath_soft + ah->ah_flags |= AH_NO_EEP_SWAP; + } + +- mac = of_get_mac_address(np); +- if (!IS_ERR(mac)) +- ether_addr_copy(common->macaddr, mac); ++ of_get_mac_address(np, common->macaddr); + + return 0; + } +--- a/drivers/net/wireless/mediatek/mt76/eeprom.c ++++ b/drivers/net/wireless/mediatek/mt76/eeprom.c +@@ -90,15 +90,9 @@ out_put_node: + void + mt76_eeprom_override(struct mt76_dev *dev) + { +-#ifdef CONFIG_OF + struct device_node *np = dev->dev->of_node; +- const u8 *mac = NULL; + +- if (np) +- mac = of_get_mac_address(np); +- if (!IS_ERR_OR_NULL(mac)) +- ether_addr_copy(dev->macaddr, mac); +-#endif ++ of_get_mac_address(np, dev->macaddr); + + if (!is_valid_ether_addr(dev->macaddr)) { + eth_random_addr(dev->macaddr); +--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c ++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +@@ -990,11 +990,7 @@ static void rt2x00lib_rate(struct ieee80 + + void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr) + { +- const char *mac_addr; +- +- mac_addr = of_get_mac_address(rt2x00dev->dev->of_node); +- if (!IS_ERR(mac_addr)) +- ether_addr_copy(eeprom_mac_addr, mac_addr); ++ of_get_mac_address(rt2x00dev->dev->of_node, eeprom_mac_addr); + + if (!is_valid_ether_addr(eeprom_mac_addr)) { + eth_random_addr(eeprom_mac_addr); +--- a/drivers/of/of_net.c ++++ b/drivers/of/of_net.c +@@ -45,37 +45,29 @@ int of_get_phy_mode(struct device_node * + } + EXPORT_SYMBOL_GPL(of_get_phy_mode); + +-static const void *of_get_mac_addr(struct device_node *np, const char *name) ++static int of_get_mac_addr(struct device_node *np, const char *name, u8 *addr) + { + struct property *pp = of_find_property(np, name, NULL); + +- if (pp && pp->length == ETH_ALEN && is_valid_ether_addr(pp->value)) +- return pp->value; +- return NULL; ++ if (pp && pp->length == ETH_ALEN && is_valid_ether_addr(pp->value)) { ++ memcpy(addr, pp->value, ETH_ALEN); ++ return 0; ++ } ++ return -ENODEV; + } + +-static const void *of_get_mac_addr_nvmem(struct device_node *np) ++static int of_get_mac_addr_nvmem(struct device_node *np, u8 *addr) + { +- int ret; +- const void *mac; +- u8 nvmem_mac[ETH_ALEN]; + struct platform_device *pdev = of_find_device_by_node(np); ++ int ret; + + if (!pdev) +- return ERR_PTR(-ENODEV); ++ return -ENODEV; + +- ret = nvmem_get_mac_address(&pdev->dev, &nvmem_mac); +- if (ret) { +- put_device(&pdev->dev); +- return ERR_PTR(ret); +- } +- +- mac = devm_kmemdup(&pdev->dev, nvmem_mac, ETH_ALEN, GFP_KERNEL); ++ ret = nvmem_get_mac_address(&pdev->dev, addr); + put_device(&pdev->dev); +- if (!mac) +- return ERR_PTR(-ENOMEM); + +- return mac; ++ return ret; + } + + /** +@@ -98,24 +90,27 @@ static const void *of_get_mac_addr_nvmem + * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists + * but is all zeros. 
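The conversion repeated across all of the drivers above follows one pattern: of_get_mac_address() now fills a caller-supplied ETH_ALEN buffer and returns 0 or a negative errno, so each old "!IS_ERR(mac) ? ether_addr_copy() : fall back" branch collapses into a single error check. A minimal sketch of that calling convention, using only helpers that appear in these patches (the function and variable names are illustrative, not taken from any driver here):

#include <linux/etherdevice.h>
#include <linux/of_net.h>

/* Illustrative consumer of the new API: fill dev_addr from DT/nvmem,
 * otherwise fall back to a random MAC, as most converted drivers do.
 */
static void example_set_mac(struct net_device *ndev, struct device_node *np)
{
	int err = of_get_mac_address(np, ndev->dev_addr);

	if (err)	/* no usable DT/nvmem address (e.g. -ENODEV) */
		eth_hw_addr_random(ndev);
}

Drivers that must not silently randomize (ll_temac returns -ENODEV, axienet warns and clears the address) keep their own error handling, but the buffer-plus-errno contract is the same; the 733 patches further below refine the fallback for the -EPROBE_DEFER case.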
+ * +- * Return: Will be a valid pointer on success and ERR_PTR in case of error. ++ * Return: 0 on success and errno in case of error. + */ +-const void *of_get_mac_address(struct device_node *np) ++int of_get_mac_address(struct device_node *np, u8 *addr) + { +- const void *addr; +- +- addr = of_get_mac_addr(np, "mac-address"); +- if (addr) +- return addr; ++ int ret; + +- addr = of_get_mac_addr(np, "local-mac-address"); +- if (addr) +- return addr; ++ if (!np) ++ return -ENODEV; + +- addr = of_get_mac_addr(np, "address"); +- if (addr) +- return addr; ++ ret = of_get_mac_addr(np, "mac-address", addr); ++ if (!ret) ++ return 0; ++ ++ ret = of_get_mac_addr(np, "local-mac-address", addr); ++ if (!ret) ++ return 0; ++ ++ ret = of_get_mac_addr(np, "address", addr); ++ if (!ret) ++ return 0; + +- return of_get_mac_addr_nvmem(np); ++ return of_get_mac_addr_nvmem(np, addr); + } + EXPORT_SYMBOL(of_get_mac_address); +--- a/drivers/staging/octeon/ethernet.c ++++ b/drivers/staging/octeon/ethernet.c +@@ -407,14 +407,10 @@ static int cvm_oct_common_set_mac_addres + int cvm_oct_common_init(struct net_device *dev) + { + struct octeon_ethernet *priv = netdev_priv(dev); +- const u8 *mac = NULL; ++ int ret; + +- if (priv->of_node) +- mac = of_get_mac_address(priv->of_node); +- +- if (!IS_ERR_OR_NULL(mac)) +- ether_addr_copy(dev->dev_addr, mac); +- else ++ ret = of_get_mac_address(priv->of_node, dev->dev_addr); ++ if (ret) + eth_hw_addr_random(dev); + + /* +--- a/drivers/staging/wfx/main.c ++++ b/drivers/staging/wfx/main.c +@@ -334,7 +334,6 @@ int wfx_probe(struct wfx_dev *wdev) + { + int i; + int err; +- const void *macaddr; + struct gpio_desc *gpio_saved; + + // During first part of boot, gpio_wakeup cannot yet been used. So +@@ -423,9 +422,9 @@ int wfx_probe(struct wfx_dev *wdev) + + for (i = 0; i < ARRAY_SIZE(wdev->addresses); i++) { + eth_zero_addr(wdev->addresses[i].addr); +- macaddr = of_get_mac_address(wdev->dev->of_node); +- if (!IS_ERR_OR_NULL(macaddr)) { +- ether_addr_copy(wdev->addresses[i].addr, macaddr); ++ err = of_get_mac_address(wdev->dev->of_node, ++ wdev->addresses[i].addr); ++ if (!err) { + wdev->addresses[i].addr[ETH_ALEN - 1] += i; + } else { + ether_addr_copy(wdev->addresses[i].addr, +--- a/include/linux/of_net.h ++++ b/include/linux/of_net.h +@@ -13,7 +13,7 @@ + + struct net_device; + extern int of_get_phy_mode(struct device_node *np, phy_interface_t *interface); +-extern const void *of_get_mac_address(struct device_node *np); ++extern int of_get_mac_address(struct device_node *np, u8 *mac); + extern struct net_device *of_find_net_device_by_node(struct device_node *np); + #else + static inline int of_get_phy_mode(struct device_node *np, +@@ -22,9 +22,9 @@ static inline int of_get_phy_mode(struct + return -ENODEV; + } + +-static inline const void *of_get_mac_address(struct device_node *np) ++static inline int of_get_mac_address(struct device_node *np, u8 *mac) + { +- return ERR_PTR(-ENODEV); ++ return -ENODEV; + } + + static inline struct net_device *of_find_net_device_by_node(struct device_node *np) +--- a/include/net/dsa.h ++++ b/include/net/dsa.h +@@ -208,7 +208,7 @@ struct dsa_port { + unsigned int index; + const char *name; + struct dsa_port *cpu_dp; +- const char *mac; ++ u8 mac[ETH_ALEN]; + struct device_node *dn; + unsigned int ageing_time; + bool vlan_filtering; +--- a/net/dsa/dsa2.c ++++ b/net/dsa/dsa2.c +@@ -288,7 +288,7 @@ static int dsa_port_setup(struct dsa_por + + break; + case DSA_PORT_TYPE_USER: +- dp->mac = of_get_mac_address(dp->dn); ++ of_get_mac_address(dp->dn, 
dp->mac); + err = dsa_slave_create(dp); + if (err) + break; +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -1855,7 +1855,7 @@ int dsa_slave_create(struct dsa_port *po + slave_dev->hw_features |= NETIF_F_HW_TC; + slave_dev->features |= NETIF_F_LLTX; + slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; +- if (!IS_ERR_OR_NULL(port->mac)) ++ if (!is_zero_ether_addr(port->mac)) + ether_addr_copy(slave_dev->dev_addr, port->mac); + else + eth_hw_addr_inherit(slave_dev, master); +--- a/net/ethernet/eth.c ++++ b/net/ethernet/eth.c +@@ -506,13 +506,14 @@ unsigned char * __weak arch_get_platform + + int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr) + { +- const unsigned char *addr = NULL; ++ unsigned char *addr; ++ int ret; + +- if (dev->of_node) +- addr = of_get_mac_address(dev->of_node); +- if (IS_ERR_OR_NULL(addr)) +- addr = arch_get_platform_mac_address(); ++ ret = of_get_mac_address(dev->of_node, mac_addr); ++ if (!ret) ++ return 0; + ++ addr = arch_get_platform_mac_address(); + if (!addr) + return -ENODEV; + diff --git a/target/linux/generic/backport-5.15/732-net-next-2-of-net-fix-of_get_mac_addr_nvmem-for-non-platform-devices.patch b/target/linux/generic/backport-5.15/732-net-next-2-of-net-fix-of_get_mac_addr_nvmem-for-non-platform-devices.patch new file mode 100644 index 0000000000..245c5f3bd6 --- /dev/null +++ b/target/linux/generic/backport-5.15/732-net-next-2-of-net-fix-of_get_mac_addr_nvmem-for-non-platform-devices.patch @@ -0,0 +1,77 @@ +From f10843e04a075202dbb39dfcee047e3a2fdf5a8d Mon Sep 17 00:00:00 2001 +From: Michael Walle <michael@walle.cc> +Date: Mon, 12 Apr 2021 19:47:18 +0200 +Subject: of: net: fix of_get_mac_addr_nvmem() for non-platform devices + +of_get_mac_address() already supports fetching the MAC address by an +nvmem provider. But until now, it was just working for platform devices. +Esp. it was not working for DSA ports and PCI devices. It gets more +common that PCI devices have a device tree binding since SoCs contain +integrated root complexes. + +Use the nvmem of_* binding to fetch the nvmem cells by a struct +device_node. We still have to try to read the cell by device first +because there might be a nvmem_cell_lookup associated with that device. + +Signed-off-by: Michael Walle <michael@walle.cc> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/of/of_net.c | 35 ++++++++++++++++++++++++++++++----- + 1 file changed, 30 insertions(+), 5 deletions(-) + +--- a/drivers/of/of_net.c ++++ b/drivers/of/of_net.c +@@ -11,6 +11,7 @@ + #include <linux/phy.h> + #include <linux/export.h> + #include <linux/device.h> ++#include <linux/nvmem-consumer.h> + + /** + * of_get_phy_mode - Get phy mode for given device_node +@@ -59,15 +60,39 @@ static int of_get_mac_addr(struct device + static int of_get_mac_addr_nvmem(struct device_node *np, u8 *addr) + { + struct platform_device *pdev = of_find_device_by_node(np); ++ struct nvmem_cell *cell; ++ const void *mac; ++ size_t len; + int ret; + +- if (!pdev) +- return -ENODEV; ++ /* Try lookup by device first, there might be a nvmem_cell_lookup ++ * associated with a given device. 
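The of_get_mac_addr_nvmem() rewrite is easier to follow reassembled from the hunk that continues below. The post-patch helper, reflowed and lightly commented for readability (the diff text remains the authoritative version), first keeps the device-based lookup and only then reads the "mac-address" nvmem cell straight from the node, which is what makes DSA ports and PCI devices work:

static int of_get_mac_addr_nvmem(struct device_node *np, u8 *addr)
{
	struct platform_device *pdev = of_find_device_by_node(np);
	struct nvmem_cell *cell;
	const void *mac;
	size_t len;
	int ret;

	/* Try lookup by device first, there might be a nvmem_cell_lookup
	 * associated with a given device.
	 */
	if (pdev) {
		ret = nvmem_get_mac_address(&pdev->dev, addr);
		put_device(&pdev->dev);
		return ret;
	}

	/* No platform device behind this node (e.g. a DSA port or a PCI
	 * function): resolve the cell from the device_node itself.
	 */
	cell = of_nvmem_cell_get(np, "mac-address");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	mac = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(mac))
		return PTR_ERR(mac);

	if (len != ETH_ALEN || !is_valid_ether_addr(mac)) {
		kfree(mac);
		return -EINVAL;
	}

	memcpy(addr, mac, ETH_ALEN);
	kfree(mac);
	return 0;
}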
++ */ ++ if (pdev) { ++ ret = nvmem_get_mac_address(&pdev->dev, addr); ++ put_device(&pdev->dev); ++ return ret; ++ } ++ ++ cell = of_nvmem_cell_get(np, "mac-address"); ++ if (IS_ERR(cell)) ++ return PTR_ERR(cell); ++ ++ mac = nvmem_cell_read(cell, &len); ++ nvmem_cell_put(cell); ++ ++ if (IS_ERR(mac)) ++ return PTR_ERR(mac); ++ ++ if (len != ETH_ALEN || !is_valid_ether_addr(mac)) { ++ kfree(mac); ++ return -EINVAL; ++ } + +- ret = nvmem_get_mac_address(&pdev->dev, addr); +- put_device(&pdev->dev); ++ memcpy(addr, mac, ETH_ALEN); ++ kfree(mac); + +- return ret; ++ return 0; + } + + /** diff --git a/target/linux/generic/backport-5.15/733-v5.15-0001-net-bgmac-bcma-handle-deferred-probe-error-due-to-ma.patch b/target/linux/generic/backport-5.15/733-v5.15-0001-net-bgmac-bcma-handle-deferred-probe-error-due-to-ma.patch new file mode 100644 index 0000000000..6e7f20634f --- /dev/null +++ b/target/linux/generic/backport-5.15/733-v5.15-0001-net-bgmac-bcma-handle-deferred-probe-error-due-to-ma.patch @@ -0,0 +1,42 @@ +From 029497e66bdc762e001880e4c85a91f35a54b1e2 Mon Sep 17 00:00:00 2001 +From: Christian Lamparter <chunkeey@gmail.com> +Date: Sun, 19 Sep 2021 13:57:25 +0200 +Subject: [PATCH] net: bgmac-bcma: handle deferred probe error due to + mac-address +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Due to the inclusion of nvmem handling into the mac-address getter +function of_get_mac_address() by +commit d01f449c008a ("of_net: add NVMEM support to of_get_mac_address") +it is now possible to get a -EPROBE_DEFER return code. Which did cause +bgmac to assign a random ethernet address. + +This exact issue happened on my Meraki MR32. The nvmem provider is +an EEPROM (at24c64) which gets instantiated once the module +driver is loaded... This happens once the filesystem becomes available. + +With this patch, bgmac_probe() will propagate the -EPROBE_DEFER error. +Then the driver subsystem will reschedule the probe at a later time. + +Cc: Petr Štetiar <ynezz@true.cz> +Cc: Michael Walle <michael@walle.cc> +Fixes: d01f449c008a ("of_net: add NVMEM support to of_get_mac_address") +Signed-off-by: Christian Lamparter <chunkeey@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/ethernet/broadcom/bgmac-bcma.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c ++++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c +@@ -129,6 +129,8 @@ static int bgmac_probe(struct bcma_devic + bcma_set_drvdata(core, bgmac); + + err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr); ++ if (err == -EPROBE_DEFER) ++ return err; + + /* If no MAC address assigned via device tree, check SPROM */ + if (err) { diff --git a/target/linux/generic/backport-5.15/733-v5.15-0002-net-bgmac-platform-handle-mac-address-deferral.patch b/target/linux/generic/backport-5.15/733-v5.15-0002-net-bgmac-platform-handle-mac-address-deferral.patch new file mode 100644 index 0000000000..bde62f3b1b --- /dev/null +++ b/target/linux/generic/backport-5.15/733-v5.15-0002-net-bgmac-platform-handle-mac-address-deferral.patch @@ -0,0 +1,36 @@ +From 763716a55cb1f480ffe1a9702e6b5d9ea1a80a24 Mon Sep 17 00:00:00 2001 +From: Matthew Hagan <mnhagan88@gmail.com> +Date: Sat, 25 Sep 2021 11:36:27 +0000 +Subject: [PATCH] net: bgmac-platform: handle mac-address deferral + +This patch is a replication of Christian Lamparter's "net: bgmac-bcma: +handle deferred probe error due to mac-address" patch for the +bgmac-platform driver [1]. 
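Both bgmac fixes reduce to the same guard in probe(): treat -EPROBE_DEFER from of_get_mac_address() as "the nvmem provider is not bound yet, retry later" rather than falling through to a random or SPROM-derived address. The general shape of that probe-time pattern is sketched here; the names are illustrative and this is not the exact bgmac code:

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* assumed: ndev was allocated and stashed earlier in probe */
	struct net_device *ndev = platform_get_drvdata(pdev);
	int err;

	err = of_get_mac_address(pdev->dev.of_node, ndev->dev_addr);
	if (err == -EPROBE_DEFER)
		return err;	/* driver core re-runs probe once the provider binds */
	if (err) {
		dev_warn(&pdev->dev, "MAC address not present in device tree\n");
		eth_hw_addr_random(ndev);	/* genuinely absent: randomize */
	}

	return 0;
}

Without the guard, a deferred nvmem lookup is indistinguishable from a missing property, which is exactly how the Meraki MR32 mentioned above ended up with a random address.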
+ +As is the case with the bgmac-bcma driver, this change is to cover the +scenario where the MAC address cannot yet be discovered due to reliance +on an nvmem provider which is yet to be instantiated, resulting in a +random address being assigned that has to be manually overridden. + +[1] https://lore.kernel.org/netdev/20210919115725.29064-1-chunkeey@gmail.com + +Signed-off-by: Matthew Hagan <mnhagan88@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/ethernet/broadcom/bgmac-platform.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/net/ethernet/broadcom/bgmac-platform.c ++++ b/drivers/net/ethernet/broadcom/bgmac-platform.c +@@ -193,6 +193,9 @@ static int bgmac_probe(struct platform_d + bgmac->dma_dev = &pdev->dev; + + ret = of_get_mac_address(np, bgmac->net_dev->dev_addr); ++ if (ret == -EPROBE_DEFER) ++ return ret; ++ + if (ret) + dev_warn(&pdev->dev, + "MAC address not present in device tree\n"); diff --git a/target/linux/generic/backport-5.15/734-v5.16-0001-net-bgmac-improve-handling-PHY.patch b/target/linux/generic/backport-5.15/734-v5.16-0001-net-bgmac-improve-handling-PHY.patch new file mode 100644 index 0000000000..6788a2ec35 --- /dev/null +++ b/target/linux/generic/backport-5.15/734-v5.16-0001-net-bgmac-improve-handling-PHY.patch @@ -0,0 +1,84 @@ +From b5375509184dc23d2b7fa0c5ed8763899ccc9674 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Sat, 2 Oct 2021 19:58:11 +0200 +Subject: [PATCH] net: bgmac: improve handling PHY +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +1. Use info from DT if available + +It allows describing for example a fixed link. It's more accurate than +just guessing there may be one (depending on a chipset). + +2. Verify PHY ID before trying to connect PHY + +PHY addr 0x1e (30) is special in Broadcom routers and means a switch +connected as MDIO devices instead of a real PHY. Don't try connecting to +it. + +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/ethernet/broadcom/bgmac-bcma.c | 33 ++++++++++++++-------- + 1 file changed, 21 insertions(+), 12 deletions(-) + +--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c ++++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c +@@ -11,6 +11,7 @@ + #include <linux/bcma/bcma.h> + #include <linux/brcmphy.h> + #include <linux/etherdevice.h> ++#include <linux/of_mdio.h> + #include <linux/of_net.h> + #include "bgmac.h" + +@@ -86,17 +87,28 @@ static int bcma_phy_connect(struct bgmac + struct phy_device *phy_dev; + char bus_id[MII_BUS_ID_SIZE + 3]; + ++ /* DT info should be the most accurate */ ++ phy_dev = of_phy_get_and_connect(bgmac->net_dev, bgmac->dev->of_node, ++ bgmac_adjust_link); ++ if (phy_dev) ++ return 0; ++ + /* Connect to the PHY */ +- snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id, +- bgmac->phyaddr); +- phy_dev = phy_connect(bgmac->net_dev, bus_id, bgmac_adjust_link, +- PHY_INTERFACE_MODE_MII); +- if (IS_ERR(phy_dev)) { +- dev_err(bgmac->dev, "PHY connection failed\n"); +- return PTR_ERR(phy_dev); ++ if (bgmac->mii_bus && bgmac->phyaddr != BGMAC_PHY_NOREGS) { ++ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id, ++ bgmac->phyaddr); ++ phy_dev = phy_connect(bgmac->net_dev, bus_id, bgmac_adjust_link, ++ PHY_INTERFACE_MODE_MII); ++ if (IS_ERR(phy_dev)) { ++ dev_err(bgmac->dev, "PHY connection failed\n"); ++ return PTR_ERR(phy_dev); ++ } ++ ++ return 0; + } + +- return 0; ++ /* Assume a fixed link to the switch port */ ++ return bgmac_phy_connect_direct(bgmac); + } + + static const struct bcma_device_id bgmac_bcma_tbl[] = { +@@ -297,10 +309,7 @@ static int bgmac_probe(struct bcma_devic + bgmac->cco_ctl_maskset = bcma_bgmac_cco_ctl_maskset; + bgmac->get_bus_clock = bcma_bgmac_get_bus_clock; + bgmac->cmn_maskset32 = bcma_bgmac_cmn_maskset32; +- if (bgmac->mii_bus) +- bgmac->phy_connect = bcma_phy_connect; +- else +- bgmac->phy_connect = bgmac_phy_connect_direct; ++ bgmac->phy_connect = bcma_phy_connect; + + err = bgmac_enet_probe(bgmac); + if (err) diff --git a/target/linux/generic/backport-5.15/734-v5.16-0002-net-bgmac-support-MDIO-described-in-DT.patch b/target/linux/generic/backport-5.15/734-v5.16-0002-net-bgmac-support-MDIO-described-in-DT.patch new file mode 100644 index 0000000000..f134828273 --- /dev/null +++ b/target/linux/generic/backport-5.15/734-v5.16-0002-net-bgmac-support-MDIO-described-in-DT.patch @@ -0,0 +1,54 @@ +From 45c9d966688e7fad7f24bfc450547d91e4304d0b Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Sat, 2 Oct 2021 19:58:12 +0200 +Subject: [PATCH] net: bgmac: support MDIO described in DT +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Check ethernet controller DT node for "mdio" subnode and use it with +of_mdiobus_register() when present. That allows specifying MDIO and its +PHY devices in a standard DT based way. + +This is required for BCM53573 SoC support. That family is sometimes +called Northstar (by marketing?) but is quite different from it. It uses +different CPU(s) and many different hw blocks. + +One of shared blocks in BCM53573 is Ethernet controller. Switch however +is not SRAB accessible (as it Northstar) but is MDIO attached. + +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c ++++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c +@@ -10,6 +10,7 @@ + + #include <linux/bcma/bcma.h> + #include <linux/brcmphy.h> ++#include <linux/of_mdio.h> + #include "bgmac.h" + + static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask, +@@ -211,6 +212,7 @@ struct mii_bus *bcma_mdio_mii_register(s + { + struct bcma_device *core = bgmac->bcma.core; + struct mii_bus *mii_bus; ++ struct device_node *np; + int err; + + mii_bus = mdiobus_alloc(); +@@ -229,7 +231,9 @@ struct mii_bus *bcma_mdio_mii_register(s + mii_bus->parent = &core->dev; + mii_bus->phy_mask = ~(1 << bgmac->phyaddr); + +- err = mdiobus_register(mii_bus); ++ np = of_get_child_by_name(core->dev.of_node, "mdio"); ++ ++ err = of_mdiobus_register(mii_bus, np); + if (err) { + dev_err(&core->dev, "Registration of mii bus failed\n"); + goto err_free_bus; diff --git a/target/linux/generic/backport-5.15/735-v5.14-01-net-dsa-qca8k-change-simple-print-to-dev-variant.patch b/target/linux/generic/backport-5.15/735-v5.14-01-net-dsa-qca8k-change-simple-print-to-dev-variant.patch new file mode 100644 index 0000000000..b8e6d9b613 --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-01-net-dsa-qca8k-change-simple-print-to-dev-variant.patch @@ -0,0 +1,35 @@ +From 5d9e068402dcf7354cc8ee66c2152845306d2ccb Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 22:59:51 +0200 +Subject: [PATCH] net: dsa: qca8k: change simple print to dev variant + +Change pr_err and pr_warn to dev variant. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -701,7 +701,7 @@ qca8k_setup(struct dsa_switch *ds) + + /* Make sure that port 0 is the cpu port */ + if (!dsa_is_cpu_port(ds, 0)) { +- pr_err("port 0 is not the CPU port\n"); ++ dev_err(priv->dev, "port 0 is not the CPU port"); + return -EINVAL; + } + +@@ -711,7 +711,7 @@ qca8k_setup(struct dsa_switch *ds) + priv->regmap = devm_regmap_init(ds->dev, NULL, priv, + &qca8k_regmap_config); + if (IS_ERR(priv->regmap)) +- pr_warn("regmap initialization failed"); ++ dev_warn(priv->dev, "regmap initialization failed"); + + ret = qca8k_setup_mdio_bus(priv); + if (ret) diff --git a/target/linux/generic/backport-5.15/735-v5.14-02-net-dsa-qca8k-use-iopoll-macro-for-qca8k_busy_wait.patch b/target/linux/generic/backport-5.15/735-v5.14-02-net-dsa-qca8k-use-iopoll-macro-for-qca8k_busy_wait.patch new file mode 100644 index 0000000000..ff8288d484 --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-02-net-dsa-qca8k-use-iopoll-macro-for-qca8k_busy_wait.patch @@ -0,0 +1,61 @@ +From 2ad255f2faaffb3af786031fba2e7955454b558a Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 22:59:52 +0200 +Subject: [PATCH] net: dsa: qca8k: use iopoll macro for qca8k_busy_wait + +Use iopoll macro instead of while loop. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 23 +++++++++++------------ + drivers/net/dsa/qca8k.h | 2 ++ + 2 files changed, 13 insertions(+), 12 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -262,21 +262,20 @@ static struct regmap_config qca8k_regmap + static int + qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask) + { +- unsigned long timeout; ++ u32 val; ++ int ret; + +- timeout = jiffies + msecs_to_jiffies(20); ++ ret = read_poll_timeout(qca8k_read, val, !(val & mask), ++ 0, QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, ++ priv, reg); + +- /* loop until the busy flag has cleared */ +- do { +- u32 val = qca8k_read(priv, reg); +- int busy = val & mask; ++ /* Check if qca8k_read has failed for a different reason ++ * before returning -ETIMEDOUT ++ */ ++ if (ret < 0 && val < 0) ++ return val; + +- if (!busy) +- break; +- cond_resched(); +- } while (!time_after_eq(jiffies, timeout)); +- +- return time_after_eq(jiffies, timeout); ++ return ret; + } + + static void +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -18,6 +18,8 @@ + #define PHY_ID_QCA8337 0x004dd036 + #define QCA8K_ID_QCA8337 0x13 + ++#define QCA8K_BUSY_WAIT_TIMEOUT 20 ++ + #define QCA8K_NUM_FDB_RECORDS 2048 + + #define QCA8K_CPU_PORT 0 diff --git a/target/linux/generic/backport-5.15/735-v5.14-03-net-dsa-qca8k-improve-qca8k-read-write-rmw-bus-acces.patch b/target/linux/generic/backport-5.15/735-v5.14-03-net-dsa-qca8k-improve-qca8k-read-write-rmw-bus-acces.patch new file mode 100644 index 0000000000..c403589874 --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-03-net-dsa-qca8k-improve-qca8k-read-write-rmw-bus-acces.patch @@ -0,0 +1,86 @@ +From 504bf65931824eda83494e5b5d75686e27ace03e Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 22:59:53 +0200 +Subject: [PATCH] net: dsa: qca8k: improve qca8k read/write/rmw bus access + +Put bus in local variable to improve faster access to the mdio bus. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 29 ++++++++++++++++------------- + 1 file changed, 16 insertions(+), 13 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -142,17 +142,18 @@ qca8k_set_page(struct mii_bus *bus, u16 + static u32 + qca8k_read(struct qca8k_priv *priv, u32 reg) + { ++ struct mii_bus *bus = priv->bus; + u16 r1, r2, page; + u32 val; + + qca8k_split_addr(reg, &r1, &r2, &page); + +- mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED); ++ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + +- qca8k_set_page(priv->bus, page); +- val = qca8k_mii_read32(priv->bus, 0x10 | r2, r1); ++ qca8k_set_page(bus, page); ++ val = qca8k_mii_read32(bus, 0x10 | r2, r1); + +- mutex_unlock(&priv->bus->mdio_lock); ++ mutex_unlock(&bus->mdio_lock); + + return val; + } +@@ -160,35 +161,37 @@ qca8k_read(struct qca8k_priv *priv, u32 + static void + qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val) + { ++ struct mii_bus *bus = priv->bus; + u16 r1, r2, page; + + qca8k_split_addr(reg, &r1, &r2, &page); + +- mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED); ++ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + +- qca8k_set_page(priv->bus, page); +- qca8k_mii_write32(priv->bus, 0x10 | r2, r1, val); ++ qca8k_set_page(bus, page); ++ qca8k_mii_write32(bus, 0x10 | r2, r1, val); + +- mutex_unlock(&priv->bus->mdio_lock); ++ mutex_unlock(&bus->mdio_lock); + } + + static u32 + qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 val) + { ++ struct mii_bus *bus = priv->bus; + u16 r1, r2, page; + u32 ret; + + qca8k_split_addr(reg, &r1, &r2, &page); + +- mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED); ++ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + +- qca8k_set_page(priv->bus, page); +- ret = qca8k_mii_read32(priv->bus, 0x10 | r2, r1); ++ qca8k_set_page(bus, page); ++ ret = qca8k_mii_read32(bus, 0x10 | r2, r1); + ret &= ~mask; + ret |= val; +- qca8k_mii_write32(priv->bus, 0x10 | r2, r1, ret); ++ qca8k_mii_write32(bus, 0x10 | r2, r1, ret); + +- mutex_unlock(&priv->bus->mdio_lock); ++ mutex_unlock(&bus->mdio_lock); + + return ret; + } diff --git a/target/linux/generic/backport-5.15/735-v5.14-04-net-dsa-qca8k-handle-qca8k_set_page-errors.patch b/target/linux/generic/backport-5.15/735-v5.14-04-net-dsa-qca8k-handle-qca8k_set_page-errors.patch new file mode 100644 index 0000000000..6be494a8c7 --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-04-net-dsa-qca8k-handle-qca8k_set_page-errors.patch @@ -0,0 +1,101 @@ +From ba5707ec58cfb6853dff41c2aae72deb6a03d389 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 22:59:54 +0200 +Subject: [PATCH] net: dsa: qca8k: handle qca8k_set_page errors + +With a remote possibility, the set_page function can fail. Since this is +a critical part of the write/read qca8k regs, propagate the error and +terminate any read/write operation. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 33 ++++++++++++++++++++++++++------- + 1 file changed, 26 insertions(+), 7 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -127,16 +127,23 @@ qca8k_mii_write32(struct mii_bus *bus, i + "failed to write qca8k 32bit register\n"); + } + +-static void ++static int + qca8k_set_page(struct mii_bus *bus, u16 page) + { ++ int ret; ++ + if (page == qca8k_current_page) +- return; ++ return 0; + +- if (bus->write(bus, 0x18, 0, page) < 0) ++ ret = bus->write(bus, 0x18, 0, page); ++ if (ret < 0) { + dev_err_ratelimited(&bus->dev, + "failed to set qca8k page\n"); ++ return ret; ++ } ++ + qca8k_current_page = page; ++ return 0; + } + + static u32 +@@ -150,11 +157,14 @@ qca8k_read(struct qca8k_priv *priv, u32 + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + +- qca8k_set_page(bus, page); ++ val = qca8k_set_page(bus, page); ++ if (val < 0) ++ goto exit; ++ + val = qca8k_mii_read32(bus, 0x10 | r2, r1); + ++exit: + mutex_unlock(&bus->mdio_lock); +- + return val; + } + +@@ -163,14 +173,19 @@ qca8k_write(struct qca8k_priv *priv, u32 + { + struct mii_bus *bus = priv->bus; + u16 r1, r2, page; ++ int ret; + + qca8k_split_addr(reg, &r1, &r2, &page); + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + +- qca8k_set_page(bus, page); ++ ret = qca8k_set_page(bus, page); ++ if (ret < 0) ++ goto exit; ++ + qca8k_mii_write32(bus, 0x10 | r2, r1, val); + ++exit: + mutex_unlock(&bus->mdio_lock); + } + +@@ -185,12 +200,16 @@ qca8k_rmw(struct qca8k_priv *priv, u32 r + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + +- qca8k_set_page(bus, page); ++ ret = qca8k_set_page(bus, page); ++ if (ret < 0) ++ goto exit; ++ + ret = qca8k_mii_read32(bus, 0x10 | r2, r1); + ret &= ~mask; + ret |= val; + qca8k_mii_write32(bus, 0x10 | r2, r1, ret); + ++exit: + mutex_unlock(&bus->mdio_lock); + + return ret; diff --git a/target/linux/generic/backport-5.15/735-v5.14-05-net-dsa-qca8k-handle-error-with-qca8k_read-operation.patch b/target/linux/generic/backport-5.15/735-v5.14-05-net-dsa-qca8k-handle-error-with-qca8k_read-operation.patch new file mode 100644 index 0000000000..3349b7897a --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-05-net-dsa-qca8k-handle-error-with-qca8k_read-operation.patch @@ -0,0 +1,207 @@ +From 028f5f8ef44fcf87a456772cbb9f0d90a0a22884 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 22:59:55 +0200 +Subject: [PATCH] net: dsa: qca8k: handle error with qca8k_read operation + +qca8k_read can fail. Rework any user to handle error values and +correctly return. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 73 ++++++++++++++++++++++++++++++++--------- + 1 file changed, 58 insertions(+), 15 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -231,8 +231,13 @@ static int + qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) + { + struct qca8k_priv *priv = (struct qca8k_priv *)ctx; ++ int ret; ++ ++ ret = qca8k_read(priv, reg); ++ if (ret < 0) ++ return ret; + +- *val = qca8k_read(priv, reg); ++ *val = ret; + + return 0; + } +@@ -300,15 +305,20 @@ qca8k_busy_wait(struct qca8k_priv *priv, + return ret; + } + +-static void ++static int + qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb) + { +- u32 reg[4]; ++ u32 reg[4], val; + int i; + + /* load the ARL table into an array */ +- for (i = 0; i < 4; i++) +- reg[i] = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4)); ++ for (i = 0; i < 4; i++) { ++ val = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4)); ++ if (val < 0) ++ return val; ++ ++ reg[i] = val; ++ } + + /* vid - 83:72 */ + fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M; +@@ -323,6 +333,8 @@ qca8k_fdb_read(struct qca8k_priv *priv, + fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff; + fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff; + fdb->mac[5] = reg[0] & 0xff; ++ ++ return 0; + } + + static void +@@ -374,6 +386,8 @@ qca8k_fdb_access(struct qca8k_priv *priv + /* Check for table full violation when adding an entry */ + if (cmd == QCA8K_FDB_LOAD) { + reg = qca8k_read(priv, QCA8K_REG_ATU_FUNC); ++ if (reg < 0) ++ return reg; + if (reg & QCA8K_ATU_FUNC_FULL) + return -1; + } +@@ -388,10 +402,10 @@ qca8k_fdb_next(struct qca8k_priv *priv, + + qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging); + ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port); +- if (ret >= 0) +- qca8k_fdb_read(priv, fdb); ++ if (ret < 0) ++ return ret; + +- return ret; ++ return qca8k_fdb_read(priv, fdb); + } + + static int +@@ -449,6 +463,8 @@ qca8k_vlan_access(struct qca8k_priv *pri + /* Check for table full violation when adding an entry */ + if (cmd == QCA8K_VLAN_LOAD) { + reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC1); ++ if (reg < 0) ++ return reg; + if (reg & QCA8K_VTU_FUNC1_FULL) + return -ENOMEM; + } +@@ -475,6 +491,8 @@ qca8k_vlan_add(struct qca8k_priv *priv, + goto out; + + reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0); ++ if (reg < 0) ++ return reg; + reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN; + reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port)); + if (untagged) +@@ -506,6 +524,8 @@ qca8k_vlan_del(struct qca8k_priv *priv, + goto out; + + reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0); ++ if (reg < 0) ++ return reg; + reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port)); + reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT << + QCA8K_VTU_FUNC0_EG_MODE_S(port); +@@ -621,8 +641,11 @@ qca8k_mdio_read(struct qca8k_priv *priv, + QCA8K_MDIO_MASTER_BUSY)) + return -ETIMEDOUT; + +- val = (qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL) & +- QCA8K_MDIO_MASTER_DATA_MASK); ++ val = qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL); ++ if (val < 0) ++ return val; ++ ++ val &= QCA8K_MDIO_MASTER_DATA_MASK; + + return val; + } +@@ -978,6 +1001,8 @@ qca8k_phylink_mac_link_state(struct dsa_ + u32 reg; + + reg = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port)); ++ if (reg < 0) ++ return reg; + + state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP); + state->an_complete = state->link; +@@ -1078,18 +1103,26 @@ qca8k_get_ethtool_stats(struct dsa_switc + { + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; + 
const struct qca8k_mib_desc *mib; +- u32 reg, i; ++ u32 reg, i, val; + u64 hi; + + for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) { + mib = &ar8327_mib[i]; + reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset; + +- data[i] = qca8k_read(priv, reg); ++ val = qca8k_read(priv, reg); ++ if (val < 0) ++ continue; ++ + if (mib->size == 2) { + hi = qca8k_read(priv, reg + 4); +- data[i] |= hi << 32; ++ if (hi < 0) ++ continue; + } ++ ++ data[i] = val; ++ if (mib->size == 2) ++ data[i] |= hi << 32; + } + } + +@@ -1107,18 +1140,25 @@ qca8k_set_mac_eee(struct dsa_switch *ds, + { + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; + u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port); ++ int ret = 0; + u32 reg; + + mutex_lock(&priv->reg_mutex); + reg = qca8k_read(priv, QCA8K_REG_EEE_CTRL); ++ if (reg < 0) { ++ ret = reg; ++ goto exit; ++ } ++ + if (eee->eee_enabled) + reg |= lpi_en; + else + reg &= ~lpi_en; + qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg); +- mutex_unlock(&priv->reg_mutex); + +- return 0; ++exit: ++ mutex_unlock(&priv->reg_mutex); ++ return ret; + } + + static int +@@ -1456,6 +1496,9 @@ qca8k_sw_probe(struct mdio_device *mdiod + + /* read the switches ID register */ + id = qca8k_read(priv, QCA8K_REG_MASK_CTRL); ++ if (id < 0) ++ return id; ++ + id >>= QCA8K_MASK_CTRL_ID_S; + id &= QCA8K_MASK_CTRL_ID_M; + if (id != QCA8K_ID_QCA8337) diff --git a/target/linux/generic/backport-5.15/735-v5.14-06-net-dsa-qca8k-handle-error-with-qca8k_write-operatio.patch b/target/linux/generic/backport-5.15/735-v5.14-06-net-dsa-qca8k-handle-error-with-qca8k_write-operatio.patch new file mode 100644 index 0000000000..1e0e224c39 --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-06-net-dsa-qca8k-handle-error-with-qca8k_write-operatio.patch @@ -0,0 +1,263 @@ +From d7805757c75c76e9518fc1023a29f0c4eed5b581 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 22:59:56 +0200 +Subject: [PATCH] net: dsa: qca8k: handle error with qca8k_write operation + +qca8k_write can fail. Rework any user to handle error values and +correctly return. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 102 ++++++++++++++++++++++++++-------------- + 1 file changed, 67 insertions(+), 35 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -168,7 +168,7 @@ exit: + return val; + } + +-static void ++static int + qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val) + { + struct mii_bus *bus = priv->bus; +@@ -187,6 +187,7 @@ qca8k_write(struct qca8k_priv *priv, u32 + + exit: + mutex_unlock(&bus->mdio_lock); ++ return ret; + } + + static u32 +@@ -247,9 +248,7 @@ qca8k_regmap_write(void *ctx, uint32_t r + { + struct qca8k_priv *priv = (struct qca8k_priv *)ctx; + +- qca8k_write(priv, reg, val); +- +- return 0; ++ return qca8k_write(priv, reg, val); + } + + static const struct regmap_range qca8k_readable_ranges[] = { +@@ -367,6 +366,7 @@ static int + qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port) + { + u32 reg; ++ int ret; + + /* Set the command and FDB index */ + reg = QCA8K_ATU_FUNC_BUSY; +@@ -377,7 +377,9 @@ qca8k_fdb_access(struct qca8k_priv *priv + } + + /* Write the function register triggering the table access */ +- qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg); ++ ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg); ++ if (ret) ++ return ret; + + /* wait for completion */ + if (qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY)) +@@ -447,6 +449,7 @@ static int + qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid) + { + u32 reg; ++ int ret; + + /* Set the command and VLAN index */ + reg = QCA8K_VTU_FUNC1_BUSY; +@@ -454,7 +457,9 @@ qca8k_vlan_access(struct qca8k_priv *pri + reg |= vid << QCA8K_VTU_FUNC1_VID_S; + + /* Write the function register triggering the table access */ +- qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg); ++ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg); ++ if (ret) ++ return ret; + + /* wait for completion */ + if (qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY)) +@@ -502,7 +507,9 @@ qca8k_vlan_add(struct qca8k_priv *priv, + reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG << + QCA8K_VTU_FUNC0_EG_MODE_S(port); + +- qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); ++ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); ++ if (ret) ++ return ret; + ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); + + out: +@@ -545,7 +552,9 @@ qca8k_vlan_del(struct qca8k_priv *priv, + if (del) { + ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid); + } else { +- qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); ++ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); ++ if (ret) ++ return ret; + ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); + } + +@@ -555,15 +564,20 @@ out: + return ret; + } + +-static void ++static int + qca8k_mib_init(struct qca8k_priv *priv) + { ++ int ret; ++ + mutex_lock(&priv->reg_mutex); + qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY); + qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY); + qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP); +- qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB); ++ ++ ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB); ++ + mutex_unlock(&priv->reg_mutex); ++ return ret; + } + + static void +@@ -600,6 +614,7 @@ static int + qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data) + { + u32 phy, val; ++ int ret; + + if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) + return -EINVAL; +@@ -613,7 +628,9 @@ qca8k_mdio_write(struct qca8k_priv *priv + QCA8K_MDIO_MASTER_REG_ADDR(regnum) | + QCA8K_MDIO_MASTER_DATA(data); + +- 
qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val); ++ ret = qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val); ++ if (ret) ++ return ret; + + return qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, + QCA8K_MDIO_MASTER_BUSY); +@@ -623,6 +640,7 @@ static int + qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum) + { + u32 phy, val; ++ int ret; + + if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) + return -EINVAL; +@@ -635,7 +653,9 @@ qca8k_mdio_read(struct qca8k_priv *priv, + QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | + QCA8K_MDIO_MASTER_REG_ADDR(regnum); + +- qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val); ++ ret = qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val); ++ if (ret) ++ return ret; + + if (qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, + QCA8K_MDIO_MASTER_BUSY)) +@@ -766,12 +786,18 @@ qca8k_setup(struct dsa_switch *ds) + QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); + + /* Enable MIB counters */ +- qca8k_mib_init(priv); ++ ret = qca8k_mib_init(priv); ++ if (ret) ++ dev_warn(priv->dev, "mib init failed"); + + /* Enable QCA header mode on the cpu port */ +- qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT), +- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S | +- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S); ++ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT), ++ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S | ++ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S); ++ if (ret) { ++ dev_err(priv->dev, "failed enabling QCA header mode"); ++ return ret; ++ } + + /* Disable forwarding by default on all ports */ + for (i = 0; i < QCA8K_NUM_PORTS; i++) +@@ -783,11 +809,13 @@ qca8k_setup(struct dsa_switch *ds) + qca8k_port_set_status(priv, i, 0); + + /* Forward all unknown frames to CPU port for Linux processing */ +- qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, +- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S | +- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S | +- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S | +- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S); ++ ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, ++ BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S | ++ BIT(0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S | ++ BIT(0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S | ++ BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S); ++ if (ret) ++ return ret; + + /* Setup connection between CPU port & user ports */ + for (i = 0; i < QCA8K_NUM_PORTS; i++) { +@@ -815,16 +843,20 @@ qca8k_setup(struct dsa_switch *ds) + qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i), + 0xfff << shift, + QCA8K_PORT_VID_DEF << shift); +- qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i), +- QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) | +- QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF)); ++ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i), ++ QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) | ++ QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF)); ++ if (ret) ++ return ret; + } + } + + /* Setup our port MTUs to match power on defaults */ + for (i = 0; i < QCA8K_NUM_PORTS; i++) + priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN; +- qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN); ++ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN); ++ if (ret) ++ dev_warn(priv->dev, "failed setting MTU settings"); + + /* Flush the FDB table */ + qca8k_fdb_flush(priv); +@@ -1140,8 +1172,8 @@ qca8k_set_mac_eee(struct dsa_switch *ds, + { + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; + u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port); +- int ret = 0; + u32 reg; ++ int ret; + + mutex_lock(&priv->reg_mutex); + reg = qca8k_read(priv, 
QCA8K_REG_EEE_CTRL); +@@ -1154,7 +1186,7 @@ qca8k_set_mac_eee(struct dsa_switch *ds, + reg |= lpi_en; + else + reg &= ~lpi_en; +- qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg); ++ ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg); + + exit: + mutex_unlock(&priv->reg_mutex); +@@ -1284,9 +1316,7 @@ qca8k_port_change_mtu(struct dsa_switch + mtu = priv->port_mtu[i]; + + /* Include L2 header / FCS length */ +- qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN); +- +- return 0; ++ return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN); + } + + static int diff --git a/target/linux/generic/backport-5.15/735-v5.14-07-net-dsa-qca8k-handle-error-with-qca8k_rmw-operation.patch b/target/linux/generic/backport-5.15/735-v5.14-07-net-dsa-qca8k-handle-error-with-qca8k_rmw-operation.patch new file mode 100644 index 0000000000..506966f1af --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-07-net-dsa-qca8k-handle-error-with-qca8k_rmw-operation.patch @@ -0,0 +1,226 @@ +From aaf421425cbdec4eb6fd75a29e65c2867b0b7bbd Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 22:59:57 +0200 +Subject: [PATCH] net: dsa: qca8k: handle error with qca8k_rmw operation + +qca8k_rmw can fail. Rework any user to handle error values and +correctly return. Change qca8k_rmw to return the error code or 0 instead +of the reg value. The reg returned by qca8k_rmw wasn't used anywhere, +so this doesn't cause any functional change. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 133 +++++++++++++++++++++++++--------------- + 1 file changed, 83 insertions(+), 50 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -190,12 +190,13 @@ exit: + return ret; + } + +-static u32 +-qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 val) ++static int ++qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) + { + struct mii_bus *bus = priv->bus; + u16 r1, r2, page; +- u32 ret; ++ u32 val; ++ int ret; + + qca8k_split_addr(reg, &r1, &r2, &page); + +@@ -205,10 +206,15 @@ qca8k_rmw(struct qca8k_priv *priv, u32 r + if (ret < 0) + goto exit; + +- ret = qca8k_mii_read32(bus, 0x10 | r2, r1); +- ret &= ~mask; +- ret |= val; +- qca8k_mii_write32(bus, 0x10 | r2, r1, ret); ++ val = qca8k_mii_read32(bus, 0x10 | r2, r1); ++ if (val < 0) { ++ ret = val; ++ goto exit; ++ } ++ ++ val &= ~mask; ++ val |= write_val; ++ qca8k_mii_write32(bus, 0x10 | r2, r1, val); + + exit: + mutex_unlock(&bus->mdio_lock); +@@ -216,16 +222,16 @@ exit: + return ret; + } + +-static void ++static int + qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val) + { +- qca8k_rmw(priv, reg, 0, val); ++ return qca8k_rmw(priv, reg, 0, val); + } + +-static void ++static int + qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val) + { +- qca8k_rmw(priv, reg, val, 0); ++ return qca8k_rmw(priv, reg, val, 0); + } + + static int +@@ -570,12 +576,19 @@ qca8k_mib_init(struct qca8k_priv *priv) + int ret; + + mutex_lock(&priv->reg_mutex); +- qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY); ++ ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY); ++ if (ret) ++ goto exit; ++ + qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY); +- qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP); ++ ++ ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP); ++ if (ret) ++ goto exit; + + ret = 
qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB); + ++exit: + mutex_unlock(&priv->reg_mutex); + return ret; + } +@@ -747,9 +760,8 @@ qca8k_setup_mdio_bus(struct qca8k_priv * + * a dt-overlay and driver reload changed the configuration + */ + +- qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL, +- QCA8K_MDIO_MASTER_EN); +- return 0; ++ return qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL, ++ QCA8K_MDIO_MASTER_EN); + } + + priv->ops.phy_read = qca8k_phy_read; +@@ -782,8 +794,12 @@ qca8k_setup(struct dsa_switch *ds) + return ret; + + /* Enable CPU Port */ +- qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, +- QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); ++ ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, ++ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); ++ if (ret) { ++ dev_err(priv->dev, "failed enabling CPU port"); ++ return ret; ++ } + + /* Enable MIB counters */ + ret = qca8k_mib_init(priv); +@@ -800,9 +816,12 @@ qca8k_setup(struct dsa_switch *ds) + } + + /* Disable forwarding by default on all ports */ +- for (i = 0; i < QCA8K_NUM_PORTS; i++) +- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), +- QCA8K_PORT_LOOKUP_MEMBER, 0); ++ for (i = 0; i < QCA8K_NUM_PORTS; i++) { ++ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), ++ QCA8K_PORT_LOOKUP_MEMBER, 0); ++ if (ret) ++ return ret; ++ } + + /* Disable MAC by default on all ports */ + for (i = 1; i < QCA8K_NUM_PORTS; i++) +@@ -821,28 +840,37 @@ qca8k_setup(struct dsa_switch *ds) + for (i = 0; i < QCA8K_NUM_PORTS; i++) { + /* CPU port gets connected to all user ports of the switch */ + if (dsa_is_cpu_port(ds, i)) { +- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT), +- QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds)); ++ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT), ++ QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds)); ++ if (ret) ++ return ret; + } + + /* Individual user ports get connected to CPU port only */ + if (dsa_is_user_port(ds, i)) { + int shift = 16 * (i % 2); + +- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), +- QCA8K_PORT_LOOKUP_MEMBER, +- BIT(QCA8K_CPU_PORT)); ++ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), ++ QCA8K_PORT_LOOKUP_MEMBER, ++ BIT(QCA8K_CPU_PORT)); ++ if (ret) ++ return ret; + + /* Enable ARP Auto-learning by default */ +- qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i), +- QCA8K_PORT_LOOKUP_LEARN); ++ ret = qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i), ++ QCA8K_PORT_LOOKUP_LEARN); ++ if (ret) ++ return ret; + + /* For port based vlans to work we need to set the + * default egress vid + */ +- qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i), +- 0xfff << shift, +- QCA8K_PORT_VID_DEF << shift); ++ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i), ++ 0xfff << shift, ++ QCA8K_PORT_VID_DEF << shift); ++ if (ret) ++ return ret; ++ + ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i), + QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) | + QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF)); +@@ -1234,7 +1262,7 @@ qca8k_port_bridge_join(struct dsa_switch + { + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; + int port_mask = BIT(QCA8K_CPU_PORT); +- int i; ++ int i, ret; + + for (i = 1; i < QCA8K_NUM_PORTS; i++) { + if (dsa_to_port(ds, i)->bridge_dev != br) +@@ -1242,17 +1270,20 @@ qca8k_port_bridge_join(struct dsa_switch + /* Add this port to the portvlan mask of the other ports + * in the bridge + */ +- qca8k_reg_set(priv, +- QCA8K_PORT_LOOKUP_CTRL(i), +- BIT(port)); ++ ret = qca8k_reg_set(priv, ++ QCA8K_PORT_LOOKUP_CTRL(i), ++ BIT(port)); ++ if (ret) ++ return ret; + if (i != port) + port_mask |= BIT(i); + } ++ + /* Add all other ports to this ports 
portvlan mask */ +- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), +- QCA8K_PORT_LOOKUP_MEMBER, port_mask); ++ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), ++ QCA8K_PORT_LOOKUP_MEMBER, port_mask); + +- return 0; ++ return ret; + } + + static void diff --git a/target/linux/generic/backport-5.15/735-v5.14-08-net-dsa-qca8k-handle-error-from-qca8k_busy_wait.patch b/target/linux/generic/backport-5.15/735-v5.14-08-net-dsa-qca8k-handle-error-from-qca8k_busy_wait.patch new file mode 100644 index 0000000000..360ce1d947 --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-08-net-dsa-qca8k-handle-error-from-qca8k_busy_wait.patch @@ -0,0 +1,66 @@ +From b7c818d194927bdc60ed15db55bb8654496a36b7 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 22:59:58 +0200 +Subject: [PATCH] net: dsa: qca8k: handle error from qca8k_busy_wait + +Propagate errors from qca8k_busy_wait instead of hardcoding return +value. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 21 +++++++++++++-------- + 1 file changed, 13 insertions(+), 8 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -388,8 +388,9 @@ qca8k_fdb_access(struct qca8k_priv *priv + return ret; + + /* wait for completion */ +- if (qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY)) +- return -1; ++ ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY); ++ if (ret) ++ return ret; + + /* Check for table full violation when adding an entry */ + if (cmd == QCA8K_FDB_LOAD) { +@@ -468,8 +469,9 @@ qca8k_vlan_access(struct qca8k_priv *pri + return ret; + + /* wait for completion */ +- if (qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY)) +- return -ETIMEDOUT; ++ ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY); ++ if (ret) ++ return ret; + + /* Check for table full violation when adding an entry */ + if (cmd == QCA8K_VLAN_LOAD) { +@@ -580,7 +582,9 @@ qca8k_mib_init(struct qca8k_priv *priv) + if (ret) + goto exit; + +- qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY); ++ ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY); ++ if (ret) ++ goto exit; + + ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP); + if (ret) +@@ -670,9 +674,10 @@ qca8k_mdio_read(struct qca8k_priv *priv, + if (ret) + return ret; + +- if (qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, +- QCA8K_MDIO_MASTER_BUSY)) +- return -ETIMEDOUT; ++ ret = qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, ++ QCA8K_MDIO_MASTER_BUSY); ++ if (ret) ++ return ret; + + val = qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL); + if (val < 0) diff --git a/target/linux/generic/backport-5.15/735-v5.14-09-net-dsa-qca8k-add-support-for-qca8327-switch.patch b/target/linux/generic/backport-5.15/735-v5.14-09-net-dsa-qca8k-add-support-for-qca8327-switch.patch new file mode 100644 index 0000000000..72305850ca --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-09-net-dsa-qca8k-add-support-for-qca8327-switch.patch @@ -0,0 +1,96 @@ +From 6e82a457e06252b59102486767539cc9c2aba60b Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 22:59:59 +0200 +Subject: [PATCH] net: dsa: qca8k: add support for qca8327 switch + +qca8327 switch is a low tier version of the more recent qca8337. +It does share the same regs used by the qca8k driver and can be +supported with minimal change. 
+ +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 23 ++++++++++++++++++++--- + drivers/net/dsa/qca8k.h | 6 ++++++ + 2 files changed, 26 insertions(+), 3 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1533,6 +1533,7 @@ static const struct dsa_switch_ops qca8k + static int + qca8k_sw_probe(struct mdio_device *mdiodev) + { ++ const struct qca8k_match_data *data; + struct qca8k_priv *priv; + u32 id; + +@@ -1560,6 +1561,11 @@ qca8k_sw_probe(struct mdio_device *mdiod + gpiod_set_value_cansleep(priv->reset_gpio, 0); + } + ++ /* get the switches ID from the compatible */ ++ data = of_device_get_match_data(&mdiodev->dev); ++ if (!data) ++ return -ENODEV; ++ + /* read the switches ID register */ + id = qca8k_read(priv, QCA8K_REG_MASK_CTRL); + if (id < 0) +@@ -1567,8 +1573,10 @@ qca8k_sw_probe(struct mdio_device *mdiod + + id >>= QCA8K_MASK_CTRL_ID_S; + id &= QCA8K_MASK_CTRL_ID_M; +- if (id != QCA8K_ID_QCA8337) ++ if (id != data->id) { ++ dev_err(&mdiodev->dev, "Switch id detected %x but expected %x", id, data->id); + return -ENODEV; ++ } + + priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL); + if (!priv->ds) +@@ -1634,9 +1642,18 @@ static int qca8k_resume(struct device *d + static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, + qca8k_suspend, qca8k_resume); + ++static const struct qca8k_match_data qca832x = { ++ .id = QCA8K_ID_QCA8327, ++}; ++ ++static const struct qca8k_match_data qca833x = { ++ .id = QCA8K_ID_QCA8337, ++}; ++ + static const struct of_device_id qca8k_of_match[] = { +- { .compatible = "qca,qca8334" }, +- { .compatible = "qca,qca8337" }, ++ { .compatible = "qca,qca8327", .data = &qca832x }, ++ { .compatible = "qca,qca8334", .data = &qca833x }, ++ { .compatible = "qca,qca8337", .data = &qca833x }, + { /* sentinel */ }, + }; + +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -15,6 +15,8 @@ + #define QCA8K_NUM_PORTS 7 + #define QCA8K_MAX_MTU 9000 + ++#define PHY_ID_QCA8327 0x004dd034 ++#define QCA8K_ID_QCA8327 0x12 + #define PHY_ID_QCA8337 0x004dd036 + #define QCA8K_ID_QCA8337 0x13 + +@@ -213,6 +215,10 @@ struct ar8xxx_port_status { + int enabled; + }; + ++struct qca8k_match_data { ++ u8 id; ++}; ++ + struct qca8k_priv { + struct regmap *regmap; + struct mii_bus *bus; diff --git a/target/linux/generic/backport-5.15/735-v5.14-10-devicetree-net-dsa-qca8k-Document-new-compatible-qca.patch b/target/linux/generic/backport-5.15/735-v5.14-10-devicetree-net-dsa-qca8k-Document-new-compatible-qca.patch new file mode 100644 index 0000000000..3c4a14bd0b --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-10-devicetree-net-dsa-qca8k-Document-new-compatible-qca.patch @@ -0,0 +1,26 @@ +From 227a9ffc1bc77037339530607fe129af3824620e Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 23:00:00 +0200 +Subject: [PATCH] devicetree: net: dsa: qca8k: Document new compatible qca8327 + +Add support for qca8327 in the compatible list. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Acked-by: Rob Herring <robh@kernel.org> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + Documentation/devicetree/bindings/net/dsa/qca8k.txt | 1 + + 1 file changed, 1 insertion(+) + +--- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt ++++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt +@@ -3,6 +3,7 @@ + Required properties: + + - compatible: should be one of: ++ "qca,qca8327" + "qca,qca8334" + "qca,qca8337" + diff --git a/target/linux/generic/backport-5.15/735-v5.14-11-net-dsa-qca8k-add-priority-tweak-to-qca8337-switch.patch b/target/linux/generic/backport-5.15/735-v5.14-11-net-dsa-qca8k-add-priority-tweak-to-qca8337-switch.patch new file mode 100644 index 0000000000..cd3050ef71 --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-11-net-dsa-qca8k-add-priority-tweak-to-qca8337-switch.patch @@ -0,0 +1,130 @@ +From 83a3ceb39b2495171aabe9446271b94c678354f3 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 23:00:01 +0200 +Subject: [PATCH] net: dsa: qca8k: add priority tweak to qca8337 switch + +The port 5 of the qca8337 have some problem in flood condition. The +original legacy driver had some specific buffer and priority settings +for the different port suggested by the QCA switch team. Add this +missing settings to improve switch stability under load condition. +The packet priority tweak is only needed for the qca8337 switch and +other qca8k switch are not affected. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 47 +++++++++++++++++++++++++++++++++++++++++ + drivers/net/dsa/qca8k.h | 25 ++++++++++++++++++++++ + 2 files changed, 72 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -779,6 +779,7 @@ qca8k_setup(struct dsa_switch *ds) + { + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; + int ret, i; ++ u32 mask; + + /* Make sure that port 0 is the cpu port */ + if (!dsa_is_cpu_port(ds, 0)) { +@@ -884,6 +885,51 @@ qca8k_setup(struct dsa_switch *ds) + } + } + ++ /* The port 5 of the qca8337 have some problem in flood condition. The ++ * original legacy driver had some specific buffer and priority settings ++ * for the different port suggested by the QCA switch team. Add this ++ * missing settings to improve switch stability under load condition. ++ * This problem is limited to qca8337 and other qca8k switch are not affected. ++ */ ++ if (priv->switch_id == QCA8K_ID_QCA8337) { ++ for (i = 0; i < QCA8K_NUM_PORTS; i++) { ++ switch (i) { ++ /* The 2 CPU port and port 5 requires some different ++ * priority than any other ports. 
++ */ ++ case 0: ++ case 5: ++ case 6: ++ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | ++ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | ++ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) | ++ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) | ++ QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) | ++ QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) | ++ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e); ++ break; ++ default: ++ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | ++ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | ++ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) | ++ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) | ++ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19); ++ } ++ qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask); ++ ++ mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) | ++ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | ++ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | ++ QCA8K_PORT_HOL_CTRL1_WRED_EN; ++ qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i), ++ QCA8K_PORT_HOL_CTRL1_ING_BUF | ++ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | ++ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | ++ QCA8K_PORT_HOL_CTRL1_WRED_EN, ++ mask); ++ } ++ } ++ + /* Setup our port MTUs to match power on defaults */ + for (i = 0; i < QCA8K_NUM_PORTS; i++) + priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN; +@@ -1578,6 +1624,7 @@ qca8k_sw_probe(struct mdio_device *mdiod + return -ENODEV; + } + ++ priv->switch_id = id; + priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL); + if (!priv->ds) + return -ENOMEM; +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -168,6 +168,30 @@ + #define QCA8K_PORT_LOOKUP_STATE GENMASK(18, 16) + #define QCA8K_PORT_LOOKUP_LEARN BIT(20) + ++#define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF GENMASK(3, 0) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) ((x) << 0) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF GENMASK(7, 4) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) ((x) << 4) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF GENMASK(11, 8) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) ((x) << 8) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF GENMASK(15, 12) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) ((x) << 12) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF GENMASK(19, 16) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) ((x) << 16) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF GENMASK(23, 20) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) ((x) << 20) ++#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF GENMASK(29, 24) ++#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) ((x) << 24) ++ ++#define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8) ++#define QCA8K_PORT_HOL_CTRL1_ING_BUF GENMASK(3, 0) ++#define QCA8K_PORT_HOL_CTRL1_ING(x) ((x) << 0) ++#define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6) ++#define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7) ++#define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8) ++#define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16) ++ + /* Pkt edit registers */ + #define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2))) + +@@ -220,6 +244,7 @@ struct qca8k_match_data { + }; + + struct qca8k_priv { ++ u8 switch_id; + struct regmap *regmap; + struct mii_bus *bus; + struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS]; diff --git a/target/linux/generic/backport-5.15/735-v5.14-12-net-dsa-qca8k-limit-port5-delay-to-qca8337.patch b/target/linux/generic/backport-5.15/735-v5.14-12-net-dsa-qca8k-limit-port5-delay-to-qca8337.patch new file mode 100644 index 0000000000..d25edbb1aa --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-12-net-dsa-qca8k-limit-port5-delay-to-qca8337.patch @@ -0,0 +1,31 @@ +From 5bf9ff3b9fb5ecb67a1a3517b26db3a00f2a2f11 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 
23:00:02 +0200 +Subject: [PATCH] net: dsa: qca8k: limit port5 delay to qca8337 + +Limit port5 rx delay to qca8337. This is taken from the legacy QSDK code +that limits the rx delay on port5 to only this particular switch version, +on other switch only the tx and rx delay for port0 are needed. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1003,8 +1003,10 @@ qca8k_phylink_mac_config(struct dsa_swit + QCA8K_PORT_PAD_RGMII_EN | + QCA8K_PORT_PAD_RGMII_TX_DELAY(QCA8K_MAX_DELAY) | + QCA8K_PORT_PAD_RGMII_RX_DELAY(QCA8K_MAX_DELAY)); +- qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL, +- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); ++ /* QCA8337 requires to set rgmii rx delay */ ++ if (priv->switch_id == QCA8K_ID_QCA8337) ++ qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL, ++ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); + break; + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: diff --git a/target/linux/generic/backport-5.15/735-v5.14-13-net-dsa-qca8k-add-GLOBAL_FC-settings-needed-for-qca8.patch b/target/linux/generic/backport-5.15/735-v5.14-13-net-dsa-qca8k-add-GLOBAL_FC-settings-needed-for-qca8.patch new file mode 100644 index 0000000000..2b393d242a --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-13-net-dsa-qca8k-add-GLOBAL_FC-settings-needed-for-qca8.patch @@ -0,0 +1,48 @@ +From 0fc57e4b5e39461fc0a54aae0afe4241363a7267 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 23:00:03 +0200 +Subject: [PATCH] net: dsa: qca8k: add GLOBAL_FC settings needed for qca8327 + +Switch qca8327 needs special settings for the GLOBAL_FC_THRES regs. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 10 ++++++++++ + drivers/net/dsa/qca8k.h | 6 ++++++ + 2 files changed, 16 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -930,6 +930,16 @@ qca8k_setup(struct dsa_switch *ds) + } + } + ++ /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ ++ if (priv->switch_id == QCA8K_ID_QCA8327) { ++ mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) | ++ QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496); ++ qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH, ++ QCA8K_GLOBAL_FC_GOL_XON_THRES_S | ++ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S, ++ mask); ++ } ++ + /* Setup our port MTUs to match power on defaults */ + for (i = 0; i < QCA8K_NUM_PORTS; i++) + priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN; +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -168,6 +168,12 @@ + #define QCA8K_PORT_LOOKUP_STATE GENMASK(18, 16) + #define QCA8K_PORT_LOOKUP_LEARN BIT(20) + ++#define QCA8K_REG_GLOBAL_FC_THRESH 0x800 ++#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) ((x) << 16) ++#define QCA8K_GLOBAL_FC_GOL_XON_THRES_S GENMASK(24, 16) ++#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) ((x) << 0) ++#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S GENMASK(8, 0) ++ + #define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8) + #define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF GENMASK(3, 0) + #define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) ((x) << 0) diff --git a/target/linux/generic/backport-5.15/735-v5.14-14-net-dsa-qca8k-add-support-for-switch-rev.patch b/target/linux/generic/backport-5.15/735-v5.14-14-net-dsa-qca8k-add-support-for-switch-rev.patch new file mode 100644 index 0000000000..ed9b8188de --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-14-net-dsa-qca8k-add-support-for-switch-rev.patch @@ -0,0 +1,114 @@ +From 95ffeaf18b3bb90eeef52cbf7d79ccc9d0345ff5 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 23:00:04 +0200 +Subject: [PATCH] net: dsa: qca8k: add support for switch rev + +qca8k internal phy driver require some special debug value to be set +based on the switch revision. Rework the switch id read function to +also read the chip revision. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 53 ++++++++++++++++++++++++++--------------- + drivers/net/dsa/qca8k.h | 7 ++++-- + 2 files changed, 39 insertions(+), 21 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1588,12 +1588,40 @@ static const struct dsa_switch_ops qca8k + .phylink_mac_link_up = qca8k_phylink_mac_link_up, + }; + ++static int qca8k_read_switch_id(struct qca8k_priv *priv) ++{ ++ const struct qca8k_match_data *data; ++ u32 val; ++ u8 id; ++ ++ /* get the switches ID from the compatible */ ++ data = of_device_get_match_data(priv->dev); ++ if (!data) ++ return -ENODEV; ++ ++ val = qca8k_read(priv, QCA8K_REG_MASK_CTRL); ++ if (val < 0) ++ return -ENODEV; ++ ++ id = QCA8K_MASK_CTRL_DEVICE_ID(val & QCA8K_MASK_CTRL_DEVICE_ID_MASK); ++ if (id != data->id) { ++ dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id); ++ return -ENODEV; ++ } ++ ++ priv->switch_id = id; ++ ++ /* Save revision to communicate to the internal PHY driver */ ++ priv->switch_revision = (val & QCA8K_MASK_CTRL_REV_ID_MASK); ++ ++ return 0; ++} ++ + static int + qca8k_sw_probe(struct mdio_device *mdiodev) + { +- const struct qca8k_match_data *data; + struct qca8k_priv *priv; +- u32 id; ++ int ret; + + /* allocate the private data struct so that we can probe the switches + * ID register +@@ -1619,24 +1647,11 @@ qca8k_sw_probe(struct mdio_device *mdiod + gpiod_set_value_cansleep(priv->reset_gpio, 0); + } + +- /* get the switches ID from the compatible */ +- data = of_device_get_match_data(&mdiodev->dev); +- if (!data) +- return -ENODEV; ++ /* Check the detected switch id */ ++ ret = qca8k_read_switch_id(priv); ++ if (ret) ++ return ret; + +- /* read the switches ID register */ +- id = qca8k_read(priv, QCA8K_REG_MASK_CTRL); +- if (id < 0) +- return id; +- +- id >>= QCA8K_MASK_CTRL_ID_S; +- id &= QCA8K_MASK_CTRL_ID_M; +- if (id != data->id) { +- dev_err(&mdiodev->dev, "Switch id detected %x but expected %x", id, data->id); +- return -ENODEV; +- } +- +- priv->switch_id = id; + priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL); + if (!priv->ds) + return -ENOMEM; +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -30,8 +30,10 @@ + + /* Global control registers */ + #define QCA8K_REG_MASK_CTRL 0x000 +-#define QCA8K_MASK_CTRL_ID_M 0xff +-#define QCA8K_MASK_CTRL_ID_S 8 ++#define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0) ++#define QCA8K_MASK_CTRL_REV_ID(x) ((x) >> 0) ++#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8) ++#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8) + #define QCA8K_REG_PORT0_PAD_CTRL 0x004 + #define QCA8K_REG_PORT5_PAD_CTRL 0x008 + #define QCA8K_REG_PORT6_PAD_CTRL 0x00c +@@ -251,6 +253,7 @@ struct qca8k_match_data { + + struct qca8k_priv { + u8 switch_id; ++ u8 switch_revision; + struct regmap *regmap; + struct mii_bus *bus; + struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS]; diff --git a/target/linux/generic/backport-5.15/735-v5.14-15-net-dsa-qca8k-add-ethernet-ports-fallback-to-setup_m.patch b/target/linux/generic/backport-5.15/735-v5.14-15-net-dsa-qca8k-add-ethernet-ports-fallback-to-setup_m.patch new file mode 100644 index 0000000000..629cb324e0 --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-15-net-dsa-qca8k-add-ethernet-ports-fallback-to-setup_m.patch @@ -0,0 +1,28 @@ +From 1ee0591a1093c2448642c33433483e9260275f7b Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 23:00:05 +0200 +Subject: [PATCH] net: dsa: qca8k: add 
ethernet-ports fallback to + setup_mdio_bus + +Dsa now also supports ethernet-ports. Add this new binding as a fallback +if the ports node can't be found. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -719,6 +719,9 @@ qca8k_setup_mdio_bus(struct qca8k_priv * + + ports = of_get_child_by_name(priv->dev->of_node, "ports"); + if (!ports) ++ ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports"); ++ ++ if (!ports) + return -EINVAL; + + for_each_available_child_of_node(ports, port) { diff --git a/target/linux/generic/backport-5.15/735-v5.14-16-net-dsa-qca8k-make-rgmii-delay-configurable.patch b/target/linux/generic/backport-5.15/735-v5.14-16-net-dsa-qca8k-make-rgmii-delay-configurable.patch new file mode 100644 index 0000000000..6dc2dc6e3e --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-16-net-dsa-qca8k-make-rgmii-delay-configurable.patch @@ -0,0 +1,188 @@ +From e4b9977cee1583da38a6e9118078bb728aaccf7b Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 23:00:06 +0200 +Subject: [PATCH] net: dsa: qca8k: make rgmii delay configurable + +The legacy qsdk code used a different delay instead of the max value. +Qsdk use 1 ns for rx and 2 ns for tx. Make these values configurable +using the standard rx/tx-internal-delay-ps ethernet binding and apply +qsdk values by default. The connected gmac doesn't add any delay so no +additional delay is added to tx/rx. +On this switch the delay is actually in ns so value should be in the +1000 order. Any value converted from ps to ns by dividing it by 1000 +as the switch max value for delay is 3ns. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 82 ++++++++++++++++++++++++++++++++++++++++- + drivers/net/dsa/qca8k.h | 11 +++--- + 2 files changed, 86 insertions(+), 7 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -778,6 +778,68 @@ qca8k_setup_mdio_bus(struct qca8k_priv * + } + + static int ++qca8k_setup_of_rgmii_delay(struct qca8k_priv *priv) ++{ ++ struct device_node *port_dn; ++ phy_interface_t mode; ++ struct dsa_port *dp; ++ u32 val; ++ ++ /* CPU port is already checked */ ++ dp = dsa_to_port(priv->ds, 0); ++ ++ port_dn = dp->dn; ++ ++ /* Check if port 0 is set to the correct type */ ++ of_get_phy_mode(port_dn, &mode); ++ if (mode != PHY_INTERFACE_MODE_RGMII_ID && ++ mode != PHY_INTERFACE_MODE_RGMII_RXID && ++ mode != PHY_INTERFACE_MODE_RGMII_TXID) { ++ return 0; ++ } ++ ++ switch (mode) { ++ case PHY_INTERFACE_MODE_RGMII_ID: ++ case PHY_INTERFACE_MODE_RGMII_RXID: ++ if (of_property_read_u32(port_dn, "rx-internal-delay-ps", &val)) ++ val = 2; ++ else ++ /* Switch regs accept value in ns, convert ps to ns */ ++ val = val / 1000; ++ ++ if (val > QCA8K_MAX_DELAY) { ++ dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value"); ++ val = 3; ++ } ++ ++ priv->rgmii_rx_delay = val; ++ /* Stop here if we need to check only for rx delay */ ++ if (mode != PHY_INTERFACE_MODE_RGMII_ID) ++ break; ++ ++ fallthrough; ++ case PHY_INTERFACE_MODE_RGMII_TXID: ++ if (of_property_read_u32(port_dn, "tx-internal-delay-ps", &val)) ++ val = 1; ++ else ++ /* Switch regs accept value in ns, convert ps to ns */ ++ val = val / 1000; ++ ++ if (val > QCA8K_MAX_DELAY) { ++ dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value"); ++ val = 3; ++ } ++ ++ priv->rgmii_tx_delay = val; ++ break; ++ default: ++ return 0; ++ } ++ ++ return 0; ++} ++ ++static int + qca8k_setup(struct dsa_switch *ds) + { + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; +@@ -802,6 +864,10 @@ qca8k_setup(struct dsa_switch *ds) + if (ret) + return ret; + ++ ret = qca8k_setup_of_rgmii_delay(priv); ++ if (ret) ++ return ret; ++ + /* Enable CPU Port */ + ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, + QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); +@@ -970,6 +1036,8 @@ qca8k_phylink_mac_config(struct dsa_swit + case 0: /* 1st CPU port */ + if (state->interface != PHY_INTERFACE_MODE_RGMII && + state->interface != PHY_INTERFACE_MODE_RGMII_ID && ++ state->interface != PHY_INTERFACE_MODE_RGMII_TXID && ++ state->interface != PHY_INTERFACE_MODE_RGMII_RXID && + state->interface != PHY_INTERFACE_MODE_SGMII) + return; + +@@ -985,6 +1053,8 @@ qca8k_phylink_mac_config(struct dsa_swit + case 6: /* 2nd CPU port / external PHY */ + if (state->interface != PHY_INTERFACE_MODE_RGMII && + state->interface != PHY_INTERFACE_MODE_RGMII_ID && ++ state->interface != PHY_INTERFACE_MODE_RGMII_TXID && ++ state->interface != PHY_INTERFACE_MODE_RGMII_RXID && + state->interface != PHY_INTERFACE_MODE_SGMII && + state->interface != PHY_INTERFACE_MODE_1000BASEX) + return; +@@ -1008,14 +1078,18 @@ qca8k_phylink_mac_config(struct dsa_swit + qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN); + break; + case PHY_INTERFACE_MODE_RGMII_ID: ++ case PHY_INTERFACE_MODE_RGMII_TXID: ++ case PHY_INTERFACE_MODE_RGMII_RXID: + /* RGMII_ID needs internal delay. 
This is enabled through + * PORT5_PAD_CTRL for all ports, rather than individual port + * registers + */ + qca8k_write(priv, reg, + QCA8K_PORT_PAD_RGMII_EN | +- QCA8K_PORT_PAD_RGMII_TX_DELAY(QCA8K_MAX_DELAY) | +- QCA8K_PORT_PAD_RGMII_RX_DELAY(QCA8K_MAX_DELAY)); ++ QCA8K_PORT_PAD_RGMII_TX_DELAY(priv->rgmii_tx_delay) | ++ QCA8K_PORT_PAD_RGMII_RX_DELAY(priv->rgmii_rx_delay) | ++ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN | ++ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); + /* QCA8337 requires to set rgmii rx delay */ + if (priv->switch_id == QCA8K_ID_QCA8337) + qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL, +@@ -1073,6 +1147,8 @@ qca8k_phylink_validate(struct dsa_switch + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_RGMII && + state->interface != PHY_INTERFACE_MODE_RGMII_ID && ++ state->interface != PHY_INTERFACE_MODE_RGMII_TXID && ++ state->interface != PHY_INTERFACE_MODE_RGMII_RXID && + state->interface != PHY_INTERFACE_MODE_SGMII) + goto unsupported; + break; +@@ -1090,6 +1166,8 @@ qca8k_phylink_validate(struct dsa_switch + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_RGMII && + state->interface != PHY_INTERFACE_MODE_RGMII_ID && ++ state->interface != PHY_INTERFACE_MODE_RGMII_TXID && ++ state->interface != PHY_INTERFACE_MODE_RGMII_RXID && + state->interface != PHY_INTERFACE_MODE_SGMII && + state->interface != PHY_INTERFACE_MODE_1000BASEX) + goto unsupported; +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -38,12 +38,11 @@ + #define QCA8K_REG_PORT5_PAD_CTRL 0x008 + #define QCA8K_REG_PORT6_PAD_CTRL 0x00c + #define QCA8K_PORT_PAD_RGMII_EN BIT(26) +-#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) \ +- ((0x8 + (x & 0x3)) << 22) +-#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) \ +- ((0x10 + (x & 0x3)) << 20) +-#define QCA8K_MAX_DELAY 3 ++#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22) ++#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20) ++#define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25) + #define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24) ++#define QCA8K_MAX_DELAY 3 + #define QCA8K_PORT_PAD_SGMII_EN BIT(7) + #define QCA8K_REG_PWS 0x010 + #define QCA8K_PWS_SERDES_AEN_DIS BIT(7) +@@ -254,6 +253,8 @@ struct qca8k_match_data { + struct qca8k_priv { + u8 switch_id; + u8 switch_revision; ++ u8 rgmii_tx_delay; ++ u8 rgmii_rx_delay; + struct regmap *regmap; + struct mii_bus *bus; + struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS]; diff --git a/target/linux/generic/backport-5.15/735-v5.14-17-net-dsa-qca8k-clear-MASTER_EN-after-phy-read-write.patch b/target/linux/generic/backport-5.15/735-v5.14-17-net-dsa-qca8k-clear-MASTER_EN-after-phy-read-write.patch new file mode 100644 index 0000000000..4593da032b --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-17-net-dsa-qca8k-clear-MASTER_EN-after-phy-read-write.patch @@ -0,0 +1,50 @@ +From 63c33bbfeb6842a956a0eb12901e28eb335bdb18 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 23:00:07 +0200 +Subject: [PATCH] net: dsa: qca8k: clear MASTER_EN after phy read/write + +Clear MDIO_MASTER_EN bit from MDIO_MASTER_CTRL after read/write +operation. The MDIO_MASTER_EN bit is not reset after read/write +operation and the next operation can be wrongly interpreted by the +switch as a mdio operation. This cause a production of wrong/garbage +data from the switch and underfined bheavior. 
(random port drop, +unplugged port flagged with link up, wrong port speed) +Also on driver remove the MASTER_CTRL can be left set and cause the +malfunction of any next driver using the mdio device. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 14 ++++++++++++-- + 1 file changed, 12 insertions(+), 2 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -649,8 +649,14 @@ qca8k_mdio_write(struct qca8k_priv *priv + if (ret) + return ret; + +- return qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, +- QCA8K_MDIO_MASTER_BUSY); ++ ret = qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, ++ QCA8K_MDIO_MASTER_BUSY); ++ ++ /* even if the busy_wait timeouts try to clear the MASTER_EN */ ++ qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL, ++ QCA8K_MDIO_MASTER_EN); ++ ++ return ret; + } + + static int +@@ -685,6 +691,10 @@ qca8k_mdio_read(struct qca8k_priv *priv, + + val &= QCA8K_MDIO_MASTER_DATA_MASK; + ++ /* even if the busy_wait timeouts try to clear the MASTER_EN */ ++ qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL, ++ QCA8K_MDIO_MASTER_EN); ++ + return val; + } + diff --git a/target/linux/generic/backport-5.15/735-v5.14-18-net-dsa-qca8k-dsa-qca8k-protect-MASTER-busy_wait-wit.patch b/target/linux/generic/backport-5.15/735-v5.14-18-net-dsa-qca8k-dsa-qca8k-protect-MASTER-busy_wait-wit.patch new file mode 100644 index 0000000000..b6684d7210 --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-18-net-dsa-qca8k-dsa-qca8k-protect-MASTER-busy_wait-wit.patch @@ -0,0 +1,128 @@ +From 60df02b6ea4581d72eb7a3ab7204504a54059b72 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 23:00:08 +0200 +Subject: [PATCH] net: dsa: qca8k: dsa: qca8k: protect MASTER busy_wait with + mdio mutex + +MDIO_MASTER operation have a dedicated busy wait that is not protected +by the mdio mutex. This can cause situation where the MASTER operation +is done and a normal operation is executed between the MASTER read/write +and the MASTER busy_wait. Rework the qca8k_mdio_read/write function to +address this issue by binding the lock for the whole MASTER operation +and not only the mdio read/write common operation. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 68 +++++++++++++++++++++++++++++++++-------- + 1 file changed, 55 insertions(+), 13 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -628,8 +628,31 @@ qca8k_port_to_phy(int port) + } + + static int ++qca8k_mdio_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask) ++{ ++ u16 r1, r2, page; ++ u32 val; ++ int ret; ++ ++ qca8k_split_addr(reg, &r1, &r2, &page); ++ ++ ret = read_poll_timeout(qca8k_mii_read32, val, !(val & mask), 0, ++ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, ++ priv->bus, 0x10 | r2, r1); ++ ++ /* Check if qca8k_read has failed for a different reason ++ * before returnting -ETIMEDOUT ++ */ ++ if (ret < 0 && val < 0) ++ return val; ++ ++ return ret; ++} ++ ++static int + qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data) + { ++ u16 r1, r2, page; + u32 phy, val; + int ret; + +@@ -645,12 +668,21 @@ qca8k_mdio_write(struct qca8k_priv *priv + QCA8K_MDIO_MASTER_REG_ADDR(regnum) | + QCA8K_MDIO_MASTER_DATA(data); + +- ret = qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val); ++ qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page); ++ ++ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED); ++ ++ ret = qca8k_set_page(priv->bus, page); + if (ret) +- return ret; ++ goto exit; ++ ++ qca8k_mii_write32(priv->bus, 0x10 | r2, r1, val); + +- ret = qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, +- QCA8K_MDIO_MASTER_BUSY); ++ ret = qca8k_mdio_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, ++ QCA8K_MDIO_MASTER_BUSY); ++ ++exit: ++ mutex_unlock(&priv->bus->mdio_lock); + + /* even if the busy_wait timeouts try to clear the MASTER_EN */ + qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL, +@@ -662,6 +694,7 @@ qca8k_mdio_write(struct qca8k_priv *priv + static int + qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum) + { ++ u16 r1, r2, page; + u32 phy, val; + int ret; + +@@ -676,21 +709,30 @@ qca8k_mdio_read(struct qca8k_priv *priv, + QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | + QCA8K_MDIO_MASTER_REG_ADDR(regnum); + +- ret = qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val); +- if (ret) +- return ret; ++ qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page); ++ ++ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED); + +- ret = qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, +- QCA8K_MDIO_MASTER_BUSY); ++ ret = qca8k_set_page(priv->bus, page); + if (ret) +- return ret; ++ goto exit; + +- val = qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL); +- if (val < 0) +- return val; ++ qca8k_mii_write32(priv->bus, 0x10 | r2, r1, val); ++ ++ ret = qca8k_mdio_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, ++ QCA8K_MDIO_MASTER_BUSY); ++ if (ret) ++ goto exit; + ++ val = qca8k_mii_read32(priv->bus, 0x10 | r2, r1); + val &= QCA8K_MDIO_MASTER_DATA_MASK; + ++exit: ++ mutex_unlock(&priv->bus->mdio_lock); ++ ++ if (val >= 0) ++ val &= QCA8K_MDIO_MASTER_DATA_MASK; ++ + /* even if the busy_wait timeouts try to clear the MASTER_EN */ + qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL, + QCA8K_MDIO_MASTER_EN); diff --git a/target/linux/generic/backport-5.15/735-v5.14-19-net-dsa-qca8k-enlarge-mdio-delay-and-timeout.patch b/target/linux/generic/backport-5.15/735-v5.14-19-net-dsa-qca8k-enlarge-mdio-delay-and-timeout.patch new file mode 100644 index 0000000000..30eeed361e --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-19-net-dsa-qca8k-enlarge-mdio-delay-and-timeout.patch @@ -0,0 +1,39 @@ +From 617960d72e93de0f3fa52407e2d39e8c43e73b0a Mon Sep 17 00:00:00 2001 +From: Ansuel Smith 
<ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 23:00:09 +0200 +Subject: [PATCH] net: dsa: qca8k: enlarge mdio delay and timeout + +The witch require some extra delay after setting page or the next +read/write can use still use the old page. Add a delay after the +set_page function to address this as it's done in QSDK legacy driver. +Some timeouts were notice with VLAN and phy function, enlarge the +mdio busy wait timeout to fix these problems. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 1 + + drivers/net/dsa/qca8k.h | 2 +- + 2 files changed, 2 insertions(+), 1 deletion(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -143,6 +143,7 @@ qca8k_set_page(struct mii_bus *bus, u16 + } + + qca8k_current_page = page; ++ usleep_range(1000, 2000); + return 0; + } + +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -20,7 +20,7 @@ + #define PHY_ID_QCA8337 0x004dd036 + #define QCA8K_ID_QCA8337 0x13 + +-#define QCA8K_BUSY_WAIT_TIMEOUT 20 ++#define QCA8K_BUSY_WAIT_TIMEOUT 2000 + + #define QCA8K_NUM_FDB_RECORDS 2048 + diff --git a/target/linux/generic/backport-5.15/735-v5.14-20-net-dsa-qca8k-add-support-for-internal-phy-and-inter.patch b/target/linux/generic/backport-5.15/735-v5.14-20-net-dsa-qca8k-add-support-for-internal-phy-and-inter.patch new file mode 100644 index 0000000000..88d3c1ef43 --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-20-net-dsa-qca8k-add-support-for-internal-phy-and-inter.patch @@ -0,0 +1,267 @@ +From 759bafb8a3226326ca357613bc90acf738f80c32 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 23:00:10 +0200 +Subject: [PATCH] net: dsa: qca8k: add support for internal phy and internal + mdio + +Add support to setup_mdio_bus for internal phy declaration. Introduce a +flag to use the legacy port phy mapping by default and use the direct +mapping if a mdio node is detected in the switch node. Register a +dedicated mdio internal mdio bus to address the different mapping +between port and phy if the mdio node is detected. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 112 +++++++++++++++++++++++++++++----------- + drivers/net/dsa/qca8k.h | 1 + + 2 files changed, 83 insertions(+), 30 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -11,6 +11,7 @@ + #include <linux/netdevice.h> + #include <net/dsa.h> + #include <linux/of_net.h> ++#include <linux/of_mdio.h> + #include <linux/of_platform.h> + #include <linux/if_bridge.h> + #include <linux/mdio.h> +@@ -629,7 +630,7 @@ qca8k_port_to_phy(int port) + } + + static int +-qca8k_mdio_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask) ++qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask) + { + u16 r1, r2, page; + u32 val; +@@ -639,7 +640,7 @@ qca8k_mdio_busy_wait(struct qca8k_priv * + + ret = read_poll_timeout(qca8k_mii_read32, val, !(val & mask), 0, + QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, +- priv->bus, 0x10 | r2, r1); ++ bus, 0x10 | r2, r1); + + /* Check if qca8k_read has failed for a different reason + * before returnting -ETIMEDOUT +@@ -651,19 +652,16 @@ qca8k_mdio_busy_wait(struct qca8k_priv * + } + + static int +-qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data) ++qca8k_mdio_write(struct mii_bus *salve_bus, int phy, int regnum, u16 data) + { ++ struct qca8k_priv *priv = salve_bus->priv; + u16 r1, r2, page; +- u32 phy, val; ++ u32 val; + int ret; + + if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) + return -EINVAL; + +- /* callee is responsible for not passing bad ports, +- * but we still would like to make spills impossible. +- */ +- phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR; + val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | + QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | + QCA8K_MDIO_MASTER_REG_ADDR(regnum) | +@@ -679,33 +677,29 @@ qca8k_mdio_write(struct qca8k_priv *priv + + qca8k_mii_write32(priv->bus, 0x10 | r2, r1, val); + +- ret = qca8k_mdio_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, ++ ret = qca8k_mdio_busy_wait(priv->bus, QCA8K_MDIO_MASTER_CTRL, + QCA8K_MDIO_MASTER_BUSY); + + exit: +- mutex_unlock(&priv->bus->mdio_lock); +- + /* even if the busy_wait timeouts try to clear the MASTER_EN */ +- qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL, +- QCA8K_MDIO_MASTER_EN); ++ qca8k_mii_write32(priv->bus, 0x10 | r2, r1, 0); ++ ++ mutex_unlock(&priv->bus->mdio_lock); + + return ret; + } + + static int +-qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum) ++qca8k_mdio_read(struct mii_bus *salve_bus, int phy, int regnum) + { ++ struct qca8k_priv *priv = salve_bus->priv; + u16 r1, r2, page; +- u32 phy, val; ++ u32 val; + int ret; + + if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) + return -EINVAL; + +- /* callee is responsible for not passing bad ports, +- * but we still would like to make spills impossible. 
+- */ +- phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR; + val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | + QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | + QCA8K_MDIO_MASTER_REG_ADDR(regnum); +@@ -720,24 +714,22 @@ qca8k_mdio_read(struct qca8k_priv *priv, + + qca8k_mii_write32(priv->bus, 0x10 | r2, r1, val); + +- ret = qca8k_mdio_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, ++ ret = qca8k_mdio_busy_wait(priv->bus, QCA8K_MDIO_MASTER_CTRL, + QCA8K_MDIO_MASTER_BUSY); + if (ret) + goto exit; + + val = qca8k_mii_read32(priv->bus, 0x10 | r2, r1); +- val &= QCA8K_MDIO_MASTER_DATA_MASK; + + exit: ++ /* even if the busy_wait timeouts try to clear the MASTER_EN */ ++ qca8k_mii_write32(priv->bus, 0x10 | r2, r1, 0); ++ + mutex_unlock(&priv->bus->mdio_lock); + + if (val >= 0) + val &= QCA8K_MDIO_MASTER_DATA_MASK; + +- /* even if the busy_wait timeouts try to clear the MASTER_EN */ +- qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL, +- QCA8K_MDIO_MASTER_EN); +- + return val; + } + +@@ -746,7 +738,14 @@ qca8k_phy_write(struct dsa_switch *ds, i + { + struct qca8k_priv *priv = ds->priv; + +- return qca8k_mdio_write(priv, port, regnum, data); ++ /* Check if the legacy mapping should be used and the ++ * port is not correctly mapped to the right PHY in the ++ * devicetree ++ */ ++ if (priv->legacy_phy_port_mapping) ++ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; ++ ++ return qca8k_mdio_write(priv->bus, port, regnum, data); + } + + static int +@@ -755,7 +754,14 @@ qca8k_phy_read(struct dsa_switch *ds, in + struct qca8k_priv *priv = ds->priv; + int ret; + +- ret = qca8k_mdio_read(priv, port, regnum); ++ /* Check if the legacy mapping should be used and the ++ * port is not correctly mapped to the right PHY in the ++ * devicetree ++ */ ++ if (priv->legacy_phy_port_mapping) ++ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; ++ ++ ret = qca8k_mdio_read(priv->bus, port, regnum); + + if (ret < 0) + return 0xffff; +@@ -764,10 +770,37 @@ qca8k_phy_read(struct dsa_switch *ds, in + } + + static int ++qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio) ++{ ++ struct dsa_switch *ds = priv->ds; ++ struct mii_bus *bus; ++ ++ bus = devm_mdiobus_alloc(ds->dev); ++ ++ if (!bus) ++ return -ENOMEM; ++ ++ bus->priv = (void *)priv; ++ bus->name = "qca8k slave mii"; ++ bus->read = qca8k_mdio_read; ++ bus->write = qca8k_mdio_write; ++ snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d", ++ ds->index); ++ ++ bus->parent = ds->dev; ++ bus->phy_mask = ~ds->phys_mii_mask; ++ ++ ds->slave_mii_bus = bus; ++ ++ return devm_of_mdiobus_register(priv->dev, bus, mdio); ++} ++ ++static int + qca8k_setup_mdio_bus(struct qca8k_priv *priv) + { + u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg; +- struct device_node *ports, *port; ++ struct device_node *ports, *port, *mdio; ++ phy_interface_t mode; + int err; + + ports = of_get_child_by_name(priv->dev->of_node, "ports"); +@@ -788,7 +821,10 @@ qca8k_setup_mdio_bus(struct qca8k_priv * + if (!dsa_is_user_port(priv->ds, reg)) + continue; + +- if (of_property_read_bool(port, "phy-handle")) ++ of_get_phy_mode(port, &mode); ++ ++ if (of_property_read_bool(port, "phy-handle") && ++ mode != PHY_INTERFACE_MODE_INTERNAL) + external_mdio_mask |= BIT(reg); + else + internal_mdio_mask |= BIT(reg); +@@ -825,8 +861,23 @@ qca8k_setup_mdio_bus(struct qca8k_priv * + QCA8K_MDIO_MASTER_EN); + } + ++ /* Check if the devicetree declare the port:phy mapping */ ++ mdio = of_get_child_by_name(priv->dev->of_node, "mdio"); ++ if (of_device_is_available(mdio)) { ++ err = 
qca8k_mdio_register(priv, mdio); ++ if (err) ++ of_node_put(mdio); ++ ++ return err; ++ } ++ ++ /* If a mapping can't be found the legacy mapping is used, ++ * using the qca8k_port_to_phy function ++ */ ++ priv->legacy_phy_port_mapping = true; + priv->ops.phy_read = qca8k_phy_read; + priv->ops.phy_write = qca8k_phy_write; ++ + return 0; + } + +@@ -1212,7 +1263,8 @@ qca8k_phylink_validate(struct dsa_switch + case 5: + /* Internal PHY */ + if (state->interface != PHY_INTERFACE_MODE_NA && +- state->interface != PHY_INTERFACE_MODE_GMII) ++ state->interface != PHY_INTERFACE_MODE_GMII && ++ state->interface != PHY_INTERFACE_MODE_INTERNAL) + goto unsupported; + break; + case 6: /* 2nd CPU port / external PHY */ +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -255,6 +255,7 @@ struct qca8k_priv { + u8 switch_revision; + u8 rgmii_tx_delay; + u8 rgmii_rx_delay; ++ bool legacy_phy_port_mapping; + struct regmap *regmap; + struct mii_bus *bus; + struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS]; diff --git a/target/linux/generic/backport-5.15/735-v5.14-21-devicetree-bindings-dsa-qca8k-Document-internal-mdio.patch b/target/linux/generic/backport-5.15/735-v5.14-21-devicetree-bindings-dsa-qca8k-Document-internal-mdio.patch new file mode 100644 index 0000000000..6db01b4b41 --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-21-devicetree-bindings-dsa-qca8k-Document-internal-mdio.patch @@ -0,0 +1,93 @@ +From 0c994a28e7518f098c84a3049cb2915780db873a Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 23:00:11 +0200 +Subject: [PATCH] devicetree: bindings: dsa: qca8k: Document internal mdio + definition + +Document new way of declare mapping of internal PHY to port. +The new implementation directly declare the PHY connected to the port +by adding a node in the switch node. The driver detect this and register +an internal mdiobus using the mapping defined in the mdio node. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Rob Herring <robh@kernel.org> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + .../devicetree/bindings/net/dsa/qca8k.txt | 39 +++++++++++++++++++ + 1 file changed, 39 insertions(+) + +--- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt ++++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt +@@ -21,6 +21,10 @@ described in dsa/dsa.txt. If the QCA8K s + mdio-bus each subnode describing a port needs to have a valid phandle + referencing the internal PHY it is connected to. This is because there's no + N:N mapping of port and PHY id. ++To declare the internal mdio-bus configuration, declare a mdio node in the ++switch node and declare the phandle for the port referencing the internal ++PHY is connected to. In this config a internal mdio-bus is registered and ++the mdio MASTER is used as communication. + + Don't use mixed external and internal mdio-bus configurations, as this is + not supported by the hardware. 
+@@ -150,26 +154,61 @@ for the internal master mdio-bus configu + port@1 { + reg = <1>; + label = "lan1"; ++ phy-mode = "internal"; ++ phy-handle = <&phy_port1>; + }; + + port@2 { + reg = <2>; + label = "lan2"; ++ phy-mode = "internal"; ++ phy-handle = <&phy_port2>; + }; + + port@3 { + reg = <3>; + label = "lan3"; ++ phy-mode = "internal"; ++ phy-handle = <&phy_port3>; + }; + + port@4 { + reg = <4>; + label = "lan4"; ++ phy-mode = "internal"; ++ phy-handle = <&phy_port4>; + }; + + port@5 { + reg = <5>; + label = "wan"; ++ phy-mode = "internal"; ++ phy-handle = <&phy_port5>; ++ }; ++ }; ++ ++ mdio { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ phy_port1: phy@0 { ++ reg = <0>; ++ }; ++ ++ phy_port2: phy@1 { ++ reg = <1>; ++ }; ++ ++ phy_port3: phy@2 { ++ reg = <2>; ++ }; ++ ++ phy_port4: phy@3 { ++ reg = <3>; ++ }; ++ ++ phy_port5: phy@4 { ++ reg = <4>; + }; + }; + }; diff --git a/target/linux/generic/backport-5.15/735-v5.14-22-net-dsa-qca8k-improve-internal-mdio-read-write-bus-a.patch b/target/linux/generic/backport-5.15/735-v5.14-22-net-dsa-qca8k-improve-internal-mdio-read-write-bus-a.patch new file mode 100644 index 0000000000..da8d5b3462 --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-22-net-dsa-qca8k-improve-internal-mdio-read-write-bus-a.patch @@ -0,0 +1,95 @@ +From b7ebac354d54f1657bb89b7a7ca149db50203e6a Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 23:00:12 +0200 +Subject: [PATCH] net: dsa: qca8k: improve internal mdio read/write bus access + +Improve the internal mdio read/write bus access by caching the value +without accessing it for every read/write. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 28 +++++++++++++++------------- + 1 file changed, 15 insertions(+), 13 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -655,6 +655,7 @@ static int + qca8k_mdio_write(struct mii_bus *salve_bus, int phy, int regnum, u16 data) + { + struct qca8k_priv *priv = salve_bus->priv; ++ struct mii_bus *bus = priv->bus; + u16 r1, r2, page; + u32 val; + int ret; +@@ -669,22 +670,22 @@ qca8k_mdio_write(struct mii_bus *salve_b + + qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page); + +- mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED); ++ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + +- ret = qca8k_set_page(priv->bus, page); ++ ret = qca8k_set_page(bus, page); + if (ret) + goto exit; + +- qca8k_mii_write32(priv->bus, 0x10 | r2, r1, val); ++ qca8k_mii_write32(bus, 0x10 | r2, r1, val); + +- ret = qca8k_mdio_busy_wait(priv->bus, QCA8K_MDIO_MASTER_CTRL, ++ ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, + QCA8K_MDIO_MASTER_BUSY); + + exit: + /* even if the busy_wait timeouts try to clear the MASTER_EN */ +- qca8k_mii_write32(priv->bus, 0x10 | r2, r1, 0); ++ qca8k_mii_write32(bus, 0x10 | r2, r1, 0); + +- mutex_unlock(&priv->bus->mdio_lock); ++ mutex_unlock(&bus->mdio_lock); + + return ret; + } +@@ -693,6 +694,7 @@ static int + qca8k_mdio_read(struct mii_bus *salve_bus, int phy, int regnum) + { + struct qca8k_priv *priv = salve_bus->priv; ++ struct mii_bus *bus = priv->bus; + u16 r1, r2, page; + u32 val; + int ret; +@@ -706,26 +708,26 @@ qca8k_mdio_read(struct mii_bus *salve_bu + + qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page); + +- mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED); ++ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + +- ret = 
qca8k_set_page(priv->bus, page); ++ ret = qca8k_set_page(bus, page); + if (ret) + goto exit; + +- qca8k_mii_write32(priv->bus, 0x10 | r2, r1, val); ++ qca8k_mii_write32(bus, 0x10 | r2, r1, val); + +- ret = qca8k_mdio_busy_wait(priv->bus, QCA8K_MDIO_MASTER_CTRL, ++ ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, + QCA8K_MDIO_MASTER_BUSY); + if (ret) + goto exit; + +- val = qca8k_mii_read32(priv->bus, 0x10 | r2, r1); ++ val = qca8k_mii_read32(bus, 0x10 | r2, r1); + + exit: + /* even if the busy_wait timeouts try to clear the MASTER_EN */ +- qca8k_mii_write32(priv->bus, 0x10 | r2, r1, 0); ++ qca8k_mii_write32(bus, 0x10 | r2, r1, 0); + +- mutex_unlock(&priv->bus->mdio_lock); ++ mutex_unlock(&bus->mdio_lock); + + if (val >= 0) + val &= QCA8K_MDIO_MASTER_DATA_MASK; diff --git a/target/linux/generic/backport-5.15/735-v5.14-23-net-dsa-qca8k-pass-switch_revision-info-to-phy-dev_f.patch b/target/linux/generic/backport-5.15/735-v5.14-23-net-dsa-qca8k-pass-switch_revision-info-to-phy-dev_f.patch new file mode 100644 index 0000000000..1179cf152d --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-23-net-dsa-qca8k-pass-switch_revision-info-to-phy-dev_f.patch @@ -0,0 +1,48 @@ +From a46aec02bc06ac2c33f326339e4ef88c735dc30d Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 23:00:13 +0200 +Subject: [PATCH] net: dsa: qca8k: pass switch_revision info to phy dev_flags + +Define get_phy_flags to pass switch_Revision needed to tweak the +internal PHY with debug values based on the revision. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 17 +++++++++++++++++ + 1 file changed, 17 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1740,6 +1740,22 @@ qca8k_port_vlan_del(struct dsa_switch *d + return ret; + } + ++static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port) ++{ ++ struct qca8k_priv *priv = ds->priv; ++ ++ /* Communicate to the phy internal driver the switch revision. ++ * Based on the switch revision different values needs to be ++ * set to the dbg and mmd reg on the phy. ++ * The first 2 bit are used to communicate the switch revision ++ * to the phy driver. 
++ */ ++ if (port > 0 && port < 6) ++ return priv->switch_revision; ++ ++ return 0; ++} ++ + static enum dsa_tag_protocol + qca8k_get_tag_protocol(struct dsa_switch *ds, int port, + enum dsa_tag_protocol mp) +@@ -1774,6 +1790,7 @@ static const struct dsa_switch_ops qca8k + .phylink_mac_config = qca8k_phylink_mac_config, + .phylink_mac_link_down = qca8k_phylink_mac_link_down, + .phylink_mac_link_up = qca8k_phylink_mac_link_up, ++ .get_phy_flags = qca8k_get_phy_flags, + }; + + static int qca8k_read_switch_id(struct qca8k_priv *priv) diff --git a/target/linux/generic/backport-5.15/735-v5.14-25-net-phy-add-support-for-qca8k-switch-internal-PHY-in.patch b/target/linux/generic/backport-5.15/735-v5.14-25-net-phy-add-support-for-qca8k-switch-internal-PHY-in.patch new file mode 100644 index 0000000000..20325f564d --- /dev/null +++ b/target/linux/generic/backport-5.15/735-v5.14-25-net-phy-add-support-for-qca8k-switch-internal-PHY-in.patch @@ -0,0 +1,229 @@ +From 272833b9b3b3969be7a91839121d86662c8c4253 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 14 May 2021 23:00:15 +0200 +Subject: [PATCH] net: phy: add support for qca8k switch internal PHY in at803x + +Since the at803x share the same regs, it's assumed they are based on the +same implementation. Make it part of the at803x PHY driver to skip +having redudant code. +Add initial support for qca8k internal PHYs. The internal PHYs requires +special mmd and debug values to be set based on the switch revision +passwd using the dev_flags. Supports output of idle, receive and eee_wake +errors stats. +Some debug values sets can't be translated as the documentation lacks any +reference about them. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/phy/Kconfig | 5 +- + drivers/net/phy/at803x.c | 132 ++++++++++++++++++++++++++++++++++++++- + 2 files changed, 134 insertions(+), 3 deletions(-) + +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -235,10 +235,11 @@ config NXP_TJA11XX_PHY + Currently supports the NXP TJA1100 and TJA1101 PHY. + + config AT803X_PHY +- tristate "Qualcomm Atheros AR803X PHYs" ++ tristate "Qualcomm Atheros AR803X PHYs and QCA833x PHYs" + depends on REGULATOR + help +- Currently supports the AR8030, AR8031, AR8033 and AR8035 model ++ Currently supports the AR8030, AR8031, AR8033, AR8035 and internal ++ QCA8337(Internal qca8k PHY) model + + config QSEMI_PHY + tristate "Quality Semiconductor PHYs" +--- a/drivers/net/phy/at803x.c ++++ b/drivers/net/phy/at803x.c +@@ -92,10 +92,16 @@ + #define AT803X_DEBUG_REG_5 0x05 + #define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8) + ++#define AT803X_DEBUG_REG_3C 0x3C ++ ++#define AT803X_DEBUG_REG_3D 0x3D ++ + #define AT803X_DEBUG_REG_1F 0x1F + #define AT803X_DEBUG_PLL_ON BIT(2) + #define AT803X_DEBUG_RGMII_1V8 BIT(3) + ++#define MDIO_AZ_DEBUG 0x800D ++ + /* AT803x supports either the XTAL input pad, an internal PLL or the + * DSP as clock reference for the clock output pad. The XTAL reference + * is only used for 25 MHz output, all other frequencies need the PLL. 
+@@ -142,10 +148,34 @@ + #define AT803X_PAGE_FIBER 0 + #define AT803X_PAGE_COPPER 1 + ++#define QCA8327_PHY_ID 0x004dd034 ++#define QCA8337_PHY_ID 0x004dd036 ++#define QCA8K_PHY_ID_MASK 0xffffffff ++ ++#define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0) ++ + MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver"); + MODULE_AUTHOR("Matus Ujhelyi"); + MODULE_LICENSE("GPL"); + ++enum stat_access_type { ++ PHY, ++ MMD ++}; ++ ++struct at803x_hw_stat { ++ const char *string; ++ u8 reg; ++ u32 mask; ++ enum stat_access_type access_type; ++}; ++ ++static struct at803x_hw_stat at803x_hw_stats[] = { ++ { "phy_idle_errors", 0xa, GENMASK(7, 0), PHY}, ++ { "phy_receive_errors", 0x15, GENMASK(15, 0), PHY}, ++ { "eee_wake_errors", 0x16, GENMASK(15, 0), MMD}, ++}; ++ + struct at803x_priv { + int flags; + #define AT803X_KEEP_PLL_ENABLED BIT(0) /* don't turn off internal PLL */ +@@ -154,6 +184,7 @@ struct at803x_priv { + struct regulator_dev *vddio_rdev; + struct regulator_dev *vddh_rdev; + struct regulator *vddio; ++ u64 stats[ARRAY_SIZE(at803x_hw_stats)]; + }; + + struct at803x_context { +@@ -165,6 +196,17 @@ struct at803x_context { + u16 led_control; + }; + ++static int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data) ++{ ++ int ret; ++ ++ ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg); ++ if (ret < 0) ++ return ret; ++ ++ return phy_write(phydev, AT803X_DEBUG_DATA, data); ++} ++ + static int at803x_debug_reg_read(struct phy_device *phydev, u16 reg) + { + int ret; +@@ -327,6 +369,53 @@ static void at803x_get_wol(struct phy_de + wol->wolopts |= WAKE_MAGIC; + } + ++static int at803x_get_sset_count(struct phy_device *phydev) ++{ ++ return ARRAY_SIZE(at803x_hw_stats); ++} ++ ++static void at803x_get_strings(struct phy_device *phydev, u8 *data) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(at803x_hw_stats); i++) { ++ strscpy(data + i * ETH_GSTRING_LEN, ++ at803x_hw_stats[i].string, ETH_GSTRING_LEN); ++ } ++} ++ ++static u64 at803x_get_stat(struct phy_device *phydev, int i) ++{ ++ struct at803x_hw_stat stat = at803x_hw_stats[i]; ++ struct at803x_priv *priv = phydev->priv; ++ int val; ++ u64 ret; ++ ++ if (stat.access_type == MMD) ++ val = phy_read_mmd(phydev, MDIO_MMD_PCS, stat.reg); ++ else ++ val = phy_read(phydev, stat.reg); ++ ++ if (val < 0) { ++ ret = U64_MAX; ++ } else { ++ val = val & stat.mask; ++ priv->stats[i] += val; ++ ret = priv->stats[i]; ++ } ++ ++ return ret; ++} ++ ++static void at803x_get_stats(struct phy_device *phydev, ++ struct ethtool_stats *stats, u64 *data) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(at803x_hw_stats); i++) ++ data[i] = at803x_get_stat(phydev, i); ++} ++ + static int at803x_suspend(struct phy_device *phydev) + { + int value; +@@ -1102,6 +1191,34 @@ static int at803x_cable_test_start(struc + return 0; + } + ++static int qca83xx_config_init(struct phy_device *phydev) ++{ ++ u8 switch_revision; ++ ++ switch_revision = phydev->dev_flags & QCA8K_DEVFLAGS_REVISION_MASK; ++ ++ switch (switch_revision) { ++ case 1: ++ /* For 100M waveform */ ++ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_0, 0x02ea); ++ /* Turn on Gigabit clock */ ++ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3D, 0x68a0); ++ break; ++ ++ case 2: ++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0); ++ fallthrough; ++ case 4: ++ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f); ++ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3D, 0x6860); ++ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_5, 0x2c46); ++ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 
0x6000); ++ break; ++ } ++ ++ return 0; ++} ++ + static struct phy_driver at803x_driver[] = { + { + /* Qualcomm Atheros AR8035 */ +@@ -1198,7 +1315,20 @@ static struct phy_driver at803x_driver[] + .read_status = at803x_read_status, + .soft_reset = genphy_soft_reset, + .config_aneg = at803x_config_aneg, +-} }; ++}, { ++ /* QCA8337 */ ++ .phy_id = QCA8337_PHY_ID, ++ .phy_id_mask = QCA8K_PHY_ID_MASK, ++ .name = "QCA PHY 8337", ++ /* PHY_GBIT_FEATURES */ ++ .probe = at803x_probe, ++ .flags = PHY_IS_INTERNAL, ++ .config_init = qca83xx_config_init, ++ .soft_reset = genphy_soft_reset, ++ .get_sset_count = at803x_get_sset_count, ++ .get_strings = at803x_get_strings, ++ .get_stats = at803x_get_stats, ++}, }; + + module_phy_driver(at803x_driver); + diff --git a/target/linux/generic/backport-5.15/736-v5.14-net-dsa-qca8k-fix-missing-unlock-on-error-in-qca8k-vlan.patch b/target/linux/generic/backport-5.15/736-v5.14-net-dsa-qca8k-fix-missing-unlock-on-error-in-qca8k-vlan.patch new file mode 100644 index 0000000000..a68e3b1821 --- /dev/null +++ b/target/linux/generic/backport-5.15/736-v5.14-net-dsa-qca8k-fix-missing-unlock-on-error-in-qca8k-vlan.patch @@ -0,0 +1,64 @@ +From 0d56e5c191b197e1d30a0a4c92628836dafced0f Mon Sep 17 00:00:00 2001 +From: Wei Yongjun <weiyongjun1@huawei.com> +Date: Tue, 18 May 2021 11:24:13 +0000 +Subject: [PATCH] net: dsa: qca8k: fix missing unlock on error in + qca8k_vlan_(add|del) + +Add the missing unlock before return from function qca8k_vlan_add() +and qca8k_vlan_del() in the error handling case. + +Fixes: 028f5f8ef44f ("net: dsa: qca8k: handle error with qca8k_read operation") +Reported-by: Hulk Robot <hulkci@huawei.com> +Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 16 ++++++++++------ + 1 file changed, 10 insertions(+), 6 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -506,8 +506,10 @@ qca8k_vlan_add(struct qca8k_priv *priv, + goto out; + + reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0); +- if (reg < 0) +- return reg; ++ if (reg < 0) { ++ ret = reg; ++ goto out; ++ } + reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN; + reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port)); + if (untagged) +@@ -519,7 +521,7 @@ qca8k_vlan_add(struct qca8k_priv *priv, + + ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); + if (ret) +- return ret; ++ goto out; + ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); + + out: +@@ -541,8 +543,10 @@ qca8k_vlan_del(struct qca8k_priv *priv, + goto out; + + reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0); +- if (reg < 0) +- return reg; ++ if (reg < 0) { ++ ret = reg; ++ goto out; ++ } + reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port)); + reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT << + QCA8K_VTU_FUNC0_EG_MODE_S(port); +@@ -564,7 +568,7 @@ qca8k_vlan_del(struct qca8k_priv *priv, + } else { + ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); + if (ret) +- return ret; ++ goto out; + ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); + } + diff --git a/target/linux/generic/backport-5.15/737-v5.14-01-net-dsa-qca8k-check-return-value-of-read-functions-c.patch b/target/linux/generic/backport-5.15/737-v5.14-01-net-dsa-qca8k-check-return-value-of-read-functions-c.patch new file mode 100644 index 0000000000..451b0e9446 --- /dev/null +++ b/target/linux/generic/backport-5.15/737-v5.14-01-net-dsa-qca8k-check-return-value-of-read-functions-c.patch @@ -0,0 +1,348 @@ +From 7c9896e37807862e276064dd9331860f5d27affc Mon Sep 17 00:00:00 2001 +From: Yang Yingliang <yangyingliang@huawei.com> +Date: Sat, 29 May 2021 11:04:38 +0800 +Subject: [PATCH] net: dsa: qca8k: check return value of read functions + correctly + +Current return type of qca8k_mii_read32() and qca8k_read() are +unsigned, it can't be negative, so the return value check is +unuseful. For check the return value correctly, change return +type of the read functions and add a output parameter to store +the read value. 
+ +Signed-off-by: Yang Yingliang <yangyingliang@huawei.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/dsa/qca8k.c | 130 +++++++++++++++++++--------------------- + 1 file changed, 60 insertions(+), 70 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -89,26 +89,26 @@ qca8k_split_addr(u32 regaddr, u16 *r1, u + *page = regaddr & 0x3ff; + } + +-static u32 +-qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum) ++static int ++qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val) + { +- u32 val; + int ret; + + ret = bus->read(bus, phy_id, regnum); + if (ret >= 0) { +- val = ret; ++ *val = ret; + ret = bus->read(bus, phy_id, regnum + 1); +- val |= ret << 16; ++ *val |= ret << 16; + } + + if (ret < 0) { + dev_err_ratelimited(&bus->dev, + "failed to read qca8k 32bit register\n"); ++ *val = 0; + return ret; + } + +- return val; ++ return 0; + } + + static void +@@ -148,26 +148,26 @@ qca8k_set_page(struct mii_bus *bus, u16 + return 0; + } + +-static u32 +-qca8k_read(struct qca8k_priv *priv, u32 reg) ++static int ++qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val) + { + struct mii_bus *bus = priv->bus; + u16 r1, r2, page; +- u32 val; ++ int ret; + + qca8k_split_addr(reg, &r1, &r2, &page); + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + +- val = qca8k_set_page(bus, page); +- if (val < 0) ++ ret = qca8k_set_page(bus, page); ++ if (ret < 0) + goto exit; + +- val = qca8k_mii_read32(bus, 0x10 | r2, r1); ++ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val); + + exit: + mutex_unlock(&bus->mdio_lock); +- return val; ++ return ret; + } + + static int +@@ -208,11 +208,9 @@ qca8k_rmw(struct qca8k_priv *priv, u32 r + if (ret < 0) + goto exit; + +- val = qca8k_mii_read32(bus, 0x10 | r2, r1); +- if (val < 0) { +- ret = val; ++ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val); ++ if (ret < 0) + goto exit; +- } + + val &= ~mask; + val |= write_val; +@@ -240,15 +238,8 @@ static int + qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) + { + struct qca8k_priv *priv = (struct qca8k_priv *)ctx; +- int ret; +- +- ret = qca8k_read(priv, reg); +- if (ret < 0) +- return ret; +- +- *val = ret; + +- return 0; ++ return qca8k_read(priv, reg, val); + } + + static int +@@ -296,18 +287,18 @@ static struct regmap_config qca8k_regmap + static int + qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask) + { ++ int ret, ret1; + u32 val; +- int ret; + +- ret = read_poll_timeout(qca8k_read, val, !(val & mask), ++ ret = read_poll_timeout(qca8k_read, ret1, !(val & mask), + 0, QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, +- priv, reg); ++ priv, reg, &val); + + /* Check if qca8k_read has failed for a different reason + * before returning -ETIMEDOUT + */ +- if (ret < 0 && val < 0) +- return val; ++ if (ret < 0 && ret1 < 0) ++ return ret1; + + return ret; + } +@@ -316,13 +307,13 @@ static int + qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb) + { + u32 reg[4], val; +- int i; ++ int i, ret; + + /* load the ARL table into an array */ + for (i = 0; i < 4; i++) { +- val = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4)); +- if (val < 0) +- return val; ++ ret = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4), &val); ++ if (ret < 0) ++ return ret; + + reg[i] = val; + } +@@ -396,9 +387,9 @@ qca8k_fdb_access(struct qca8k_priv *priv + + /* Check for table full violation when adding an entry */ + if (cmd == QCA8K_FDB_LOAD) { +- reg = qca8k_read(priv, QCA8K_REG_ATU_FUNC); +- if (reg < 0) +- return reg; ++ ret = qca8k_read(priv, 
QCA8K_REG_ATU_FUNC, ®); ++ if (ret < 0) ++ return ret; + if (reg & QCA8K_ATU_FUNC_FULL) + return -1; + } +@@ -477,9 +468,9 @@ qca8k_vlan_access(struct qca8k_priv *pri + + /* Check for table full violation when adding an entry */ + if (cmd == QCA8K_VLAN_LOAD) { +- reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC1); +- if (reg < 0) +- return reg; ++ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, ®); ++ if (ret < 0) ++ return ret; + if (reg & QCA8K_VTU_FUNC1_FULL) + return -ENOMEM; + } +@@ -505,11 +496,9 @@ qca8k_vlan_add(struct qca8k_priv *priv, + if (ret < 0) + goto out; + +- reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0); +- if (reg < 0) { +- ret = reg; ++ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®); ++ if (ret < 0) + goto out; +- } + reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN; + reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port)); + if (untagged) +@@ -542,11 +531,9 @@ qca8k_vlan_del(struct qca8k_priv *priv, + if (ret < 0) + goto out; + +- reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0); +- if (reg < 0) { +- ret = reg; ++ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®); ++ if (ret < 0) + goto out; +- } + reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port)); + reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT << + QCA8K_VTU_FUNC0_EG_MODE_S(port); +@@ -638,19 +625,19 @@ qca8k_mdio_busy_wait(struct mii_bus *bus + { + u16 r1, r2, page; + u32 val; +- int ret; ++ int ret, ret1; + + qca8k_split_addr(reg, &r1, &r2, &page); + +- ret = read_poll_timeout(qca8k_mii_read32, val, !(val & mask), 0, ++ ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0, + QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, +- bus, 0x10 | r2, r1); ++ bus, 0x10 | r2, r1, &val); + + /* Check if qca8k_read has failed for a different reason + * before returnting -ETIMEDOUT + */ +- if (ret < 0 && val < 0) +- return val; ++ if (ret < 0 && ret1 < 0) ++ return ret1; + + return ret; + } +@@ -725,7 +712,7 @@ qca8k_mdio_read(struct mii_bus *salve_bu + if (ret) + goto exit; + +- val = qca8k_mii_read32(bus, 0x10 | r2, r1); ++ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val); + + exit: + /* even if the busy_wait timeouts try to clear the MASTER_EN */ +@@ -733,10 +720,10 @@ exit: + + mutex_unlock(&bus->mdio_lock); + +- if (val >= 0) +- val &= QCA8K_MDIO_MASTER_DATA_MASK; ++ if (ret >= 0) ++ ret = val & QCA8K_MDIO_MASTER_DATA_MASK; + +- return val; ++ return ret; + } + + static int +@@ -1211,7 +1198,7 @@ qca8k_phylink_mac_config(struct dsa_swit + qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN); + + /* Enable/disable SerDes auto-negotiation as necessary */ +- val = qca8k_read(priv, QCA8K_REG_PWS); ++ qca8k_read(priv, QCA8K_REG_PWS, &val); + if (phylink_autoneg_inband(mode)) + val &= ~QCA8K_PWS_SERDES_AEN_DIS; + else +@@ -1219,7 +1206,7 @@ qca8k_phylink_mac_config(struct dsa_swit + qca8k_write(priv, QCA8K_REG_PWS, val); + + /* Configure the SGMII parameters */ +- val = qca8k_read(priv, QCA8K_REG_SGMII_CTRL); ++ qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val); + + val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | + QCA8K_SGMII_EN_TX | QCA8K_SGMII_EN_SD; +@@ -1314,10 +1301,11 @@ qca8k_phylink_mac_link_state(struct dsa_ + { + struct qca8k_priv *priv = ds->priv; + u32 reg; ++ int ret; + +- reg = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port)); +- if (reg < 0) +- return reg; ++ ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), ®); ++ if (ret < 0) ++ return ret; + + state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP); + state->an_complete = state->link; +@@ -1419,19 +1407,20 @@ qca8k_get_ethtool_stats(struct dsa_switc + struct qca8k_priv *priv = 
(struct qca8k_priv *)ds->priv; + const struct qca8k_mib_desc *mib; + u32 reg, i, val; +- u64 hi; ++ u64 hi = 0; ++ int ret; + + for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) { + mib = &ar8327_mib[i]; + reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset; + +- val = qca8k_read(priv, reg); +- if (val < 0) ++ ret = qca8k_read(priv, reg, &val); ++ if (ret < 0) + continue; + + if (mib->size == 2) { +- hi = qca8k_read(priv, reg + 4); +- if (hi < 0) ++ ret = qca8k_read(priv, reg + 4, (u32 *)&hi); ++ if (ret < 0) + continue; + } + +@@ -1459,7 +1448,7 @@ qca8k_set_mac_eee(struct dsa_switch *ds, + int ret; + + mutex_lock(&priv->reg_mutex); +- reg = qca8k_read(priv, QCA8K_REG_EEE_CTRL); ++ ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, ®); + if (reg < 0) { + ret = reg; + goto exit; +@@ -1802,14 +1791,15 @@ static int qca8k_read_switch_id(struct q + const struct qca8k_match_data *data; + u32 val; + u8 id; ++ int ret; + + /* get the switches ID from the compatible */ + data = of_device_get_match_data(priv->dev); + if (!data) + return -ENODEV; + +- val = qca8k_read(priv, QCA8K_REG_MASK_CTRL); +- if (val < 0) ++ ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val); ++ if (ret < 0) + return -ENODEV; + + id = QCA8K_MASK_CTRL_DEVICE_ID(val & QCA8K_MASK_CTRL_DEVICE_ID_MASK); diff --git a/target/linux/generic/backport-5.15/737-v5.14-02-net-dsa-qca8k-add-missing-check-return-value-in-qca8.patch b/target/linux/generic/backport-5.15/737-v5.14-02-net-dsa-qca8k-add-missing-check-return-value-in-qca8.patch new file mode 100644 index 0000000000..d20da5b85e --- /dev/null +++ b/target/linux/generic/backport-5.15/737-v5.14-02-net-dsa-qca8k-add-missing-check-return-value-in-qca8.patch @@ -0,0 +1,47 @@ +From 9fe99de01440d9ede74d447ac76e9c445d8daae9 Mon Sep 17 00:00:00 2001 +From: Yang Yingliang <yangyingliang@huawei.com> +Date: Sat, 29 May 2021 11:04:39 +0800 +Subject: [PATCH] net: dsa: qca8k: add missing check return value in + qca8k_phylink_mac_config() + +Now we can check qca8k_read() return value correctly, so if +it fails, we need return directly. 
+ +Signed-off-by: Yang Yingliang <yangyingliang@huawei.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/dsa/qca8k.c | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1128,6 +1128,7 @@ qca8k_phylink_mac_config(struct dsa_swit + { + struct qca8k_priv *priv = ds->priv; + u32 reg, val; ++ int ret; + + switch (port) { + case 0: /* 1st CPU port */ +@@ -1198,7 +1199,9 @@ qca8k_phylink_mac_config(struct dsa_swit + qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN); + + /* Enable/disable SerDes auto-negotiation as necessary */ +- qca8k_read(priv, QCA8K_REG_PWS, &val); ++ ret = qca8k_read(priv, QCA8K_REG_PWS, &val); ++ if (ret) ++ return; + if (phylink_autoneg_inband(mode)) + val &= ~QCA8K_PWS_SERDES_AEN_DIS; + else +@@ -1206,7 +1209,9 @@ qca8k_phylink_mac_config(struct dsa_swit + qca8k_write(priv, QCA8K_REG_PWS, val); + + /* Configure the SGMII parameters */ +- qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val); ++ ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val); ++ if (ret) ++ return; + + val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | + QCA8K_SGMII_EN_TX | QCA8K_SGMII_EN_SD; diff --git a/target/linux/generic/backport-5.15/738-v5.14-01-net-dsa-qca8k-fix-an-endian-bug-in-qca8k-get-ethtool.patch b/target/linux/generic/backport-5.15/738-v5.14-01-net-dsa-qca8k-fix-an-endian-bug-in-qca8k-get-ethtool.patch new file mode 100644 index 0000000000..aed97d0549 --- /dev/null +++ b/target/linux/generic/backport-5.15/738-v5.14-01-net-dsa-qca8k-fix-an-endian-bug-in-qca8k-get-ethtool.patch @@ -0,0 +1,47 @@ +From aa3d020b22cb844ab7bdbb9e5d861a64666e2b74 Mon Sep 17 00:00:00 2001 +From: Dan Carpenter <dan.carpenter@oracle.com> +Date: Wed, 9 Jun 2021 12:52:12 +0300 +Subject: [PATCH] net: dsa: qca8k: fix an endian bug in + qca8k_get_ethtool_stats() + +The "hi" variable is a u64 but the qca8k_read() writes to the top 32 +bits of it. That will work on little endian systems but it's a bit +subtle. It's cleaner to make declare "hi" as a u32. We will still need +to cast it when we shift it later on in the function but that's fine. + +Fixes: 7c9896e37807 ("net: dsa: qca8k: check return value of read functions correctly") +Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1412,7 +1412,7 @@ qca8k_get_ethtool_stats(struct dsa_switc + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; + const struct qca8k_mib_desc *mib; + u32 reg, i, val; +- u64 hi = 0; ++ u32 hi = 0; + int ret; + + for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) { +@@ -1424,14 +1424,14 @@ qca8k_get_ethtool_stats(struct dsa_switc + continue; + + if (mib->size == 2) { +- ret = qca8k_read(priv, reg + 4, (u32 *)&hi); ++ ret = qca8k_read(priv, reg + 4, &hi); + if (ret < 0) + continue; + } + + data[i] = val; + if (mib->size == 2) +- data[i] |= hi << 32; ++ data[i] |= (u64)hi << 32; + } + } + diff --git a/target/linux/generic/backport-5.15/738-v5.14-02-net-dsa-qca8k-check-the-correct-variable-in-qca8k-se.patch b/target/linux/generic/backport-5.15/738-v5.14-02-net-dsa-qca8k-check-the-correct-variable-in-qca8k-se.patch new file mode 100644 index 0000000000..c58f79cd8b --- /dev/null +++ b/target/linux/generic/backport-5.15/738-v5.14-02-net-dsa-qca8k-check-the-correct-variable-in-qca8k-se.patch @@ -0,0 +1,31 @@ +From 3d0167f2a627528032821cdeb78b4eab0510460f Mon Sep 17 00:00:00 2001 +From: Dan Carpenter <dan.carpenter@oracle.com> +Date: Wed, 9 Jun 2021 12:53:03 +0300 +Subject: [PATCH] net: dsa: qca8k: check the correct variable in + qca8k_set_mac_eee() + +This code check "reg" but "ret" was intended so the error handling will +never trigger. + +Fixes: 7c9896e37807 ("net: dsa: qca8k: check return value of read functions correctly") +Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 4 +--- + 1 file changed, 1 insertion(+), 3 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1454,10 +1454,8 @@ qca8k_set_mac_eee(struct dsa_switch *ds, + + mutex_lock(&priv->reg_mutex); + ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, ®); +- if (reg < 0) { +- ret = reg; ++ if (ret < 0) + goto exit; +- } + + if (eee->eee_enabled) + reg |= lpi_en; diff --git a/target/linux/generic/backport-5.15/739-v5.15-net-dsa-qca8k-fix-kernel-panic-with-legacy-mdio-mapping.patch b/target/linux/generic/backport-5.15/739-v5.15-net-dsa-qca8k-fix-kernel-panic-with-legacy-mdio-mapping.patch new file mode 100644 index 0000000000..1e293d3a0b --- /dev/null +++ b/target/linux/generic/backport-5.15/739-v5.15-net-dsa-qca8k-fix-kernel-panic-with-legacy-mdio-mapping.patch @@ -0,0 +1,80 @@ +From ce062a0adbfe933b1932235fdfd874c4c91d1bb0 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Sat, 11 Sep 2021 17:50:09 +0200 +Subject: net: dsa: qca8k: fix kernel panic with legacy mdio mapping + +When the mdio legacy mapping is used the mii_bus priv registered by DSA +refer to the dsa switch struct instead of the qca8k_priv struct and +causes a kernel panic. Create dedicated function when the internal +dedicated mdio driver is used to properly handle the 2 different +implementation. + +Fixes: 759bafb8a322 ("net: dsa: qca8k: add support for internal phy and internal mdio") +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 30 ++++++++++++++++++++++-------- + 1 file changed, 22 insertions(+), 8 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -643,10 +643,8 @@ qca8k_mdio_busy_wait(struct mii_bus *bus + } + + static int +-qca8k_mdio_write(struct mii_bus *salve_bus, int phy, int regnum, u16 data) ++qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data) + { +- struct qca8k_priv *priv = salve_bus->priv; +- struct mii_bus *bus = priv->bus; + u16 r1, r2, page; + u32 val; + int ret; +@@ -682,10 +680,8 @@ exit: + } + + static int +-qca8k_mdio_read(struct mii_bus *salve_bus, int phy, int regnum) ++qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum) + { +- struct qca8k_priv *priv = salve_bus->priv; +- struct mii_bus *bus = priv->bus; + u16 r1, r2, page; + u32 val; + int ret; +@@ -727,6 +723,24 @@ exit: + } + + static int ++qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data) ++{ ++ struct qca8k_priv *priv = slave_bus->priv; ++ struct mii_bus *bus = priv->bus; ++ ++ return qca8k_mdio_write(bus, phy, regnum, data); ++} ++ ++static int ++qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum) ++{ ++ struct qca8k_priv *priv = slave_bus->priv; ++ struct mii_bus *bus = priv->bus; ++ ++ return qca8k_mdio_read(bus, phy, regnum); ++} ++ ++static int + qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data) + { + struct qca8k_priv *priv = ds->priv; +@@ -775,8 +789,8 @@ qca8k_mdio_register(struct qca8k_priv *p + + bus->priv = (void *)priv; + bus->name = "qca8k slave mii"; +- bus->read = qca8k_mdio_read; +- bus->write = qca8k_mdio_write; ++ bus->read = qca8k_internal_mdio_read; ++ bus->write = qca8k_internal_mdio_write; + snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d", + ds->index); + diff --git a/target/linux/generic/backport-5.15/740-v5.13-0001-net-dsa-b53-Add-debug-prints-in-b53_vlan_enable.patch b/target/linux/generic/backport-5.15/740-v5.13-0001-net-dsa-b53-Add-debug-prints-in-b53_vlan_enable.patch new file mode 100644 index 0000000000..91cf55b18a --- /dev/null +++ b/target/linux/generic/backport-5.15/740-v5.13-0001-net-dsa-b53-Add-debug-prints-in-b53_vlan_enable.patch @@ -0,0 +1,65 @@ +From ee47ed08d75e8f16b3cf882061ee19c2ea19dd6c Mon Sep 17 00:00:00 2001 +From: Florian Fainelli <f.fainelli@gmail.com> +Date: Wed, 10 Mar 2021 10:52:26 -0800 +Subject: [PATCH] net: dsa: b53: Add debug prints in b53_vlan_enable() + +Having dynamic debug prints in b53_vlan_enable() has been helpful to +uncover a recent but update the function to indicate the port being +configured (or -1 for initial setup) and include the global VLAN enabled +and VLAN filtering enable status. + +Signed-off-by: Florian Fainelli <f.fainelli@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/b53/b53_common.c | 11 +++++++---- + 1 file changed, 7 insertions(+), 4 deletions(-) + +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -349,7 +349,7 @@ static void b53_set_forwarding(struct b5 + b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); + } + +-static void b53_enable_vlan(struct b53_device *dev, bool enable, ++static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, + bool enable_filtering) + { + u8 mgmt, vc0, vc1, vc4 = 0, vc5; +@@ -431,6 +431,9 @@ static void b53_enable_vlan(struct b53_d + b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); + + dev->vlan_enabled = enable; ++ ++ dev_dbg(dev->dev, "Port %d VLAN enabled: %d, filtering: %d\n", ++ port, enable, enable_filtering); + } + + static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) +@@ -708,7 +711,7 @@ int b53_configure_vlan(struct dsa_switch + b53_do_vlan_op(dev, VTA_CMD_CLEAR); + } + +- b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering); ++ b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering); + + b53_for_each_port(dev, i) + b53_write16(dev, B53_VLAN_PAGE, +@@ -1390,7 +1393,7 @@ int b53_vlan_filtering(struct dsa_switch + if (switchdev_trans_ph_prepare(trans)) + return 0; + +- b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering); ++ b53_enable_vlan(dev, port, dev->vlan_enabled, vlan_filtering); + + return 0; + } +@@ -1415,7 +1418,7 @@ int b53_vlan_prepare(struct dsa_switch * + if (vlan->vid_end >= dev->num_vlans) + return -ERANGE; + +- b53_enable_vlan(dev, true, ds->vlan_filtering); ++ b53_enable_vlan(dev, port, true, ds->vlan_filtering); + + return 0; + } diff --git a/target/linux/generic/backport-5.15/740-v5.13-0002-net-dsa-b53-spi-allow-device-tree-probing.patch b/target/linux/generic/backport-5.15/740-v5.13-0002-net-dsa-b53-spi-allow-device-tree-probing.patch new file mode 100644 index 0000000000..56579b2d36 --- /dev/null +++ b/target/linux/generic/backport-5.15/740-v5.13-0002-net-dsa-b53-spi-allow-device-tree-probing.patch @@ -0,0 +1,41 @@ +From 6d16eadab6db0c1d61e59fee7ed1ecc2d10269be Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?=C3=81lvaro=20Fern=C3=A1ndez=20Rojas?= <noltari@gmail.com> +Date: Mon, 15 Mar 2021 15:14:23 +0100 +Subject: [PATCH] net: dsa: b53: spi: allow device tree probing +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Add missing of_match_table to allow device tree probing. + +Signed-off-by: Álvaro Fernández Rojas <noltari@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/b53/b53_spi.c | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +--- a/drivers/net/dsa/b53/b53_spi.c ++++ b/drivers/net/dsa/b53/b53_spi.c +@@ -324,9 +324,22 @@ static int b53_spi_remove(struct spi_dev + return 0; + } + ++static const struct of_device_id b53_spi_of_match[] = { ++ { .compatible = "brcm,bcm5325" }, ++ { .compatible = "brcm,bcm5365" }, ++ { .compatible = "brcm,bcm5395" }, ++ { .compatible = "brcm,bcm5397" }, ++ { .compatible = "brcm,bcm5398" }, ++ { .compatible = "brcm,bcm53115" }, ++ { .compatible = "brcm,bcm53125" }, ++ { .compatible = "brcm,bcm53128" }, ++ { /* sentinel */ } ++}; ++ + static struct spi_driver b53_spi_driver = { + .driver = { + .name = "b53-switch", ++ .of_match_table = b53_spi_of_match, + }, + .probe = b53_spi_probe, + .remove = b53_spi_remove, diff --git a/target/linux/generic/backport-5.15/740-v5.13-0003-net-dsa-b53-relax-is63xx-condition.patch b/target/linux/generic/backport-5.15/740-v5.13-0003-net-dsa-b53-relax-is63xx-condition.patch new file mode 100644 index 0000000000..99eced1b6a --- /dev/null +++ b/target/linux/generic/backport-5.15/740-v5.13-0003-net-dsa-b53-relax-is63xx-condition.patch @@ -0,0 +1,31 @@ +From ad426d7d966b525b73ed5a1842dd830312bbba71 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?=C3=81lvaro=20Fern=C3=A1ndez=20Rojas?= <noltari@gmail.com> +Date: Wed, 17 Mar 2021 09:42:01 +0100 +Subject: [PATCH] net: dsa: b53: relax is63xx() condition +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +BCM63xx switches are present on bcm63xx and bmips devices. + +Signed-off-by: Álvaro Fernández Rojas <noltari@gmail.com> +Acked-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/b53/b53_priv.h | 4 ---- + 1 file changed, 4 deletions(-) + +--- a/drivers/net/dsa/b53/b53_priv.h ++++ b/drivers/net/dsa/b53/b53_priv.h +@@ -186,11 +186,7 @@ static inline int is531x5(struct b53_dev + + static inline int is63xx(struct b53_device *dev) + { +-#ifdef CONFIG_BCM63XX + return dev->chip_id == BCM63XX_DEVICE_ID; +-#else +- return 0; +-#endif + } + + static inline int is5301x(struct b53_device *dev) diff --git a/target/linux/generic/backport-5.15/740-v5.13-0004-net-dsa-tag_brcm-add-support-for-legacy-tags.patch b/target/linux/generic/backport-5.15/740-v5.13-0004-net-dsa-tag_brcm-add-support-for-legacy-tags.patch new file mode 100644 index 0000000000..3b7d8f37cd --- /dev/null +++ b/target/linux/generic/backport-5.15/740-v5.13-0004-net-dsa-tag_brcm-add-support-for-legacy-tags.patch @@ -0,0 +1,180 @@ +From 964dbf186eaa84d409c359ddf09c827a3fbe8228 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?=C3=81lvaro=20Fern=C3=A1ndez=20Rojas?= <noltari@gmail.com> +Date: Wed, 17 Mar 2021 11:29:26 +0100 +Subject: [PATCH] net: dsa: tag_brcm: add support for legacy tags +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Add support for legacy Broadcom tags, which are similar to DSA_TAG_PROTO_BRCM. +These tags are used on BCM5325, BCM5365 and BCM63xx switches. + +Signed-off-by: Álvaro Fernández Rojas <noltari@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + include/net/dsa.h | 2 + + net/dsa/Kconfig | 7 +++ + net/dsa/tag_brcm.c | 107 +++++++++++++++++++++++++++++++++++++++++++-- + 3 files changed, 113 insertions(+), 3 deletions(-) + +--- a/include/net/dsa.h ++++ b/include/net/dsa.h +@@ -45,10 +45,12 @@ struct phylink_link_state; + #define DSA_TAG_PROTO_OCELOT_VALUE 15 + #define DSA_TAG_PROTO_AR9331_VALUE 16 + #define DSA_TAG_PROTO_RTL4_A_VALUE 17 ++#define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22 + + enum dsa_tag_protocol { + DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE, + DSA_TAG_PROTO_BRCM = DSA_TAG_PROTO_BRCM_VALUE, ++ DSA_TAG_PROTO_BRCM_LEGACY = DSA_TAG_PROTO_BRCM_LEGACY_VALUE, + DSA_TAG_PROTO_BRCM_PREPEND = DSA_TAG_PROTO_BRCM_PREPEND_VALUE, + DSA_TAG_PROTO_DSA = DSA_TAG_PROTO_DSA_VALUE, + DSA_TAG_PROTO_EDSA = DSA_TAG_PROTO_EDSA_VALUE, +--- a/net/dsa/Kconfig ++++ b/net/dsa/Kconfig +@@ -47,6 +47,13 @@ config NET_DSA_TAG_BRCM + Say Y if you want to enable support for tagging frames for the + Broadcom switches which place the tag after the MAC source address. + ++config NET_DSA_TAG_BRCM_LEGACY ++ tristate "Tag driver for Broadcom legacy switches using in-frame headers" ++ select NET_DSA_TAG_BRCM_COMMON ++ help ++ Say Y if you want to enable support for tagging frames for the ++ Broadcom legacy switches which place the tag after the MAC source ++ address. + + config NET_DSA_TAG_BRCM_PREPEND + tristate "Tag driver for Broadcom switches using prepended headers" +--- a/net/dsa/tag_brcm.c ++++ b/net/dsa/tag_brcm.c +@@ -11,9 +11,26 @@ + + #include "dsa_priv.h" + +-/* This tag length is 4 bytes, older ones were 6 bytes, we do not +- * handle them +- */ ++/* Legacy Broadcom tag (6 bytes) */ ++#define BRCM_LEG_TAG_LEN 6 ++ ++/* Type fields */ ++/* 1st byte in the tag */ ++#define BRCM_LEG_TYPE_HI 0x88 ++/* 2nd byte in the tag */ ++#define BRCM_LEG_TYPE_LO 0x74 ++ ++/* Tag fields */ ++/* 3rd byte in the tag */ ++#define BRCM_LEG_UNICAST (0 << 5) ++#define BRCM_LEG_MULTICAST (1 << 5) ++#define BRCM_LEG_EGRESS (2 << 5) ++#define BRCM_LEG_INGRESS (3 << 5) ++ ++/* 6th byte in the tag */ ++#define BRCM_LEG_PORT_ID (0xf) ++ ++/* Newer Broadcom tag (4 bytes) */ + #define BRCM_TAG_LEN 4 + + /* Tag is constructed and desconstructed using byte by byte access +@@ -194,6 +211,87 @@ DSA_TAG_DRIVER(brcm_netdev_ops); + MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM); + #endif + ++#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY) ++static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb, ++ struct net_device *dev) ++{ ++ struct dsa_port *dp = dsa_slave_to_port(dev); ++ u8 *brcm_tag; ++ ++ /* The Ethernet switch we are interfaced with needs packets to be at ++ * least 64 bytes (including FCS) otherwise they will be discarded when ++ * they enter the switch port logic. When Broadcom tags are enabled, we ++ * need to make sure that packets are at least 70 bytes ++ * (including FCS and tag) because the length verification is done after ++ * the Broadcom tag is stripped off the ingress packet. 
++ * ++ * Let dsa_slave_xmit() free the SKB ++ */ ++ if (__skb_put_padto(skb, ETH_ZLEN + BRCM_LEG_TAG_LEN, false)) ++ return NULL; ++ ++ skb_push(skb, BRCM_LEG_TAG_LEN); ++ ++ memmove(skb->data, skb->data + BRCM_LEG_TAG_LEN, 2 * ETH_ALEN); ++ ++ brcm_tag = skb->data + 2 * ETH_ALEN; ++ ++ /* Broadcom tag type */ ++ brcm_tag[0] = BRCM_LEG_TYPE_HI; ++ brcm_tag[1] = BRCM_LEG_TYPE_LO; ++ ++ /* Broadcom tag value */ ++ brcm_tag[2] = BRCM_LEG_EGRESS; ++ brcm_tag[3] = 0; ++ brcm_tag[4] = 0; ++ brcm_tag[5] = dp->index & BRCM_LEG_PORT_ID; ++ ++ return skb; ++} ++ ++static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb, ++ struct net_device *dev, ++ struct packet_type *pt) ++{ ++ int source_port; ++ u8 *brcm_tag; ++ ++ if (unlikely(!pskb_may_pull(skb, BRCM_LEG_PORT_ID))) ++ return NULL; ++ ++ brcm_tag = skb->data - 2; ++ ++ source_port = brcm_tag[5] & BRCM_LEG_PORT_ID; ++ ++ skb->dev = dsa_master_find_slave(dev, 0, source_port); ++ if (!skb->dev) ++ return NULL; ++ ++ /* Remove Broadcom tag and update checksum */ ++ skb_pull_rcsum(skb, BRCM_LEG_TAG_LEN); ++ ++ skb->offload_fwd_mark = 1; ++ ++ /* Move the Ethernet DA and SA */ ++ memmove(skb->data - ETH_HLEN, ++ skb->data - ETH_HLEN - BRCM_LEG_TAG_LEN, ++ 2 * ETH_ALEN); ++ ++ return skb; ++} ++ ++static const struct dsa_device_ops brcm_legacy_netdev_ops = { ++ .name = "brcm-legacy", ++ .proto = DSA_TAG_PROTO_BRCM_LEGACY, ++ .xmit = brcm_leg_tag_xmit, ++ .rcv = brcm_leg_tag_rcv, ++ .overhead = BRCM_LEG_TAG_LEN, ++}; ++ ++DSA_TAG_DRIVER(brcm_legacy_netdev_ops); ++MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_LEGACY); ++#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY */ ++ + #if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND) + static struct sk_buff *brcm_tag_xmit_prepend(struct sk_buff *skb, + struct net_device *dev) +@@ -226,6 +324,9 @@ static struct dsa_tag_driver *dsa_tag_dr + #if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM) + &DSA_TAG_DRIVER_NAME(brcm_netdev_ops), + #endif ++#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY) ++ &DSA_TAG_DRIVER_NAME(brcm_legacy_netdev_ops), ++#endif + #if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND) + &DSA_TAG_DRIVER_NAME(brcm_prepend_netdev_ops), + #endif diff --git a/target/linux/generic/backport-5.15/740-v5.13-0005-net-dsa-b53-support-legacy-tags.patch b/target/linux/generic/backport-5.15/740-v5.13-0005-net-dsa-b53-support-legacy-tags.patch new file mode 100644 index 0000000000..838e78a057 --- /dev/null +++ b/target/linux/generic/backport-5.15/740-v5.13-0005-net-dsa-b53-support-legacy-tags.patch @@ -0,0 +1,53 @@ +From 46c5176c586c81306bf9e7024c13b95da775490f Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?=C3=81lvaro=20Fern=C3=A1ndez=20Rojas?= <noltari@gmail.com> +Date: Wed, 17 Mar 2021 11:29:27 +0100 +Subject: [PATCH] net: dsa: b53: support legacy tags +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +These tags are used on BCM5325, BCM5365 and BCM63xx switches. + +Signed-off-by: Álvaro Fernández Rojas <noltari@gmail.com> +Acked-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/b53/Kconfig | 1 + + drivers/net/dsa/b53/b53_common.c | 12 +++++++----- + 2 files changed, 8 insertions(+), 5 deletions(-) + +--- a/drivers/net/dsa/b53/Kconfig ++++ b/drivers/net/dsa/b53/Kconfig +@@ -3,6 +3,7 @@ menuconfig B53 + tristate "Broadcom BCM53xx managed switch support" + depends on NET_DSA + select NET_DSA_TAG_BRCM ++ select NET_DSA_TAG_BRCM_LEGACY + select NET_DSA_TAG_BRCM_PREPEND + help + This driver adds support for Broadcom managed switch chips. It supports +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -2024,15 +2024,17 @@ enum dsa_tag_protocol b53_get_tag_protoc + { + struct b53_device *dev = ds->priv; + +- /* Older models (5325, 5365) support a different tag format that we do +- * not support in net/dsa/tag_brcm.c yet. +- */ +- if (is5325(dev) || is5365(dev) || +- !b53_can_enable_brcm_tags(ds, port, mprot)) { ++ if (!b53_can_enable_brcm_tags(ds, port, mprot)) { + dev->tag_protocol = DSA_TAG_PROTO_NONE; + goto out; + } + ++ /* Older models require a different 6 byte tag */ ++ if (is5325(dev) || is5365(dev) || is63xx(dev)) { ++ dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY; ++ goto out; ++ } ++ + /* Broadcom BCM58xx chips have a flow accelerator on Port 8 + * which requires us to use the prepended Broadcom tag type + */ diff --git a/target/linux/generic/backport-5.15/740-v5.13-0006-net-dsa-b53-mmap-Add-device-tree-support.patch b/target/linux/generic/backport-5.15/740-v5.13-0006-net-dsa-b53-mmap-Add-device-tree-support.patch new file mode 100644 index 0000000000..c1014b20bc --- /dev/null +++ b/target/linux/generic/backport-5.15/740-v5.13-0006-net-dsa-b53-mmap-Add-device-tree-support.patch @@ -0,0 +1,92 @@ +From a5538a777b73b35750ed1ffff8c1ef539e861624 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?=C3=81lvaro=20Fern=C3=A1ndez=20Rojas?= <noltari@gmail.com> +Date: Wed, 17 Mar 2021 10:23:17 +0100 +Subject: [PATCH] net: dsa: b53: mmap: Add device tree support +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Add device tree support to b53_mmap.c while keeping platform devices support. + +Signed-off-by: Álvaro Fernández Rojas <noltari@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/b53/b53_mmap.c | 55 ++++++++++++++++++++++++++++++++++ + 1 file changed, 55 insertions(+) + +--- a/drivers/net/dsa/b53/b53_mmap.c ++++ b/drivers/net/dsa/b53/b53_mmap.c +@@ -16,6 +16,7 @@ + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + ++#include <linux/bits.h> + #include <linux/kernel.h> + #include <linux/module.h> + #include <linux/io.h> +@@ -228,11 +229,65 @@ static const struct b53_io_ops b53_mmap_ + .write64 = b53_mmap_write64, + }; + ++static int b53_mmap_probe_of(struct platform_device *pdev, ++ struct b53_platform_data **ppdata) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct device_node *of_ports, *of_port; ++ struct device *dev = &pdev->dev; ++ struct b53_platform_data *pdata; ++ void __iomem *mem; ++ ++ mem = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(mem)) ++ return PTR_ERR(mem); ++ ++ pdata = devm_kzalloc(dev, sizeof(struct b53_platform_data), ++ GFP_KERNEL); ++ if (!pdata) ++ return -ENOMEM; ++ ++ pdata->regs = mem; ++ pdata->chip_id = BCM63XX_DEVICE_ID; ++ pdata->big_endian = of_property_read_bool(np, "big-endian"); ++ ++ of_ports = of_get_child_by_name(np, "ports"); ++ if (!of_ports) { ++ dev_err(dev, "no ports child node found\n"); ++ return -EINVAL; ++ } ++ ++ for_each_available_child_of_node(of_ports, of_port) { ++ u32 reg; ++ ++ if (of_property_read_u32(of_port, "reg", ®)) ++ continue; ++ ++ if (reg < B53_CPU_PORT) ++ pdata->enabled_ports |= BIT(reg); ++ } ++ ++ of_node_put(of_ports); ++ *ppdata = pdata; ++ ++ return 0; ++} ++ + static int b53_mmap_probe(struct platform_device *pdev) + { ++ struct device_node *np = pdev->dev.of_node; + struct b53_platform_data *pdata = pdev->dev.platform_data; + struct b53_mmap_priv *priv; + struct b53_device *dev; ++ int ret; ++ ++ if (!pdata && np) { ++ ret = b53_mmap_probe_of(pdev, &pdata); ++ if (ret) { ++ dev_err(&pdev->dev, "OF probe error\n"); ++ return ret; ++ } ++ } + + if (!pdata) + return -EINVAL; diff --git a/target/linux/generic/backport-5.15/740-v5.13-0007-net-dsa-b53-spi-add-missing-MODULE_DEVICE_TABLE.patch b/target/linux/generic/backport-5.15/740-v5.13-0007-net-dsa-b53-spi-add-missing-MODULE_DEVICE_TABLE.patch new file mode 100644 index 0000000000..ea36755732 --- /dev/null +++ b/target/linux/generic/backport-5.15/740-v5.13-0007-net-dsa-b53-spi-add-missing-MODULE_DEVICE_TABLE.patch @@ -0,0 +1,27 @@ +From 866f1577ba69bde2b9f36c300f603596c7d84a62 Mon Sep 17 00:00:00 2001 +From: Qinglang Miao <miaoqinglang@huawei.com> +Date: Thu, 25 Mar 2021 17:19:54 +0800 +Subject: [PATCH] net: dsa: b53: spi: add missing MODULE_DEVICE_TABLE + +This patch adds missing MODULE_DEVICE_TABLE definition which generates +correct modalias for automatic loading of this driver when it is built +as an external module. + +Reported-by: Hulk Robot <hulkci@huawei.com> +Signed-off-by: Qinglang Miao <miaoqinglang@huawei.com> +Acked-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/b53/b53_spi.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/net/dsa/b53/b53_spi.c ++++ b/drivers/net/dsa/b53/b53_spi.c +@@ -335,6 +335,7 @@ static const struct of_device_id b53_spi + { .compatible = "brcm,bcm53128" }, + { /* sentinel */ } + }; ++MODULE_DEVICE_TABLE(of, b53_spi_of_match); + + static struct spi_driver b53_spi_driver = { + .driver = { diff --git a/target/linux/generic/backport-5.15/741-v5.14-0001-net-dsa-b53-Do-not-force-CPU-to-be-always-tagged.patch b/target/linux/generic/backport-5.15/741-v5.14-0001-net-dsa-b53-Do-not-force-CPU-to-be-always-tagged.patch new file mode 100644 index 0000000000..2a8def39b8 --- /dev/null +++ b/target/linux/generic/backport-5.15/741-v5.14-0001-net-dsa-b53-Do-not-force-CPU-to-be-always-tagged.patch @@ -0,0 +1,86 @@ +From 2c32a3d3c233b855943677609fe388f82b1f0975 Mon Sep 17 00:00:00 2001 +From: Florian Fainelli <f.fainelli@gmail.com> +Date: Tue, 8 Jun 2021 14:22:04 -0700 +Subject: [PATCH] net: dsa: b53: Do not force CPU to be always tagged + +Commit ca8931948344 ("net: dsa: b53: Keep CPU port as tagged in all +VLANs") forced the CPU port to be always tagged in any VLAN membership. +This was necessary back then because we did not support Broadcom tags +for all configurations so the only way to differentiate tagged and +untagged traffic while DSA_TAG_PROTO_NONE was used was to force the CPU +port into being always tagged. + +With most configurations enabling Broadcom tags, especially after +8fab459e69ab ("net: dsa: b53: Enable Broadcom tags for 531x5/539x +families") we do not need to apply this unconditional force tagging of +the CPU port in all VLANs. + +A helper function is introduced to faciliate the encapsulation of the +specific condition requiring the CPU port to be tagged in all VLANs and +the dsa_switch_ops::untag_bridge_pvid boolean is moved to when +dsa_switch_ops::setup is called when we have already determined the +tagging protocol we will be using. + +Reported-by: Matthew Hagan <mnhagan88@gmail.com> +Signed-off-by: Florian Fainelli <f.fainelli@gmail.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Tested-by: Matthew Hagan <mnhagan88@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/b53/b53_common.c | 17 ++++++++++++++--- + 1 file changed, 14 insertions(+), 3 deletions(-) + +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -1049,6 +1049,11 @@ static int b53_setup(struct dsa_switch * + unsigned int port; + int ret; + ++ /* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set ++ * which forces the CPU port to be tagged in all VLANs. 
++ */ ++ ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE; ++ + ret = b53_reset_switch(dev); + if (ret) { + dev_err(ds->dev, "failed to reset switch\n"); +@@ -1423,6 +1428,13 @@ int b53_vlan_prepare(struct dsa_switch * + return 0; + } + EXPORT_SYMBOL(b53_vlan_prepare); ++ ++static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port) ++{ ++ struct b53_device *dev = ds->priv; ++ ++ return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port); ++} + + void b53_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +@@ -1442,7 +1454,7 @@ void b53_vlan_add(struct dsa_switch *ds, + untagged = true; + + vl->members |= BIT(port); +- if (untagged && !dsa_is_cpu_port(ds, port)) ++ if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) + vl->untag |= BIT(port); + else + vl->untag &= ~BIT(port); +@@ -1480,7 +1492,7 @@ int b53_vlan_del(struct dsa_switch *ds, + if (pvid == vid) + pvid = b53_default_pvid(dev); + +- if (untagged && !dsa_is_cpu_port(ds, port)) ++ if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) + vl->untag &= ~(BIT(port)); + + b53_set_vlan_entry(dev, vid, vl); +@@ -2644,7 +2656,6 @@ struct b53_device *b53_switch_alloc(stru + dev->ops = ops; + ds->ops = &b53_switch_ops; + ds->configure_vlan_while_not_filtering = true; +- ds->untag_bridge_pvid = true; + dev->vlan_enabled = ds->configure_vlan_while_not_filtering; + /* Let DSA handle the case were multiple bridges span the same switch + * device and different VLAN awareness settings are requested, which diff --git a/target/linux/generic/backport-5.15/741-v5.14-0002-net-dsa-b53-remove-redundant-null-check-on-dev.patch b/target/linux/generic/backport-5.15/741-v5.14-0002-net-dsa-b53-remove-redundant-null-check-on-dev.patch new file mode 100644 index 0000000000..ee3a71ffa5 --- /dev/null +++ b/target/linux/generic/backport-5.15/741-v5.14-0002-net-dsa-b53-remove-redundant-null-check-on-dev.patch @@ -0,0 +1,30 @@ +From 11b57faf951cd3a570e3d9e463fc7c41023bc8c6 Mon Sep 17 00:00:00 2001 +From: Colin Ian King <colin.king@canonical.com> +Date: Tue, 15 Jun 2021 10:05:16 +0100 +Subject: [PATCH] net: dsa: b53: remove redundant null check on dev + +The pointer dev can never be null, the null check is redundant +and can be removed. Cleans up a static analysis warning that +pointer priv is dereferencing dev before dev is being null +checked. + +Addresses-Coverity: ("Dereference before null check") +Signed-off-by: Colin Ian King <colin.king@canonical.com> +Acked-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/b53/b53_srab.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +--- a/drivers/net/dsa/b53/b53_srab.c ++++ b/drivers/net/dsa/b53/b53_srab.c +@@ -632,8 +632,7 @@ static int b53_srab_remove(struct platfo + struct b53_srab_priv *priv = dev->priv; + + b53_srab_intr_set(priv, false); +- if (dev) +- b53_switch_remove(dev); ++ b53_switch_remove(dev); + + return 0; + } diff --git a/target/linux/generic/backport-5.15/741-v5.14-0003-net-dsa-b53-Create-default-VLAN-entry-explicitly.patch b/target/linux/generic/backport-5.15/741-v5.14-0003-net-dsa-b53-Create-default-VLAN-entry-explicitly.patch new file mode 100644 index 0000000000..df891d68ab --- /dev/null +++ b/target/linux/generic/backport-5.15/741-v5.14-0003-net-dsa-b53-Create-default-VLAN-entry-explicitly.patch @@ -0,0 +1,71 @@ +From 64a81b24487f0d2fba0f033029eec2abc7d82cee Mon Sep 17 00:00:00 2001 +From: Florian Fainelli <f.fainelli@gmail.com> +Date: Mon, 21 Jun 2021 15:10:55 -0700 +Subject: [PATCH] net: dsa: b53: Create default VLAN entry explicitly + +In case CONFIG_VLAN_8021Q is not set, there will be no call down to the +b53 driver to ensure that the default PVID VLAN entry will be configured +with the appropriate untagged attribute towards the CPU port. We were +implicitly relying on dsa_slave_vlan_rx_add_vid() to do that for us, +instead make it explicit. + +Reported-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: Florian Fainelli <f.fainelli@gmail.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/b53/b53_common.c | 27 +++++++++++++++++++-------- + 1 file changed, 19 insertions(+), 8 deletions(-) + +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -693,6 +693,13 @@ static u16 b53_default_pvid(struct b53_d + return 0; + } + ++static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port) ++{ ++ struct b53_device *dev = ds->priv; ++ ++ return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port); ++} ++ + int b53_configure_vlan(struct dsa_switch *ds) + { + struct b53_device *dev = ds->priv; +@@ -713,9 +720,20 @@ int b53_configure_vlan(struct dsa_switch + + b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering); + +- b53_for_each_port(dev, i) ++ /* Create an untagged VLAN entry for the default PVID in case ++ * CONFIG_VLAN_8021Q is disabled and there are no calls to ++ * dsa_slave_vlan_rx_add_vid() to create the default VLAN ++ * entry. Do this only when the tagging protocol is not ++ * DSA_TAG_PROTO_NONE ++ */ ++ b53_for_each_port(dev, i) { ++ v = &dev->vlans[def_vid]; ++ v->members |= BIT(i); ++ if (!b53_vlan_port_needs_forced_tagged(ds, i)) ++ v->untag = v->members; + b53_write16(dev, B53_VLAN_PAGE, + B53_VLAN_PORT_DEF_TAG(i), def_vid); ++ } + + /* Upon initial call we have not set-up any VLANs, but upon + * system resume, we need to restore all VLAN entries. 
+@@ -1429,13 +1447,6 @@ int b53_vlan_prepare(struct dsa_switch * + } + EXPORT_SYMBOL(b53_vlan_prepare); + +-static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port) +-{ +- struct b53_device *dev = ds->priv; +- +- return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port); +-} +- + void b53_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) + { diff --git a/target/linux/generic/backport-5.15/742-v5.16-net-phy-at803x-add-support-for-qca-8327-internal-phy.patch b/target/linux/generic/backport-5.15/742-v5.16-net-phy-at803x-add-support-for-qca-8327-internal-phy.patch new file mode 100644 index 0000000000..16aa0711ad --- /dev/null +++ b/target/linux/generic/backport-5.15/742-v5.16-net-phy-at803x-add-support-for-qca-8327-internal-phy.patch @@ -0,0 +1,48 @@ +From 0ccf8511182436183c031e8a2f740ae91a02c625 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Tue, 14 Sep 2021 14:33:45 +0200 +Subject: net: phy: at803x: add support for qca 8327 internal phy + +Add support for qca8327 internal phy needed for correct init of the +switch port. It does use the same qca8337 function and reg just with a +different id. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Tested-by: Rosen Penev <rosenp@gmail.com> +Tested-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/phy/at803x.c | 15 +++++++++++++++ + 1 file changed, 15 insertions(+) + +--- a/drivers/net/phy/at803x.c ++++ b/drivers/net/phy/at803x.c +@@ -1328,6 +1328,19 @@ static struct phy_driver at803x_driver[] + .get_sset_count = at803x_get_sset_count, + .get_strings = at803x_get_strings, + .get_stats = at803x_get_stats, ++}, { ++ /* QCA8327 */ ++ .phy_id = QCA8327_PHY_ID, ++ .phy_id_mask = QCA8K_PHY_ID_MASK, ++ .name = "QCA PHY 8327", ++ /* PHY_GBIT_FEATURES */ ++ .probe = at803x_probe, ++ .flags = PHY_IS_INTERNAL, ++ .config_init = qca83xx_config_init, ++ .soft_reset = genphy_soft_reset, ++ .get_sset_count = at803x_get_sset_count, ++ .get_strings = at803x_get_strings, ++ .get_stats = at803x_get_stats, + }, }; + + module_phy_driver(at803x_driver); +@@ -1338,6 +1351,8 @@ static struct mdio_device_id __maybe_unu + { PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) }, + { PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) }, + { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) }, ++ { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) }, ++ { PHY_ID_MATCH_EXACT(QCA8327_PHY_ID) }, + { } + }; + diff --git a/target/linux/generic/backport-5.15/743-v5.16-0001-net-dsa-b53-Include-all-ports-in-enabled_ports.patch b/target/linux/generic/backport-5.15/743-v5.16-0001-net-dsa-b53-Include-all-ports-in-enabled_ports.patch new file mode 100644 index 0000000000..d80b5db714 --- /dev/null +++ b/target/linux/generic/backport-5.15/743-v5.16-0001-net-dsa-b53-Include-all-ports-in-enabled_ports.patch @@ -0,0 +1,131 @@ +From 983d96a9116a328668601555d96736261d33170c Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Thu, 16 Sep 2021 14:03:51 +0200 +Subject: [PATCH] net: dsa: b53: Include all ports in "enabled_ports" +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Make "enabled_ports" bitfield contain all available switch ports +including a CPU port. This way there is no need for fixup during +initialization. + +For BCM53010, BCM53018 and BCM53019 include also other available ports. 
+ +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Tested-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/dsa/b53/b53_common.c | 23 +++++++++++------------ + 1 file changed, 11 insertions(+), 12 deletions(-) + +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -2288,7 +2288,7 @@ static const struct b53_chip_data b53_sw + .chip_id = BCM5325_DEVICE_ID, + .dev_name = "BCM5325", + .vlans = 16, +- .enabled_ports = 0x1f, ++ .enabled_ports = 0x3f, + .arl_bins = 2, + .arl_buckets = 1024, + .imp_port = 5, +@@ -2299,7 +2299,7 @@ static const struct b53_chip_data b53_sw + .chip_id = BCM5365_DEVICE_ID, + .dev_name = "BCM5365", + .vlans = 256, +- .enabled_ports = 0x1f, ++ .enabled_ports = 0x3f, + .arl_bins = 2, + .arl_buckets = 1024, + .imp_port = 5, +@@ -2310,7 +2310,7 @@ static const struct b53_chip_data b53_sw + .chip_id = BCM5389_DEVICE_ID, + .dev_name = "BCM5389", + .vlans = 4096, +- .enabled_ports = 0x1f, ++ .enabled_ports = 0x11f, + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +@@ -2324,7 +2324,7 @@ static const struct b53_chip_data b53_sw + .chip_id = BCM5395_DEVICE_ID, + .dev_name = "BCM5395", + .vlans = 4096, +- .enabled_ports = 0x1f, ++ .enabled_ports = 0x11f, + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +@@ -2338,7 +2338,7 @@ static const struct b53_chip_data b53_sw + .chip_id = BCM5397_DEVICE_ID, + .dev_name = "BCM5397", + .vlans = 4096, +- .enabled_ports = 0x1f, ++ .enabled_ports = 0x11f, + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +@@ -2352,7 +2352,7 @@ static const struct b53_chip_data b53_sw + .chip_id = BCM5398_DEVICE_ID, + .dev_name = "BCM5398", + .vlans = 4096, +- .enabled_ports = 0x7f, ++ .enabled_ports = 0x17f, + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +@@ -2366,7 +2366,7 @@ static const struct b53_chip_data b53_sw + .chip_id = BCM53115_DEVICE_ID, + .dev_name = "BCM53115", + .vlans = 4096, +- .enabled_ports = 0x1f, ++ .enabled_ports = 0x11f, + .arl_bins = 4, + .arl_buckets = 1024, + .vta_regs = B53_VTA_REGS, +@@ -2380,7 +2380,7 @@ static const struct b53_chip_data b53_sw + .chip_id = BCM53125_DEVICE_ID, + .dev_name = "BCM53125", + .vlans = 4096, +- .enabled_ports = 0xff, ++ .enabled_ports = 0x1ff, + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +@@ -2422,7 +2422,7 @@ static const struct b53_chip_data b53_sw + .chip_id = BCM53010_DEVICE_ID, + .dev_name = "BCM53010", + .vlans = 4096, +- .enabled_ports = 0x1f, ++ .enabled_ports = 0x1bf, + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +@@ -2464,7 +2464,7 @@ static const struct b53_chip_data b53_sw + .chip_id = BCM53018_DEVICE_ID, + .dev_name = "BCM53018", + .vlans = 4096, +- .enabled_ports = 0x1f, ++ .enabled_ports = 0x1bf, + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +@@ -2478,7 +2478,7 @@ static const struct b53_chip_data b53_sw + .chip_id = BCM53019_DEVICE_ID, + .dev_name = "BCM53019", + .vlans = 4096, +- .enabled_ports = 0x1f, ++ .enabled_ports = 0x1bf, + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +@@ -2605,7 +2605,6 @@ static int b53_switch_init(struct b53_de + dev->cpu_port = 5; + } + +- dev->enabled_ports |= BIT(dev->cpu_port); + dev->num_ports = fls(dev->enabled_ports); + + dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS); diff --git a/target/linux/generic/backport-5.15/743-v5.16-0002-net-dsa-b53-Drop-BCM5301x-workaround-for-a-wrong-CPU.patch 
b/target/linux/generic/backport-5.15/743-v5.16-0002-net-dsa-b53-Drop-BCM5301x-workaround-for-a-wrong-CPU.patch new file mode 100644 index 0000000000..4a4f8e940d --- /dev/null +++ b/target/linux/generic/backport-5.15/743-v5.16-0002-net-dsa-b53-Drop-BCM5301x-workaround-for-a-wrong-CPU.patch @@ -0,0 +1,42 @@ +From b290c6384afabbca5ae6e2af72fb1b2bc37922be Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Thu, 16 Sep 2021 14:03:52 +0200 +Subject: [PATCH] net: dsa: b53: Drop BCM5301x workaround for a wrong CPU/IMP + port +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +On BCM5301x port 8 requires a fixed link when used. + +Years ago when b53 was an OpenWrt downstream driver (with configuration +based on sometimes bugged NVRAM) there was a need for a fixup. In case +of forcing fixed link for (incorrectly specified) port 5 the code had to +actually setup port 8 link. + +For upstream b53 driver with setup based on DT there is no need for that +workaround. In DT we have and require correct ports setup. + +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Tested-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/dsa/b53/b53_common.c | 6 ------ + 1 file changed, 6 deletions(-) + +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -1256,12 +1256,6 @@ static void b53_adjust_link(struct dsa_s + return; + } + } +- } else if (is5301x(dev)) { +- if (port != dev->cpu_port) { +- b53_force_port_config(dev, dev->cpu_port, 2000, +- DUPLEX_FULL, true, true); +- b53_force_link(dev, dev->cpu_port, 1); +- } + } + + /* Re-negotiate EEE if it was enabled already */ diff --git a/target/linux/generic/backport-5.15/743-v5.16-0003-net-dsa-b53-Improve-flow-control-setup-on-BCM5301x.patch b/target/linux/generic/backport-5.15/743-v5.16-0003-net-dsa-b53-Improve-flow-control-setup-on-BCM5301x.patch new file mode 100644 index 0000000000..3954ee4aac --- /dev/null +++ b/target/linux/generic/backport-5.15/743-v5.16-0003-net-dsa-b53-Improve-flow-control-setup-on-BCM5301x.patch @@ -0,0 +1,32 @@ +From 3ff26b29230c54fea2353b63124c589b61953e14 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Thu, 16 Sep 2021 14:03:53 +0200 +Subject: [PATCH] net: dsa: b53: Improve flow control setup on BCM5301x +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +According to the Broadcom's reference driver flow control needs to be +enabled for any CPU switch port (5, 7 or 8 - depending on which one is +used). Current code makes it work only for the port 5. Use +dsa_is_cpu_port() which solved that problem. 
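
The gist of the one-line change can be sketched outside the kernel (illustration only; the struct and helper below are made up, while dsa_is_cpu_port() is the real helper the patch switches to): instead of comparing against one hardcoded port number, ask whether the port at hand is whichever CPU port the board actually uses, so flow control is enabled on port 5, 7 or 8 alike.

#include <stdbool.h>
#include <stdio.h>

struct fake_switch {
	int cpu_port;               /* whichever of 5, 7 or 8 the board wired up */
};

static bool is_cpu_port(const struct fake_switch *sw, int port)
{
	return port == sw->cpu_port;
}

int main(void)
{
	struct fake_switch sw = { .cpu_port = 8 };
	int port;

	for (port = 0; port < 9; port++) {
		bool tx_pause = false, rx_pause = false;

		/* old behaviour: only a hardcoded CPU port (port 5) qualified;
		 * patched behaviour: any port acting as CPU port gets pause frames */
		if (is_cpu_port(&sw, port))
			tx_pause = rx_pause = true;

		if (tx_pause && rx_pause)
			printf("port %d: flow control enabled\n", port);
	}
	return 0;
}
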
+ +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Tested-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/dsa/b53/b53_common.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -1187,7 +1187,7 @@ static void b53_adjust_link(struct dsa_s + return; + + /* Enable flow control on BCM5301x's CPU port */ +- if (is5301x(dev) && port == dev->cpu_port) ++ if (is5301x(dev) && dsa_is_cpu_port(ds, port)) + tx_pause = rx_pause = true; + + if (phydev->pause) { diff --git a/target/linux/generic/backport-5.15/743-v5.16-0004-net-dsa-b53-Drop-unused-cpu_port-field.patch b/target/linux/generic/backport-5.15/743-v5.16-0004-net-dsa-b53-Drop-unused-cpu_port-field.patch new file mode 100644 index 0000000000..9e687b1488 --- /dev/null +++ b/target/linux/generic/backport-5.15/743-v5.16-0004-net-dsa-b53-Drop-unused-cpu_port-field.patch @@ -0,0 +1,205 @@ +From 7d5af56418d7d01e43247a33b6fe6492ea871923 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Thu, 16 Sep 2021 14:03:54 +0200 +Subject: [PATCH] net: dsa: b53: Drop unused "cpu_port" field +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +It's set but never used anymore. + +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Tested-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/dsa/b53/b53_common.c | 28 ---------------------------- + drivers/net/dsa/b53/b53_priv.h | 1 - + 2 files changed, 29 deletions(-) + +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -2286,7 +2286,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 2, + .arl_buckets = 1024, + .imp_port = 5, +- .cpu_port = B53_CPU_PORT_25, + .duplex_reg = B53_DUPLEX_STAT_FE, + }, + { +@@ -2297,7 +2296,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 2, + .arl_buckets = 1024, + .imp_port = 5, +- .cpu_port = B53_CPU_PORT_25, + .duplex_reg = B53_DUPLEX_STAT_FE, + }, + { +@@ -2308,7 +2306,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +- .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, +@@ -2322,7 +2319,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +- .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, +@@ -2336,7 +2332,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +- .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS_9798, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, +@@ -2350,7 +2345,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +- .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS_9798, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, +@@ -2365,7 +2359,6 @@ static const struct b53_chip_data b53_sw + .arl_buckets = 1024, + .vta_regs = B53_VTA_REGS, + .imp_port = 8, +- .cpu_port = B53_CPU_PORT, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, +@@ 
-2378,7 +2371,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +- .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, +@@ -2392,7 +2384,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +- .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, +@@ -2406,7 +2397,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +- .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS_63XX, + .duplex_reg = B53_DUPLEX_STAT_63XX, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, +@@ -2420,7 +2410,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, +@@ -2434,7 +2423,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, +@@ -2448,7 +2436,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, +@@ -2462,7 +2449,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, +@@ -2476,7 +2462,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, +@@ -2490,7 +2475,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +- .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, +@@ -2504,7 +2488,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +- .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, +@@ -2518,7 +2501,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 4, + .arl_buckets = 1024, + .imp_port = 8, +- .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, +@@ -2532,7 +2514,6 @@ static const struct b53_chip_data b53_sw + .arl_bins = 4, + .arl_buckets = 256, + .imp_port = 8, +- .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, +@@ -2558,7 +2539,6 @@ static int b53_switch_init(struct b53_de + dev->vta_regs[2] = chip->vta_regs[2]; + dev->jumbo_pm_reg = chip->jumbo_pm_reg; + dev->imp_port = chip->imp_port; +- dev->cpu_port = chip->cpu_port; + dev->num_vlans = chip->vlans; + dev->num_arl_bins = chip->arl_bins; + dev->num_arl_buckets = chip->arl_buckets; +@@ -2590,13 +2570,6 @@ static int 
b53_switch_init(struct b53_de + break; + #endif + } +- } else if (dev->chip_id == BCM53115_DEVICE_ID) { +- u64 strap_value; +- +- b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value); +- /* use second IMP port if GMII is enabled */ +- if (strap_value & SV_GMII_CTRL_115) +- dev->cpu_port = 5; + } + + dev->num_ports = fls(dev->enabled_ports); +--- a/drivers/net/dsa/b53/b53_priv.h ++++ b/drivers/net/dsa/b53/b53_priv.h +@@ -123,7 +123,6 @@ struct b53_device { + /* used ports mask */ + u16 enabled_ports; + unsigned int imp_port; +- unsigned int cpu_port; + + /* connect specific data */ + u8 current_page; diff --git a/target/linux/generic/backport-5.15/744-v5.15-net-dsa-don-t-set-skb-offload_fwd_mark-when-not-offl.patch b/target/linux/generic/backport-5.15/744-v5.15-net-dsa-don-t-set-skb-offload_fwd_mark-when-not-offl.patch new file mode 100644 index 0000000000..ab4fdf8509 --- /dev/null +++ b/target/linux/generic/backport-5.15/744-v5.15-net-dsa-don-t-set-skb-offload_fwd_mark-when-not-offl.patch @@ -0,0 +1,138 @@ +From bea7907837c57a0aaac009931eb14efb056dafab Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean <vladimir.oltean@nxp.com> +Date: Thu, 29 Jul 2021 17:56:00 +0300 +Subject: [PATCH] net: dsa: don't set skb->offload_fwd_mark when not offloading + the bridge + +DSA has gained the recent ability to deal gracefully with upper +interfaces it cannot offload, such as the bridge, bonding or team +drivers. When such uppers exist, the ports are still in standalone mode +as far as the hardware is concerned. + +But when we deliver packets to the software bridge in order for that to +do the forwarding, there is an unpleasant surprise in that the bridge +will refuse to forward them. This is because we unconditionally set +skb->offload_fwd_mark = true, meaning that the bridge thinks the frames +were already forwarded in hardware by us. + +Since dp->bridge_dev is populated only when there is hardware offload +for it, but not in the software fallback case, let's introduce a new +helper that can be called from the tagger data path which sets the +skb->offload_fwd_mark accordingly to zero when there is no hardware +offload for bridging. This lets the bridge forward packets back to other +interfaces of our switch, if needed. + +Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com> +Reviewed-by: Tobias Waldekranz <tobias@waldekranz.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + net/dsa/dsa_priv.h | 14 ++++++++++++++ + net/dsa/tag_brcm.c | 4 ++-- + net/dsa/tag_dsa.c | 15 +++++++++++---- + net/dsa/tag_ksz.c | 2 +- + net/dsa/tag_lan9303.c | 3 ++- + net/dsa/tag_mtk.c | 2 +- + net/dsa/tag_ocelot.c | 2 +- + net/dsa/tag_rtl4_a.c | 2 +- + net/dsa/tag_sja1105.c | 20 ++++++++++++++------ + 9 files changed, 47 insertions(+), 17 deletions(-) + +--- a/net/dsa/dsa_priv.h ++++ b/net/dsa/dsa_priv.h +@@ -266,6 +266,20 @@ static inline struct sk_buff *dsa_untag_ + return skb; + } + ++/* If the ingress port offloads the bridge, we mark the frame as autonomously ++ * forwarded by hardware, so the software bridge doesn't forward in twice, back ++ * to us, because we already did. However, if we're in fallback mode and we do ++ * software bridging, we are not offloading it, therefore the dp->bridge_dev ++ * pointer is not populated, and flooding needs to be done by software (we are ++ * effectively operating in standalone ports mode). 
++ */ ++static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb) ++{ ++ struct dsa_port *dp = dsa_slave_to_port(skb->dev); ++ ++ skb->offload_fwd_mark = !!(dp->bridge_dev); ++} ++ + /* switch.c */ + int dsa_switch_register_notifier(struct dsa_switch *ds); + void dsa_switch_unregister_notifier(struct dsa_switch *ds); +--- a/net/dsa/tag_brcm.c ++++ b/net/dsa/tag_brcm.c +@@ -166,7 +166,7 @@ static struct sk_buff *brcm_tag_rcv_ll(s + /* Remove Broadcom tag and update checksum */ + skb_pull_rcsum(skb, BRCM_TAG_LEN); + +- skb->offload_fwd_mark = 1; ++ dsa_default_offload_fwd_mark(skb); + + return skb; + } +@@ -270,7 +270,7 @@ static struct sk_buff *brcm_leg_tag_rcv( + /* Remove Broadcom tag and update checksum */ + skb_pull_rcsum(skb, BRCM_LEG_TAG_LEN); + +- skb->offload_fwd_mark = 1; ++ dsa_default_offload_fwd_mark(skb); + + /* Move the Ethernet DA and SA */ + memmove(skb->data - ETH_HLEN, +--- a/net/dsa/tag_ksz.c ++++ b/net/dsa/tag_ksz.c +@@ -24,7 +24,7 @@ static struct sk_buff *ksz_common_rcv(st + + pskb_trim_rcsum(skb, skb->len - len); + +- skb->offload_fwd_mark = true; ++ dsa_default_offload_fwd_mark(skb); + + return skb; + } +--- a/net/dsa/tag_lan9303.c ++++ b/net/dsa/tag_lan9303.c +@@ -115,7 +115,8 @@ static struct sk_buff *lan9303_rcv(struc + skb_pull_rcsum(skb, 2 + 2); + memmove(skb->data - ETH_HLEN, skb->data - (ETH_HLEN + LAN9303_TAG_LEN), + 2 * ETH_ALEN); +- skb->offload_fwd_mark = !(lan9303_tag1 & LAN9303_TAG_RX_TRAPPED_TO_CPU); ++ if (!(lan9303_tag1 & LAN9303_TAG_RX_TRAPPED_TO_CPU)) ++ dsa_default_offload_fwd_mark(skb); + + return skb; + } +--- a/net/dsa/tag_mtk.c ++++ b/net/dsa/tag_mtk.c +@@ -104,7 +104,7 @@ static struct sk_buff *mtk_tag_rcv(struc + + /* Only unicast or broadcast frames are offloaded */ + if (likely(!is_multicast_skb)) +- skb->offload_fwd_mark = 1; ++ dsa_default_offload_fwd_mark(skb); + + return skb; + } +--- a/net/dsa/tag_ocelot.c ++++ b/net/dsa/tag_ocelot.c +@@ -225,7 +225,7 @@ static struct sk_buff *ocelot_rcv(struct + */ + return NULL; + +- skb->offload_fwd_mark = 1; ++ dsa_default_offload_fwd_mark(skb); + skb->priority = qos_class; + + /* Ocelot switches copy frames unmodified to the CPU. However, it is +--- a/net/dsa/tag_rtl4_a.c ++++ b/net/dsa/tag_rtl4_a.c +@@ -115,7 +115,7 @@ static struct sk_buff *rtl4a_tag_rcv(str + skb->data - ETH_HLEN - RTL4_A_HDR_LEN, + 2 * ETH_ALEN); + +- skb->offload_fwd_mark = 1; ++ dsa_default_offload_fwd_mark(skb); + + return skb; + } diff --git a/target/linux/generic/backport-5.15/745-v5.16-01-net-phy-at803x-add-support-for-qca-8327-A-variant.patch b/target/linux/generic/backport-5.15/745-v5.16-01-net-phy-at803x-add-support-for-qca-8327-A-variant.patch new file mode 100644 index 0000000000..c82bf913a0 --- /dev/null +++ b/target/linux/generic/backport-5.15/745-v5.16-01-net-phy-at803x-add-support-for-qca-8327-A-variant.patch @@ -0,0 +1,65 @@ +From b4df02b562f4aa14ff6811f30e1b4d2159585c59 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Sun, 19 Sep 2021 18:28:15 +0200 +Subject: net: phy: at803x: add support for qca 8327 A variant internal phy + +For qca8327 internal phy there are 2 different switch variant with 2 +different phy id. Add this missing variant so the internal phy can be +correctly identified and fixed. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/phy/at803x.c | 25 ++++++++++++++++++++----- + 1 file changed, 20 insertions(+), 5 deletions(-) + +--- a/drivers/net/phy/at803x.c ++++ b/drivers/net/phy/at803x.c +@@ -148,7 +148,8 @@ + #define AT803X_PAGE_FIBER 0 + #define AT803X_PAGE_COPPER 1 + +-#define QCA8327_PHY_ID 0x004dd034 ++#define QCA8327_A_PHY_ID 0x004dd033 ++#define QCA8327_B_PHY_ID 0x004dd034 + #define QCA8337_PHY_ID 0x004dd036 + #define QCA8K_PHY_ID_MASK 0xffffffff + +@@ -1329,10 +1330,23 @@ static struct phy_driver at803x_driver[] + .get_strings = at803x_get_strings, + .get_stats = at803x_get_stats, + }, { +- /* QCA8327 */ +- .phy_id = QCA8327_PHY_ID, ++ /* QCA8327-A from switch QCA8327-AL1A */ ++ .phy_id = QCA8327_A_PHY_ID, + .phy_id_mask = QCA8K_PHY_ID_MASK, +- .name = "QCA PHY 8327", ++ .name = "QCA PHY 8327-A", ++ /* PHY_GBIT_FEATURES */ ++ .probe = at803x_probe, ++ .flags = PHY_IS_INTERNAL, ++ .config_init = qca83xx_config_init, ++ .soft_reset = genphy_soft_reset, ++ .get_sset_count = at803x_get_sset_count, ++ .get_strings = at803x_get_strings, ++ .get_stats = at803x_get_stats, ++}, { ++ /* QCA8327-B from switch QCA8327-BL1A */ ++ .phy_id = QCA8327_B_PHY_ID, ++ .phy_id_mask = QCA8K_PHY_ID_MASK, ++ .name = "QCA PHY 8327-B", + /* PHY_GBIT_FEATURES */ + .probe = at803x_probe, + .flags = PHY_IS_INTERNAL, +@@ -1352,7 +1366,8 @@ static struct mdio_device_id __maybe_unu + { PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) }, + { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) }, + { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) }, +- { PHY_ID_MATCH_EXACT(QCA8327_PHY_ID) }, ++ { PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) }, ++ { PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) }, + { } + }; + diff --git a/target/linux/generic/backport-5.15/745-v5.16-02-net-phy-at803x-add-resume-suspend-function-to-qca83x.patch b/target/linux/generic/backport-5.15/745-v5.16-02-net-phy-at803x-add-resume-suspend-function-to-qca83x.patch new file mode 100644 index 0000000000..be24fd5cf7 --- /dev/null +++ b/target/linux/generic/backport-5.15/745-v5.16-02-net-phy-at803x-add-resume-suspend-function-to-qca83x.patch @@ -0,0 +1,45 @@ +From 15b9df4ece17d084f14eb0ca1cf05f2ad497e425 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Sun, 19 Sep 2021 18:28:16 +0200 +Subject: net: phy: at803x: add resume/suspend function to qca83xx phy + +Add resume/suspend function to qca83xx internal phy. +We can't use the at803x generic function as the documentation lacks of +any support for WoL regs. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/phy/at803x.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +--- a/drivers/net/phy/at803x.c ++++ b/drivers/net/phy/at803x.c +@@ -1329,6 +1329,8 @@ static struct phy_driver at803x_driver[] + .get_sset_count = at803x_get_sset_count, + .get_strings = at803x_get_strings, + .get_stats = at803x_get_stats, ++ .suspend = genphy_suspend, ++ .resume = genphy_resume, + }, { + /* QCA8327-A from switch QCA8327-AL1A */ + .phy_id = QCA8327_A_PHY_ID, +@@ -1342,6 +1344,8 @@ static struct phy_driver at803x_driver[] + .get_sset_count = at803x_get_sset_count, + .get_strings = at803x_get_strings, + .get_stats = at803x_get_stats, ++ .suspend = genphy_suspend, ++ .resume = genphy_resume, + }, { + /* QCA8327-B from switch QCA8327-BL1A */ + .phy_id = QCA8327_B_PHY_ID, +@@ -1355,6 +1359,8 @@ static struct phy_driver at803x_driver[] + .get_sset_count = at803x_get_sset_count, + .get_strings = at803x_get_strings, + .get_stats = at803x_get_stats, ++ .suspend = genphy_suspend, ++ .resume = genphy_resume, + }, }; + + module_phy_driver(at803x_driver); diff --git a/target/linux/generic/backport-5.15/745-v5.16-03-net-phy-at803x-fix-spacing-and-improve-name-for-83xx.patch b/target/linux/generic/backport-5.15/745-v5.16-03-net-phy-at803x-fix-spacing-and-improve-name-for-83xx.patch new file mode 100644 index 0000000000..23f574c76f --- /dev/null +++ b/target/linux/generic/backport-5.15/745-v5.16-03-net-phy-at803x-fix-spacing-and-improve-name-for-83xx.patch @@ -0,0 +1,95 @@ +From d44fd8604a4ab92119adb35f05fd87612af722b5 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Sun, 19 Sep 2021 18:28:17 +0200 +Subject: net: phy: at803x: fix spacing and improve name for 83xx phy + +Fix spacing and improve name for 83xx phy following other phy in the +same driver. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/phy/at803x.c | 60 ++++++++++++++++++++++++------------------------ + 1 file changed, 30 insertions(+), 30 deletions(-) + +--- a/drivers/net/phy/at803x.c ++++ b/drivers/net/phy/at803x.c +@@ -1318,47 +1318,47 @@ static struct phy_driver at803x_driver[] + .config_aneg = at803x_config_aneg, + }, { + /* QCA8337 */ +- .phy_id = QCA8337_PHY_ID, +- .phy_id_mask = QCA8K_PHY_ID_MASK, +- .name = "QCA PHY 8337", ++ .phy_id = QCA8337_PHY_ID, ++ .phy_id_mask = QCA8K_PHY_ID_MASK, ++ .name = "Qualcomm Atheros 8337 internal PHY", + /* PHY_GBIT_FEATURES */ +- .probe = at803x_probe, +- .flags = PHY_IS_INTERNAL, +- .config_init = qca83xx_config_init, +- .soft_reset = genphy_soft_reset, +- .get_sset_count = at803x_get_sset_count, +- .get_strings = at803x_get_strings, +- .get_stats = at803x_get_stats, ++ .probe = at803x_probe, ++ .flags = PHY_IS_INTERNAL, ++ .config_init = qca83xx_config_init, ++ .soft_reset = genphy_soft_reset, ++ .get_sset_count = at803x_get_sset_count, ++ .get_strings = at803x_get_strings, ++ .get_stats = at803x_get_stats, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { + /* QCA8327-A from switch QCA8327-AL1A */ +- .phy_id = QCA8327_A_PHY_ID, +- .phy_id_mask = QCA8K_PHY_ID_MASK, +- .name = "QCA PHY 8327-A", ++ .phy_id = QCA8327_A_PHY_ID, ++ .phy_id_mask = QCA8K_PHY_ID_MASK, ++ .name = "Qualcomm Atheros 8327-A internal PHY", + /* PHY_GBIT_FEATURES */ +- .probe = at803x_probe, +- .flags = PHY_IS_INTERNAL, +- .config_init = qca83xx_config_init, +- .soft_reset = genphy_soft_reset, +- .get_sset_count = at803x_get_sset_count, +- .get_strings = at803x_get_strings, +- .get_stats = at803x_get_stats, ++ .probe = at803x_probe, ++ .flags = PHY_IS_INTERNAL, ++ .config_init = qca83xx_config_init, ++ .soft_reset = genphy_soft_reset, ++ .get_sset_count = at803x_get_sset_count, ++ .get_strings = at803x_get_strings, ++ .get_stats = at803x_get_stats, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { + /* QCA8327-B from switch QCA8327-BL1A */ +- .phy_id = QCA8327_B_PHY_ID, +- .phy_id_mask = QCA8K_PHY_ID_MASK, +- .name = "QCA PHY 8327-B", ++ .phy_id = QCA8327_B_PHY_ID, ++ .phy_id_mask = QCA8K_PHY_ID_MASK, ++ .name = "Qualcomm Atheros 8327-B internal PHY", + /* PHY_GBIT_FEATURES */ +- .probe = at803x_probe, +- .flags = PHY_IS_INTERNAL, +- .config_init = qca83xx_config_init, +- .soft_reset = genphy_soft_reset, +- .get_sset_count = at803x_get_sset_count, +- .get_strings = at803x_get_strings, +- .get_stats = at803x_get_stats, ++ .probe = at803x_probe, ++ .flags = PHY_IS_INTERNAL, ++ .config_init = qca83xx_config_init, ++ .soft_reset = genphy_soft_reset, ++ .get_sset_count = at803x_get_sset_count, ++ .get_strings = at803x_get_strings, ++ .get_stats = at803x_get_stats, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, }; diff --git a/target/linux/generic/backport-5.15/746-v5.16-01-net-phy-at803x-fix-resume-for-QCA8327-phy.patch b/target/linux/generic/backport-5.15/746-v5.16-01-net-phy-at803x-fix-resume-for-QCA8327-phy.patch new file mode 100644 index 0000000000..5dfe27dd24 --- /dev/null +++ b/target/linux/generic/backport-5.15/746-v5.16-01-net-phy-at803x-fix-resume-for-QCA8327-phy.patch @@ -0,0 +1,131 @@ +From ba3c01ee02ed0d821c9f241f179bbc9457542b8f Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Sun, 10 Oct 2021 00:46:15 +0200 +Subject: net: phy: at803x: fix resume for QCA8327 phy + +From Documentation phy resume triggers phy reset and restart +auto-negotiation. 
Add a dedicated function to wait reset to finish as +it was notice a regression where port sometime are not reliable after a +suspend/resume session. The reset wait logic is copied from phy_poll_reset. +Add dedicated suspend function to use genphy_suspend only with QCA8337 +phy and set only additional debug settings for QCA8327. With more test +it was reported that QCA8327 doesn't proprely support this mode and +using this cause the unreliability of the switch ports, especially the +malfunction of the port0. + +Fixes: 15b9df4ece17 ("net: phy: at803x: add resume/suspend function to qca83xx phy") +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/phy/at803x.c | 69 +++++++++++++++++++++++++++++++++++++++++++----- + 1 file changed, 63 insertions(+), 6 deletions(-) + +--- a/drivers/net/phy/at803x.c ++++ b/drivers/net/phy/at803x.c +@@ -92,9 +92,14 @@ + #define AT803X_DEBUG_REG_5 0x05 + #define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8) + ++#define AT803X_DEBUG_REG_HIB_CTRL 0x0b ++#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10) ++#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13) ++ + #define AT803X_DEBUG_REG_3C 0x3C + + #define AT803X_DEBUG_REG_3D 0x3D ++#define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6) + + #define AT803X_DEBUG_REG_1F 0x1F + #define AT803X_DEBUG_PLL_ON BIT(2) +@@ -1220,6 +1225,58 @@ static int qca83xx_config_init(struct ph + return 0; + } + ++static int qca83xx_resume(struct phy_device *phydev) ++{ ++ int ret, val; ++ ++ /* Skip reset if not suspended */ ++ if (!phydev->suspended) ++ return 0; ++ ++ /* Reinit the port, reset values set by suspend */ ++ qca83xx_config_init(phydev); ++ ++ /* Reset the port on port resume */ ++ phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE); ++ ++ /* On resume from suspend the switch execute a reset and ++ * restart auto-negotiation. Wait for reset to complete. ++ */ ++ ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET), ++ 50000, 600000, true); ++ if (ret) ++ return ret; ++ ++ msleep(1); ++ ++ return 0; ++} ++ ++static int qca83xx_suspend(struct phy_device *phydev) ++{ ++ u16 mask = 0; ++ ++ /* Only QCA8337 support actual suspend. ++ * QCA8327 cause port unreliability when phy suspend ++ * is set. 
++ */ ++ if (phydev->drv->phy_id == QCA8337_PHY_ID) { ++ genphy_suspend(phydev); ++ } else { ++ mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX); ++ phy_modify(phydev, MII_BMCR, mask, 0); ++ } ++ ++ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_3D, ++ AT803X_DEBUG_GATE_CLK_IN1000, 0); ++ ++ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL, ++ AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE | ++ AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0); ++ ++ return 0; ++} ++ + static struct phy_driver at803x_driver[] = { + { + /* Qualcomm Atheros AR8035 */ +@@ -1329,8 +1386,8 @@ static struct phy_driver at803x_driver[] + .get_sset_count = at803x_get_sset_count, + .get_strings = at803x_get_strings, + .get_stats = at803x_get_stats, +- .suspend = genphy_suspend, +- .resume = genphy_resume, ++ .suspend = qca83xx_suspend, ++ .resume = qca83xx_resume, + }, { + /* QCA8327-A from switch QCA8327-AL1A */ + .phy_id = QCA8327_A_PHY_ID, +@@ -1344,8 +1401,8 @@ static struct phy_driver at803x_driver[] + .get_sset_count = at803x_get_sset_count, + .get_strings = at803x_get_strings, + .get_stats = at803x_get_stats, +- .suspend = genphy_suspend, +- .resume = genphy_resume, ++ .suspend = qca83xx_suspend, ++ .resume = qca83xx_resume, + }, { + /* QCA8327-B from switch QCA8327-BL1A */ + .phy_id = QCA8327_B_PHY_ID, +@@ -1359,8 +1416,8 @@ static struct phy_driver at803x_driver[] + .get_sset_count = at803x_get_sset_count, + .get_strings = at803x_get_strings, + .get_stats = at803x_get_stats, +- .suspend = genphy_suspend, +- .resume = genphy_resume, ++ .suspend = qca83xx_suspend, ++ .resume = qca83xx_resume, + }, }; + + module_phy_driver(at803x_driver); diff --git a/target/linux/generic/backport-5.15/746-v5.16-02-net-phy-at803x-add-DAC-amplitude-fix-for-8327-phy.patch b/target/linux/generic/backport-5.15/746-v5.16-02-net-phy-at803x-add-DAC-amplitude-fix-for-8327-phy.patch new file mode 100644 index 0000000000..aeb43e2f67 --- /dev/null +++ b/target/linux/generic/backport-5.15/746-v5.16-02-net-phy-at803x-add-DAC-amplitude-fix-for-8327-phy.patch @@ -0,0 +1,91 @@ +From 1ca8311949aec5c9447645731ef1c6bc5bd71350 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Sun, 10 Oct 2021 00:46:16 +0200 +Subject: net: phy: at803x: add DAC amplitude fix for 8327 phy + +QCA8327 internal phy require DAC amplitude adjustement set to +6% with +100m speed. Also add additional define to report a change of the same +reg in QCA8337. (different scope it does set 1000m voltage) +Add link_change_notify function to set the proper amplitude adjustement +on PHY_RUNNING state and disable on any other state. + +Fixes: b4df02b562f4 ("net: phy: at803x: add support for qca 8327 A variant internal phy") +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/phy/at803x.c | 33 +++++++++++++++++++++++++++++++++ + 1 file changed, 33 insertions(+) + +--- a/drivers/net/phy/at803x.c ++++ b/drivers/net/phy/at803x.c +@@ -87,6 +87,8 @@ + #define AT803X_PSSR_MR_AN_COMPLETE 0x0200 + + #define AT803X_DEBUG_REG_0 0x00 ++#define QCA8327_DEBUG_MANU_CTRL_EN BIT(2) ++#define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2) + #define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15) + + #define AT803X_DEBUG_REG_5 0x05 +@@ -1222,9 +1224,37 @@ static int qca83xx_config_init(struct ph + break; + } + ++ /* QCA8327 require DAC amplitude adjustment for 100m set to +6%. ++ * Disable on init and enable only with 100m speed following ++ * qca original source code. 
++ */ ++ if (phydev->drv->phy_id == QCA8327_A_PHY_ID || ++ phydev->drv->phy_id == QCA8327_B_PHY_ID) ++ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, ++ QCA8327_DEBUG_MANU_CTRL_EN, 0); ++ + return 0; + } + ++static void qca83xx_link_change_notify(struct phy_device *phydev) ++{ ++ /* QCA8337 doesn't require DAC Amplitude adjustement */ ++ if (phydev->drv->phy_id == QCA8337_PHY_ID) ++ return; ++ ++ /* Set DAC Amplitude adjustment to +6% for 100m on link running */ ++ if (phydev->state == PHY_RUNNING) { ++ if (phydev->speed == SPEED_100) ++ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, ++ QCA8327_DEBUG_MANU_CTRL_EN, ++ QCA8327_DEBUG_MANU_CTRL_EN); ++ } else { ++ /* Reset DAC Amplitude adjustment */ ++ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, ++ QCA8327_DEBUG_MANU_CTRL_EN, 0); ++ } ++} ++ + static int qca83xx_resume(struct phy_device *phydev) + { + int ret, val; +@@ -1379,6 +1409,7 @@ static struct phy_driver at803x_driver[] + .phy_id_mask = QCA8K_PHY_ID_MASK, + .name = "Qualcomm Atheros 8337 internal PHY", + /* PHY_GBIT_FEATURES */ ++ .link_change_notify = qca83xx_link_change_notify, + .probe = at803x_probe, + .flags = PHY_IS_INTERNAL, + .config_init = qca83xx_config_init, +@@ -1394,6 +1425,7 @@ static struct phy_driver at803x_driver[] + .phy_id_mask = QCA8K_PHY_ID_MASK, + .name = "Qualcomm Atheros 8327-A internal PHY", + /* PHY_GBIT_FEATURES */ ++ .link_change_notify = qca83xx_link_change_notify, + .probe = at803x_probe, + .flags = PHY_IS_INTERNAL, + .config_init = qca83xx_config_init, +@@ -1409,6 +1441,7 @@ static struct phy_driver at803x_driver[] + .phy_id_mask = QCA8K_PHY_ID_MASK, + .name = "Qualcomm Atheros 8327-B internal PHY", + /* PHY_GBIT_FEATURES */ ++ .link_change_notify = qca83xx_link_change_notify, + .probe = at803x_probe, + .flags = PHY_IS_INTERNAL, + .config_init = qca83xx_config_init, diff --git a/target/linux/generic/backport-5.15/746-v5.16-03-net-phy-at803x-enable-prefer-master-for-83xx-interna.patch b/target/linux/generic/backport-5.15/746-v5.16-03-net-phy-at803x-enable-prefer-master-for-83xx-interna.patch new file mode 100644 index 0000000000..2352fa0e44 --- /dev/null +++ b/target/linux/generic/backport-5.15/746-v5.16-03-net-phy-at803x-enable-prefer-master-for-83xx-interna.patch @@ -0,0 +1,27 @@ +From 9d1c29b4028557a496be9c5eb2b4b86063700636 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Sun, 10 Oct 2021 00:46:17 +0200 +Subject: net: phy: at803x: enable prefer master for 83xx internal phy + +From original QCA source code the port was set to prefer master as port +type in 1000BASE-T mode. Apply the same settings also here. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/phy/at803x.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/net/phy/at803x.c ++++ b/drivers/net/phy/at803x.c +@@ -1233,6 +1233,9 @@ static int qca83xx_config_init(struct ph + at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, + QCA8327_DEBUG_MANU_CTRL_EN, 0); + ++ /* Following original QCA sourcecode set port to prefer master */ ++ phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER); ++ + return 0; + } + diff --git a/target/linux/generic/backport-5.15/746-v5.16-04-net-phy-at803x-better-describe-debug-regs.patch b/target/linux/generic/backport-5.15/746-v5.16-04-net-phy-at803x-better-describe-debug-regs.patch new file mode 100644 index 0000000000..9c28a893f1 --- /dev/null +++ b/target/linux/generic/backport-5.15/746-v5.16-04-net-phy-at803x-better-describe-debug-regs.patch @@ -0,0 +1,127 @@ +From 67999555ff42e91de7654488d9a7735bd9e84555 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Sun, 10 Oct 2021 00:46:18 +0200 +Subject: net: phy: at803x: better describe debug regs + +Give a name to known debug regs from Documentation instead of using +unknown hex values. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/phy/at803x.c | 30 +++++++++++++++--------------- + 1 file changed, 15 insertions(+), 15 deletions(-) + +--- a/drivers/net/phy/at803x.c ++++ b/drivers/net/phy/at803x.c +@@ -86,12 +86,12 @@ + #define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/ + #define AT803X_PSSR_MR_AN_COMPLETE 0x0200 + +-#define AT803X_DEBUG_REG_0 0x00 ++#define AT803X_DEBUG_ANALOG_TEST_CTRL 0x00 + #define QCA8327_DEBUG_MANU_CTRL_EN BIT(2) + #define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2) + #define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15) + +-#define AT803X_DEBUG_REG_5 0x05 ++#define AT803X_DEBUG_SYSTEM_CTRL_MODE 0x05 + #define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8) + + #define AT803X_DEBUG_REG_HIB_CTRL 0x0b +@@ -100,7 +100,7 @@ + + #define AT803X_DEBUG_REG_3C 0x3C + +-#define AT803X_DEBUG_REG_3D 0x3D ++#define AT803X_DEBUG_REG_GREEN 0x3D + #define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6) + + #define AT803X_DEBUG_REG_1F 0x1F +@@ -274,25 +274,25 @@ static int at803x_read_page(struct phy_d + + static int at803x_enable_rx_delay(struct phy_device *phydev) + { +- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, 0, ++ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0, + AT803X_DEBUG_RX_CLK_DLY_EN); + } + + static int at803x_enable_tx_delay(struct phy_device *phydev) + { +- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, 0, ++ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0, + AT803X_DEBUG_TX_CLK_DLY_EN); + } + + static int at803x_disable_rx_delay(struct phy_device *phydev) + { +- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, ++ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, + AT803X_DEBUG_RX_CLK_DLY_EN, 0); + } + + static int at803x_disable_tx_delay(struct phy_device *phydev) + { +- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, ++ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, + AT803X_DEBUG_TX_CLK_DLY_EN, 0); + } + +@@ -1208,9 +1208,9 @@ static int qca83xx_config_init(struct ph + switch (switch_revision) { + case 1: + /* For 100M waveform */ +- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_0, 0x02ea); ++ at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea); + /* Turn on Gigabit clock */ 
+- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3D, 0x68a0); ++ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0); + break; + + case 2: +@@ -1218,8 +1218,8 @@ static int qca83xx_config_init(struct ph + fallthrough; + case 4: + phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f); +- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3D, 0x6860); +- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_5, 0x2c46); ++ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860); ++ at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46); + at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000); + break; + } +@@ -1230,7 +1230,7 @@ static int qca83xx_config_init(struct ph + */ + if (phydev->drv->phy_id == QCA8327_A_PHY_ID || + phydev->drv->phy_id == QCA8327_B_PHY_ID) +- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, ++ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, + QCA8327_DEBUG_MANU_CTRL_EN, 0); + + /* Following original QCA sourcecode set port to prefer master */ +@@ -1248,12 +1248,12 @@ static void qca83xx_link_change_notify(s + /* Set DAC Amplitude adjustment to +6% for 100m on link running */ + if (phydev->state == PHY_RUNNING) { + if (phydev->speed == SPEED_100) +- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, ++ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, + QCA8327_DEBUG_MANU_CTRL_EN, + QCA8327_DEBUG_MANU_CTRL_EN); + } else { + /* Reset DAC Amplitude adjustment */ +- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, ++ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, + QCA8327_DEBUG_MANU_CTRL_EN, 0); + } + } +@@ -1300,7 +1300,7 @@ static int qca83xx_suspend(struct phy_de + phy_modify(phydev, MII_BMCR, mask, 0); + } + +- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_3D, ++ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN, + AT803X_DEBUG_GATE_CLK_IN1000, 0); + + at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL, diff --git a/target/linux/generic/backport-5.15/747-v5.16-01-dsa-qca8k-add-mac-power-sel-support.patch b/target/linux/generic/backport-5.15/747-v5.16-01-dsa-qca8k-add-mac-power-sel-support.patch new file mode 100644 index 0000000000..c8d424de38 --- /dev/null +++ b/target/linux/generic/backport-5.15/747-v5.16-01-dsa-qca8k-add-mac-power-sel-support.patch @@ -0,0 +1,80 @@ +From d8b6f5bae6d3b648a67b6958cb98e4e97256d652 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Thu, 14 Oct 2021 00:39:06 +0200 +Subject: dsa: qca8k: add mac_power_sel support + +Add missing mac power sel support needed for ipq8064/5 SoC that require +1.8v for the internal regulator port instead of the default 1.5v. +If other device needs this, consider adding a dedicated binding to +support this. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 31 +++++++++++++++++++++++++++++++ + drivers/net/dsa/qca8k.h | 5 +++++ + 2 files changed, 36 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -951,6 +951,33 @@ qca8k_setup_of_rgmii_delay(struct qca8k_ + } + + static int ++qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv) ++{ ++ u32 mask = 0; ++ int ret = 0; ++ ++ /* SoC specific settings for ipq8064. ++ * If more device require this consider adding ++ * a dedicated binding. 
++ */ ++ if (of_machine_is_compatible("qcom,ipq8064")) ++ mask |= QCA8K_MAC_PWR_RGMII0_1_8V; ++ ++ /* SoC specific settings for ipq8065 */ ++ if (of_machine_is_compatible("qcom,ipq8065")) ++ mask |= QCA8K_MAC_PWR_RGMII1_1_8V; ++ ++ if (mask) { ++ ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL, ++ QCA8K_MAC_PWR_RGMII0_1_8V | ++ QCA8K_MAC_PWR_RGMII1_1_8V, ++ mask); ++ } ++ ++ return ret; ++} ++ ++static int + qca8k_setup(struct dsa_switch *ds) + { + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; +@@ -979,6 +1006,10 @@ qca8k_setup(struct dsa_switch *ds) + if (ret) + return ret; + ++ ret = qca8k_setup_mac_pwr_sel(priv); ++ if (ret) ++ return ret; ++ + /* Enable CPU Port */ + ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, + QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -100,6 +100,11 @@ + #define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22) + #define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22) + ++/* MAC_PWR_SEL registers */ ++#define QCA8K_REG_MAC_PWR_SEL 0x0e4 ++#define QCA8K_MAC_PWR_RGMII1_1_8V BIT(18) ++#define QCA8K_MAC_PWR_RGMII0_1_8V BIT(19) ++ + /* EEE control registers */ + #define QCA8K_REG_EEE_CTRL 0x100 + #define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2) diff --git a/target/linux/generic/backport-5.15/747-v5.16-02-dt-bindings-net-dsa-qca8k-Add-SGMII-clock-phase-prop.patch b/target/linux/generic/backport-5.15/747-v5.16-02-dt-bindings-net-dsa-qca8k-Add-SGMII-clock-phase-prop.patch new file mode 100644 index 0000000000..bd768ec27d --- /dev/null +++ b/target/linux/generic/backport-5.15/747-v5.16-02-dt-bindings-net-dsa-qca8k-Add-SGMII-clock-phase-prop.patch @@ -0,0 +1,30 @@ +From fdbf35df9c091db9c46e57e9938e3f7a4f603a7c Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Thu, 14 Oct 2021 00:39:07 +0200 +Subject: dt-bindings: net: dsa: qca8k: Add SGMII clock phase properties + +Add names and descriptions of additional PORT0_PAD_CTRL properties. +qca,sgmii-(rx|tx)clk-falling-edge are for setting the respective clock +phase to failling edge. + +Co-developed-by: Matthew Hagan <mnhagan88@gmail.com> +Signed-off-by: Matthew Hagan <mnhagan88@gmail.com> +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + Documentation/devicetree/bindings/net/dsa/qca8k.txt | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt ++++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt +@@ -37,6 +37,10 @@ A CPU port node has the following option + managed entity. See + Documentation/devicetree/bindings/net/fixed-link.txt + for details. ++- qca,sgmii-rxclk-falling-edge: Set the receive clock phase to falling edge. ++ Mostly used in qca8327 with CPU port 0 set to ++ sgmii. ++- qca,sgmii-txclk-falling-edge: Set the transmit clock phase to falling edge. 
+ + For QCA8K the 'fixed-link' sub-node supports only the following properties: + diff --git a/target/linux/generic/backport-5.15/747-v5.16-03-net-dsa-qca8k-add-support-for-sgmii-falling-edge.patch b/target/linux/generic/backport-5.15/747-v5.16-03-net-dsa-qca8k-add-support-for-sgmii-falling-edge.patch new file mode 100644 index 0000000000..e464452d82 --- /dev/null +++ b/target/linux/generic/backport-5.15/747-v5.16-03-net-dsa-qca8k-add-support-for-sgmii-falling-edge.patch @@ -0,0 +1,127 @@ +From 6c43809bf1bee76c434e365a26546a92a5fbec14 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Thu, 14 Oct 2021 00:39:08 +0200 +Subject: net: dsa: qca8k: add support for sgmii falling edge + +Add support for this in the qca8k driver. Also add support for SGMII +rx/tx clock falling edge. This is only present for pad0, pad5 and +pad6 have these bit reserved from Documentation. Add a comment that this +is hardcoded to PAD0 as qca8327/28/34/37 have an unique sgmii line and +setting falling in port0 applies to both configuration with sgmii used +for port0 or port6. + +Co-developed-by: Matthew Hagan <mnhagan88@gmail.com> +Signed-off-by: Matthew Hagan <mnhagan88@gmail.com> +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++++ + drivers/net/dsa/qca8k.h | 4 ++++ + 2 files changed, 67 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -978,6 +978,42 @@ qca8k_setup_mac_pwr_sel(struct qca8k_pri + } + + static int ++qca8k_parse_port_config(struct qca8k_priv *priv) ++{ ++ struct device_node *port_dn; ++ phy_interface_t mode; ++ struct dsa_port *dp; ++ int port, ret; ++ ++ /* We have 2 CPU port. 
Check them */ ++ for (port = 0; port < QCA8K_NUM_PORTS; port++) { ++ /* Skip every other port */ ++ if (port != 0 && port != 6) ++ continue; ++ ++ dp = dsa_to_port(priv->ds, port); ++ port_dn = dp->dn; ++ ++ if (!of_device_is_available(port_dn)) ++ continue; ++ ++ ret = of_get_phy_mode(port_dn, &mode); ++ if (ret) ++ continue; ++ ++ if (mode == PHY_INTERFACE_MODE_SGMII) { ++ if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge")) ++ priv->sgmii_tx_clk_falling_edge = true; ++ ++ if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge")) ++ priv->sgmii_rx_clk_falling_edge = true; ++ } ++ } ++ ++ return 0; ++} ++ ++static int + qca8k_setup(struct dsa_switch *ds) + { + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; +@@ -990,6 +1026,11 @@ qca8k_setup(struct dsa_switch *ds) + return -EINVAL; + } + ++ /* Parse CPU port config to be later used in phy_link mac_config */ ++ ret = qca8k_parse_port_config(priv); ++ if (ret) ++ return ret; ++ + mutex_init(&priv->reg_mutex); + + /* Start by setting up the register mapping */ +@@ -1274,6 +1315,28 @@ qca8k_phylink_mac_config(struct dsa_swit + } + + qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val); ++ ++ /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and ++ * falling edge is set writing in the PORT0 PAD reg ++ */ ++ if (priv->switch_id == QCA8K_ID_QCA8327 || ++ priv->switch_id == QCA8K_ID_QCA8337) ++ reg = QCA8K_REG_PORT0_PAD_CTRL; ++ ++ val = 0; ++ ++ /* SGMII Clock phase configuration */ ++ if (priv->sgmii_rx_clk_falling_edge) ++ val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE; ++ ++ if (priv->sgmii_tx_clk_falling_edge) ++ val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE; ++ ++ if (val) ++ ret = qca8k_rmw(priv, reg, ++ QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE | ++ QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE, ++ val); + break; + default: + dev_err(ds->dev, "xMII mode %s not supported for port %d\n", +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -35,6 +35,8 @@ + #define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8) + #define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8) + #define QCA8K_REG_PORT0_PAD_CTRL 0x004 ++#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19) ++#define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18) + #define QCA8K_REG_PORT5_PAD_CTRL 0x008 + #define QCA8K_REG_PORT6_PAD_CTRL 0x00c + #define QCA8K_PORT_PAD_RGMII_EN BIT(26) +@@ -260,6 +262,8 @@ struct qca8k_priv { + u8 switch_revision; + u8 rgmii_tx_delay; + u8 rgmii_rx_delay; ++ bool sgmii_rx_clk_falling_edge; ++ bool sgmii_tx_clk_falling_edge; + bool legacy_phy_port_mapping; + struct regmap *regmap; + struct mii_bus *bus; diff --git a/target/linux/generic/backport-5.15/747-v5.16-04-dt-bindings-net-dsa-qca8k-Document-support-for-CPU-p.patch b/target/linux/generic/backport-5.15/747-v5.16-04-dt-bindings-net-dsa-qca8k-Document-support-for-CPU-p.patch new file mode 100644 index 0000000000..606ac0af3d --- /dev/null +++ b/target/linux/generic/backport-5.15/747-v5.16-04-dt-bindings-net-dsa-qca8k-Document-support-for-CPU-p.patch @@ -0,0 +1,29 @@ +From 731d613338ec6de482053ffa3f71be2325b0f8eb Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Thu, 14 Oct 2021 00:39:09 +0200 +Subject: dt-bindings: net: dsa: qca8k: Document support for CPU port 6 + +The switch now support CPU port to be set 6 instead of be hardcoded to +0. Document support for it and describe logic selection. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + Documentation/devicetree/bindings/net/dsa/qca8k.txt | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt ++++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt +@@ -29,7 +29,11 @@ the mdio MASTER is used as communication + Don't use mixed external and internal mdio-bus configurations, as this is + not supported by the hardware. + +-The CPU port of this switch is always port 0. ++This switch support 2 CPU port. Normally and advised configuration is with ++CPU port set to port 0. It is also possible to set the CPU port to port 6 ++if the device requires it. The driver will configure the switch to the defined ++port. With both CPU port declared the first CPU port is selected as primary ++and the secondary CPU ignored. + + A CPU port node has the following optional node: + diff --git a/target/linux/generic/backport-5.15/747-v5.16-05-net-dsa-qca8k-add-support-for-cpu-port-6.patch b/target/linux/generic/backport-5.15/747-v5.16-05-net-dsa-qca8k-add-support-for-cpu-port-6.patch new file mode 100644 index 0000000000..320db8fa9f --- /dev/null +++ b/target/linux/generic/backport-5.15/747-v5.16-05-net-dsa-qca8k-add-support-for-cpu-port-6.patch @@ -0,0 +1,153 @@ +From 3fcf734aa482487df83cf8f18608438fcf59127f Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Thu, 14 Oct 2021 00:39:10 +0200 +Subject: net: dsa: qca8k: add support for cpu port 6 + +Currently CPU port is always hardcoded to port 0. This switch have 2 CPU +ports. The original intention of this driver seems to be use the +mac06_exchange bit to swap MAC0 with MAC6 in the strange configuration +where device have connected only the CPU port 6. To skip the +introduction of a new binding, rework the driver to address the +secondary CPU port as primary and drop any reference of hardcoded port. +With configuration of mac06 exchange, just skip the definition of port0 +and define the CPU port as a secondary. The driver will autoconfigure +the switch to use that as the primary CPU port. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 51 ++++++++++++++++++++++++++++++++++--------------- + drivers/net/dsa/qca8k.h | 2 -- + 2 files changed, 36 insertions(+), 17 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -977,6 +977,22 @@ qca8k_setup_mac_pwr_sel(struct qca8k_pri + return ret; + } + ++static int qca8k_find_cpu_port(struct dsa_switch *ds) ++{ ++ struct qca8k_priv *priv = ds->priv; ++ ++ /* Find the connected cpu port. Valid port are 0 or 6 */ ++ if (dsa_is_cpu_port(ds, 0)) ++ return 0; ++ ++ dev_dbg(priv->dev, "port 0 is not the CPU port. 
Checking port 6"); ++ ++ if (dsa_is_cpu_port(ds, 6)) ++ return 6; ++ ++ return -EINVAL; ++} ++ + static int + qca8k_parse_port_config(struct qca8k_priv *priv) + { +@@ -1017,13 +1033,13 @@ static int + qca8k_setup(struct dsa_switch *ds) + { + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; +- int ret, i; ++ int cpu_port, ret, i; + u32 mask; + +- /* Make sure that port 0 is the cpu port */ +- if (!dsa_is_cpu_port(ds, 0)) { +- dev_err(priv->dev, "port 0 is not the CPU port"); +- return -EINVAL; ++ cpu_port = qca8k_find_cpu_port(ds); ++ if (cpu_port < 0) { ++ dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6"); ++ return cpu_port; + } + + /* Parse CPU port config to be later used in phy_link mac_config */ +@@ -1065,7 +1081,7 @@ qca8k_setup(struct dsa_switch *ds) + dev_warn(priv->dev, "mib init failed"); + + /* Enable QCA header mode on the cpu port */ +- ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT), ++ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(cpu_port), + QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S | + QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S); + if (ret) { +@@ -1087,10 +1103,10 @@ qca8k_setup(struct dsa_switch *ds) + + /* Forward all unknown frames to CPU port for Linux processing */ + ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, +- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S | +- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S | +- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S | +- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S); ++ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S | ++ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S | ++ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S | ++ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S); + if (ret) + return ret; + +@@ -1098,7 +1114,7 @@ qca8k_setup(struct dsa_switch *ds) + for (i = 0; i < QCA8K_NUM_PORTS; i++) { + /* CPU port gets connected to all user ports of the switch */ + if (dsa_is_cpu_port(ds, i)) { +- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT), ++ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(cpu_port), + QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds)); + if (ret) + return ret; +@@ -1110,7 +1126,7 @@ qca8k_setup(struct dsa_switch *ds) + + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), + QCA8K_PORT_LOOKUP_MEMBER, +- BIT(QCA8K_CPU_PORT)); ++ BIT(cpu_port)); + if (ret) + return ret; + +@@ -1616,9 +1632,12 @@ static int + qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) + { + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; +- int port_mask = BIT(QCA8K_CPU_PORT); ++ int port_mask, cpu_port; + int i, ret; + ++ cpu_port = dsa_to_port(ds, port)->cpu_dp->index; ++ port_mask = BIT(cpu_port); ++ + for (i = 1; i < QCA8K_NUM_PORTS; i++) { + if (dsa_to_port(ds, i)->bridge_dev != br) + continue; +@@ -1645,7 +1664,9 @@ static void + qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) + { + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; +- int i; ++ int cpu_port, i; ++ ++ cpu_port = dsa_to_port(ds, port)->cpu_dp->index; + + for (i = 1; i < QCA8K_NUM_PORTS; i++) { + if (dsa_to_port(ds, i)->bridge_dev != br) +@@ -1662,7 +1683,7 @@ qca8k_port_bridge_leave(struct dsa_switc + * this port + */ + qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), +- QCA8K_PORT_LOOKUP_MEMBER, BIT(QCA8K_CPU_PORT)); ++ QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port)); + } + + static int +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -24,8 +24,6 @@ + + #define QCA8K_NUM_FDB_RECORDS 2048 + +-#define QCA8K_CPU_PORT 0 +- + 
#define QCA8K_PORT_VID_DEF 1 + + /* Global control registers */ diff --git a/target/linux/generic/backport-5.15/747-v5.16-06-net-dsa-qca8k-rework-rgmii-delay-logic-and-scan-for-.patch b/target/linux/generic/backport-5.15/747-v5.16-06-net-dsa-qca8k-rework-rgmii-delay-logic-and-scan-for-.patch new file mode 100644 index 0000000000..de201764f9 --- /dev/null +++ b/target/linux/generic/backport-5.15/747-v5.16-06-net-dsa-qca8k-rework-rgmii-delay-logic-and-scan-for-.patch @@ -0,0 +1,295 @@ +From 5654ec78dd7e64b1e04777b24007344329e6a63b Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Thu, 14 Oct 2021 00:39:11 +0200 +Subject: net: dsa: qca8k: rework rgmii delay logic and scan for cpu port 6 + +Future proof commit. This switch have 2 CPU ports and one valid +configuration is first CPU port set to sgmii and second CPU port set to +rgmii-id. The current implementation detects delay only for CPU port +zero set to rgmii and doesn't count any delay set in a secondary CPU +port. Drop the current delay scan function and move it to the sgmii +parser function to generalize and implicitly add support for secondary +CPU port set to rgmii-id. Introduce new logic where delay is enabled +also with internal delay binding declared and rgmii set as PHY mode. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 165 ++++++++++++++++++++++++------------------------ + drivers/net/dsa/qca8k.h | 10 ++- + 2 files changed, 89 insertions(+), 86 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -889,68 +889,6 @@ qca8k_setup_mdio_bus(struct qca8k_priv * + } + + static int +-qca8k_setup_of_rgmii_delay(struct qca8k_priv *priv) +-{ +- struct device_node *port_dn; +- phy_interface_t mode; +- struct dsa_port *dp; +- u32 val; +- +- /* CPU port is already checked */ +- dp = dsa_to_port(priv->ds, 0); +- +- port_dn = dp->dn; +- +- /* Check if port 0 is set to the correct type */ +- of_get_phy_mode(port_dn, &mode); +- if (mode != PHY_INTERFACE_MODE_RGMII_ID && +- mode != PHY_INTERFACE_MODE_RGMII_RXID && +- mode != PHY_INTERFACE_MODE_RGMII_TXID) { +- return 0; +- } +- +- switch (mode) { +- case PHY_INTERFACE_MODE_RGMII_ID: +- case PHY_INTERFACE_MODE_RGMII_RXID: +- if (of_property_read_u32(port_dn, "rx-internal-delay-ps", &val)) +- val = 2; +- else +- /* Switch regs accept value in ns, convert ps to ns */ +- val = val / 1000; +- +- if (val > QCA8K_MAX_DELAY) { +- dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value"); +- val = 3; +- } +- +- priv->rgmii_rx_delay = val; +- /* Stop here if we need to check only for rx delay */ +- if (mode != PHY_INTERFACE_MODE_RGMII_ID) +- break; +- +- fallthrough; +- case PHY_INTERFACE_MODE_RGMII_TXID: +- if (of_property_read_u32(port_dn, "tx-internal-delay-ps", &val)) +- val = 1; +- else +- /* Switch regs accept value in ns, convert ps to ns */ +- val = val / 1000; +- +- if (val > QCA8K_MAX_DELAY) { +- dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value"); +- val = 3; +- } +- +- priv->rgmii_tx_delay = val; +- break; +- default: +- return 0; +- } +- +- return 0; +-} +- +-static int + qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv) + { + u32 mask = 0; +@@ -996,19 +934,21 @@ static int qca8k_find_cpu_port(struct ds + static int + qca8k_parse_port_config(struct qca8k_priv *priv) + { ++ int port, cpu_port_index = 0, ret; + struct device_node *port_dn; + phy_interface_t mode; + struct 
dsa_port *dp; +- int port, ret; ++ u32 delay; + + /* We have 2 CPU port. Check them */ +- for (port = 0; port < QCA8K_NUM_PORTS; port++) { ++ for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) { + /* Skip every other port */ + if (port != 0 && port != 6) + continue; + + dp = dsa_to_port(priv->ds, port); + port_dn = dp->dn; ++ cpu_port_index++; + + if (!of_device_is_available(port_dn)) + continue; +@@ -1017,12 +957,54 @@ qca8k_parse_port_config(struct qca8k_pri + if (ret) + continue; + +- if (mode == PHY_INTERFACE_MODE_SGMII) { ++ switch (mode) { ++ case PHY_INTERFACE_MODE_RGMII: ++ case PHY_INTERFACE_MODE_RGMII_ID: ++ case PHY_INTERFACE_MODE_RGMII_TXID: ++ case PHY_INTERFACE_MODE_RGMII_RXID: ++ delay = 0; ++ ++ if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay)) ++ /* Switch regs accept value in ns, convert ps to ns */ ++ delay = delay / 1000; ++ else if (mode == PHY_INTERFACE_MODE_RGMII_ID || ++ mode == PHY_INTERFACE_MODE_RGMII_TXID) ++ delay = 1; ++ ++ if (delay > QCA8K_MAX_DELAY) { ++ dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value"); ++ delay = 3; ++ } ++ ++ priv->rgmii_tx_delay[cpu_port_index] = delay; ++ ++ delay = 0; ++ ++ if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay)) ++ /* Switch regs accept value in ns, convert ps to ns */ ++ delay = delay / 1000; ++ else if (mode == PHY_INTERFACE_MODE_RGMII_ID || ++ mode == PHY_INTERFACE_MODE_RGMII_RXID) ++ delay = 2; ++ ++ if (delay > QCA8K_MAX_DELAY) { ++ dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value"); ++ delay = 3; ++ } ++ ++ priv->rgmii_rx_delay[cpu_port_index] = delay; ++ ++ break; ++ case PHY_INTERFACE_MODE_SGMII: + if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge")) + priv->sgmii_tx_clk_falling_edge = true; + + if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge")) + priv->sgmii_rx_clk_falling_edge = true; ++ ++ break; ++ default: ++ continue; + } + } + +@@ -1059,10 +1041,6 @@ qca8k_setup(struct dsa_switch *ds) + if (ret) + return ret; + +- ret = qca8k_setup_of_rgmii_delay(priv); +- if (ret) +- return ret; +- + ret = qca8k_setup_mac_pwr_sel(priv); + if (ret) + return ret; +@@ -1229,8 +1207,8 @@ qca8k_phylink_mac_config(struct dsa_swit + const struct phylink_link_state *state) + { + struct qca8k_priv *priv = ds->priv; +- u32 reg, val; +- int ret; ++ int cpu_port_index, ret; ++ u32 reg, val, delay; + + switch (port) { + case 0: /* 1st CPU port */ +@@ -1242,6 +1220,7 @@ qca8k_phylink_mac_config(struct dsa_swit + return; + + reg = QCA8K_REG_PORT0_PAD_CTRL; ++ cpu_port_index = QCA8K_CPU_PORT0; + break; + case 1: + case 2: +@@ -1260,6 +1239,7 @@ qca8k_phylink_mac_config(struct dsa_swit + return; + + reg = QCA8K_REG_PORT6_PAD_CTRL; ++ cpu_port_index = QCA8K_CPU_PORT6; + break; + default: + dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port); +@@ -1274,23 +1254,40 @@ qca8k_phylink_mac_config(struct dsa_swit + + switch (state->interface) { + case PHY_INTERFACE_MODE_RGMII: +- /* RGMII mode means no delay so don't enable the delay */ +- qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN); +- break; + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: +- /* RGMII_ID needs internal delay. This is enabled through +- * PORT5_PAD_CTRL for all ports, rather than individual port +- * registers ++ val = QCA8K_PORT_PAD_RGMII_EN; ++ ++ /* Delay can be declared in 3 different way. 
++ * Mode to rgmii and internal-delay standard binding defined ++ * rgmii-id or rgmii-tx/rx phy mode set. ++ * The parse logic set a delay different than 0 only when one ++ * of the 3 different way is used. In all other case delay is ++ * not enabled. With ID or TX/RXID delay is enabled and set ++ * to the default and recommended value. ++ */ ++ if (priv->rgmii_tx_delay[cpu_port_index]) { ++ delay = priv->rgmii_tx_delay[cpu_port_index]; ++ ++ val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) | ++ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN; ++ } ++ ++ if (priv->rgmii_rx_delay[cpu_port_index]) { ++ delay = priv->rgmii_rx_delay[cpu_port_index]; ++ ++ val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) | ++ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN; ++ } ++ ++ /* Set RGMII delay based on the selected values */ ++ qca8k_write(priv, reg, val); ++ ++ /* QCA8337 requires to set rgmii rx delay for all ports. ++ * This is enabled through PORT5_PAD_CTRL for all ports, ++ * rather than individual port registers. + */ +- qca8k_write(priv, reg, +- QCA8K_PORT_PAD_RGMII_EN | +- QCA8K_PORT_PAD_RGMII_TX_DELAY(priv->rgmii_tx_delay) | +- QCA8K_PORT_PAD_RGMII_RX_DELAY(priv->rgmii_rx_delay) | +- QCA8K_PORT_PAD_RGMII_TX_DELAY_EN | +- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); +- /* QCA8337 requires to set rgmii rx delay */ + if (priv->switch_id == QCA8K_ID_QCA8337) + qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL, + QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -13,6 +13,7 @@ + #include <linux/gpio.h> + + #define QCA8K_NUM_PORTS 7 ++#define QCA8K_NUM_CPU_PORTS 2 + #define QCA8K_MAX_MTU 9000 + + #define PHY_ID_QCA8327 0x004dd034 +@@ -255,13 +256,18 @@ struct qca8k_match_data { + u8 id; + }; + ++enum { ++ QCA8K_CPU_PORT0, ++ QCA8K_CPU_PORT6, ++}; ++ + struct qca8k_priv { + u8 switch_id; + u8 switch_revision; +- u8 rgmii_tx_delay; +- u8 rgmii_rx_delay; + bool sgmii_rx_clk_falling_edge; + bool sgmii_tx_clk_falling_edge; ++ u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ ++ u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ + bool legacy_phy_port_mapping; + struct regmap *regmap; + struct mii_bus *bus; diff --git a/target/linux/generic/backport-5.15/747-v5.16-07-dt-bindings-net-dsa-qca8k-Document-qca-sgmii-enable-.patch b/target/linux/generic/backport-5.15/747-v5.16-07-dt-bindings-net-dsa-qca8k-Document-qca-sgmii-enable-.patch new file mode 100644 index 0000000000..8abd264e79 --- /dev/null +++ b/target/linux/generic/backport-5.15/747-v5.16-07-dt-bindings-net-dsa-qca8k-Document-qca-sgmii-enable-.patch @@ -0,0 +1,33 @@ +From 13ad5ccc093ff448b99ac7e138e91e78796adb48 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Thu, 14 Oct 2021 00:39:12 +0200 +Subject: dt-bindings: net: dsa: qca8k: Document qca,sgmii-enable-pll + +Document qca,sgmii-enable-pll binding used in the CPU nodes to +enable SGMII PLL on MAC config. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + Documentation/devicetree/bindings/net/dsa/qca8k.txt | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +--- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt ++++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt +@@ -45,6 +45,16 @@ A CPU port node has the following option + Mostly used in qca8327 with CPU port 0 set to + sgmii. + - qca,sgmii-txclk-falling-edge: Set the transmit clock phase to falling edge. 
++- qca,sgmii-enable-pll : For SGMII CPU port, explicitly enable PLL, TX and RX ++ chain along with Signal Detection. ++ This should NOT be enabled for qca8327. If enabled with ++ qca8327 the sgmii port won't correctly init and an err ++ is printed. ++ This can be required for qca8337 switch with revision 2. ++ A warning is displayed when used with revision greater ++ 2. ++ With CPU port set to sgmii and qca8337 it is advised ++ to set this unless a communication problem is observed. + + For QCA8K the 'fixed-link' sub-node supports only the following properties: + diff --git a/target/linux/generic/backport-5.15/747-v5.16-08-net-dsa-qca8k-add-explicit-SGMII-PLL-enable.patch b/target/linux/generic/backport-5.15/747-v5.16-08-net-dsa-qca8k-add-explicit-SGMII-PLL-enable.patch new file mode 100644 index 0000000000..2b5a84a1b0 --- /dev/null +++ b/target/linux/generic/backport-5.15/747-v5.16-08-net-dsa-qca8k-add-explicit-SGMII-PLL-enable.patch @@ -0,0 +1,65 @@ +From bbc4799e8bb6c397e3b3fec13de68e179f5db9ff Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Thu, 14 Oct 2021 00:39:13 +0200 +Subject: net: dsa: qca8k: add explicit SGMII PLL enable + +Support enabling PLL on the SGMII CPU port. Some device require this +special configuration or no traffic is transmitted and the switch +doesn't work at all. A dedicated binding is added to the CPU node +port to apply the correct reg on mac config. +Fail to correctly configure sgmii with qca8327 switch and warn if pll is +used on qca8337 with a revision greater than 1. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 19 +++++++++++++++++-- + drivers/net/dsa/qca8k.h | 1 + + 2 files changed, 18 insertions(+), 2 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1002,6 +1002,18 @@ qca8k_parse_port_config(struct qca8k_pri + if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge")) + priv->sgmii_rx_clk_falling_edge = true; + ++ if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) { ++ priv->sgmii_enable_pll = true; ++ ++ if (priv->switch_id == QCA8K_ID_QCA8327) { ++ dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. 
Aborting enabling"); ++ priv->sgmii_enable_pll = false; ++ } ++ ++ if (priv->switch_revision < 2) ++ dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more."); ++ } ++ + break; + default: + continue; +@@ -1312,8 +1324,11 @@ qca8k_phylink_mac_config(struct dsa_swit + if (ret) + return; + +- val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | +- QCA8K_SGMII_EN_TX | QCA8K_SGMII_EN_SD; ++ val |= QCA8K_SGMII_EN_SD; ++ ++ if (priv->sgmii_enable_pll) ++ val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | ++ QCA8K_SGMII_EN_TX; + + if (dsa_is_cpu_port(ds, port)) { + /* CPU port, we're talking to the CPU MAC, be a PHY */ +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -266,6 +266,7 @@ struct qca8k_priv { + u8 switch_revision; + bool sgmii_rx_clk_falling_edge; + bool sgmii_tx_clk_falling_edge; ++ bool sgmii_enable_pll; + u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ + u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ + bool legacy_phy_port_mapping; diff --git a/target/linux/generic/backport-5.15/747-v5.16-09-dt-bindings-net-dsa-qca8k-Document-qca-led-open-drai.patch b/target/linux/generic/backport-5.15/747-v5.16-09-dt-bindings-net-dsa-qca8k-Document-qca-led-open-drai.patch new file mode 100644 index 0000000000..38dc954e8c --- /dev/null +++ b/target/linux/generic/backport-5.15/747-v5.16-09-dt-bindings-net-dsa-qca8k-Document-qca-led-open-drai.patch @@ -0,0 +1,37 @@ +From 924087c5c3d41553700b0eb83ca2a53b91643dca Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Thu, 14 Oct 2021 00:39:14 +0200 +Subject: dt-bindings: net: dsa: qca8k: Document qca,led-open-drain binding + +Document new binding qca,ignore-power-on-sel used to ignore +power on strapping and use sw regs instead. +Document qca,led-open.drain to set led to open drain mode, the +qca,ignore-power-on-sel is mandatory with this enabled or an error will +be reported. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + Documentation/devicetree/bindings/net/dsa/qca8k.txt | 11 +++++++++++ + 1 file changed, 11 insertions(+) + +--- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt ++++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt +@@ -13,6 +13,17 @@ Required properties: + Optional properties: + + - reset-gpios: GPIO to be used to reset the whole device ++- qca,ignore-power-on-sel: Ignore power on pin strapping to configure led open ++ drain or eeprom presence. This is needed for broken ++ devices that have wrong configuration or when the oem ++ decided to not use pin strapping and fallback to sw ++ regs. ++- qca,led-open-drain: Set leds to open-drain mode. This requires the ++ qca,ignore-power-on-sel to be set or the driver will fail ++ to probe. This is needed if the oem doesn't use pin ++ strapping to set this mode and prefers to set it using sw ++ regs. 
The pin strapping related to led open drain mode is ++ the pin B68 for QCA832x and B49 for QCA833x + + Subnodes: + diff --git a/target/linux/generic/backport-5.15/747-v5.16-10-net-dsa-qca8k-add-support-for-pws-config-reg.patch b/target/linux/generic/backport-5.15/747-v5.16-10-net-dsa-qca8k-add-support-for-pws-config-reg.patch new file mode 100644 index 0000000000..aa5d92a4fd --- /dev/null +++ b/target/linux/generic/backport-5.15/747-v5.16-10-net-dsa-qca8k-add-support-for-pws-config-reg.patch @@ -0,0 +1,92 @@ +From 362bb238d8bf1470424214a8a5968d9c6cce68fa Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Thu, 14 Oct 2021 00:39:15 +0200 +Subject: net: dsa: qca8k: add support for pws config reg + +Some qca8327 switch require to force the ignore of power on sel +strapping. Some switch require to set the led open drain mode in regs +instead of using strapping. While most of the device implements this +using the correct way using pin strapping, there are still some broken +device that require to be set using sw regs. +Introduce a new binding and support these special configuration. +As led open drain require to ignore pin strapping to work, the probe +fails with EINVAL error with incorrect configuration. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 39 +++++++++++++++++++++++++++++++++++++++ + drivers/net/dsa/qca8k.h | 6 ++++++ + 2 files changed, 45 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -932,6 +932,41 @@ static int qca8k_find_cpu_port(struct ds + } + + static int ++qca8k_setup_of_pws_reg(struct qca8k_priv *priv) ++{ ++ struct device_node *node = priv->dev->of_node; ++ u32 val = 0; ++ int ret; ++ ++ /* QCA8327 require to set to the correct mode. ++ * His bigger brother QCA8328 have the 172 pin layout. ++ * Should be applied by default but we set this just to make sure. 
++ */ ++ if (priv->switch_id == QCA8K_ID_QCA8327) { ++ ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN, ++ QCA8327_PWS_PACKAGE148_EN); ++ if (ret) ++ return ret; ++ } ++ ++ if (of_property_read_bool(node, "qca,ignore-power-on-sel")) ++ val |= QCA8K_PWS_POWER_ON_SEL; ++ ++ if (of_property_read_bool(node, "qca,led-open-drain")) { ++ if (!(val & QCA8K_PWS_POWER_ON_SEL)) { ++ dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set."); ++ return -EINVAL; ++ } ++ ++ val |= QCA8K_PWS_LED_OPEN_EN_CSR; ++ } ++ ++ return qca8k_rmw(priv, QCA8K_REG_PWS, ++ QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL, ++ val); ++} ++ ++static int + qca8k_parse_port_config(struct qca8k_priv *priv) + { + int port, cpu_port_index = 0, ret; +@@ -1053,6 +1088,10 @@ qca8k_setup(struct dsa_switch *ds) + if (ret) + return ret; + ++ ret = qca8k_setup_of_pws_reg(priv); ++ if (ret) ++ return ret; ++ + ret = qca8k_setup_mac_pwr_sel(priv); + if (ret) + return ret; +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -46,6 +46,12 @@ + #define QCA8K_MAX_DELAY 3 + #define QCA8K_PORT_PAD_SGMII_EN BIT(7) + #define QCA8K_REG_PWS 0x010 ++#define QCA8K_PWS_POWER_ON_SEL BIT(31) ++/* This reg is only valid for QCA832x and toggle the package ++ * type from 176 pin (by default) to 148 pin used on QCA8327 ++ */ ++#define QCA8327_PWS_PACKAGE148_EN BIT(30) ++#define QCA8K_PWS_LED_OPEN_EN_CSR BIT(24) + #define QCA8K_PWS_SERDES_AEN_DIS BIT(7) + #define QCA8K_REG_MODULE_EN 0x030 + #define QCA8K_MODULE_EN_MIB BIT(0) diff --git a/target/linux/generic/backport-5.15/747-v5.16-11-dt-bindings-net-dsa-qca8k-document-support-for-qca83.patch b/target/linux/generic/backport-5.15/747-v5.16-11-dt-bindings-net-dsa-qca8k-document-support-for-qca83.patch new file mode 100644 index 0000000000..1bfb00c5b2 --- /dev/null +++ b/target/linux/generic/backport-5.15/747-v5.16-11-dt-bindings-net-dsa-qca8k-document-support-for-qca83.patch @@ -0,0 +1,32 @@ +From ed7988d77fbfb79366b68f9e7fa60a6080da23d4 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Thu, 14 Oct 2021 00:39:16 +0200 +Subject: dt-bindings: net: dsa: qca8k: document support for qca8328 + +QCA8328 is the bigger brother of qca8327. Document the new compatible +binding and add some information to understand the various switch +compatible. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + Documentation/devicetree/bindings/net/dsa/qca8k.txt | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +--- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt ++++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt +@@ -3,9 +3,10 @@ + Required properties: + + - compatible: should be one of: +- "qca,qca8327" +- "qca,qca8334" +- "qca,qca8337" ++ "qca,qca8328": referenced as AR8328(N)-AK1(A/B) QFN 176 pin package ++ "qca,qca8327": referenced as AR8327(N)-AL1A DR-QFN 148 pin package ++ "qca,qca8334": referenced as QCA8334-AL3C QFN 88 pin package ++ "qca,qca8337": referenced as QCA8337N-AL3(B/C) DR-QFN 148 pin package + + - #size-cells: must be 0 + - #address-cells: must be 1 diff --git a/target/linux/generic/backport-5.15/747-v5.16-12-net-dsa-qca8k-add-support-for-QCA8328.patch b/target/linux/generic/backport-5.15/747-v5.16-12-net-dsa-qca8k-add-support-for-QCA8328.patch new file mode 100644 index 0000000000..6e118f5a14 --- /dev/null +++ b/target/linux/generic/backport-5.15/747-v5.16-12-net-dsa-qca8k-add-support-for-QCA8328.patch @@ -0,0 +1,78 @@ +From f477d1c8bdbef4f400718238e350f16f521d2a3e Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Thu, 14 Oct 2021 00:39:17 +0200 +Subject: net: dsa: qca8k: add support for QCA8328 + +QCA8328 switch is the bigger brother of the qca8327. Same regs different +chip. Change the function to set the correct pin layout and introduce a +new match_data to differentiate the 2 switch as they have the same ID +and their internal PHY have the same ID. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 19 ++++++++++++++++--- + drivers/net/dsa/qca8k.h | 1 + + 2 files changed, 17 insertions(+), 3 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -935,6 +935,7 @@ static int + qca8k_setup_of_pws_reg(struct qca8k_priv *priv) + { + struct device_node *node = priv->dev->of_node; ++ const struct qca8k_match_data *data; + u32 val = 0; + int ret; + +@@ -943,8 +944,14 @@ qca8k_setup_of_pws_reg(struct qca8k_priv + * Should be applied by default but we set this just to make sure. 
+ */ + if (priv->switch_id == QCA8K_ID_QCA8327) { ++ data = of_device_get_match_data(priv->dev); ++ ++ /* Set the correct package of 148 pin for QCA8327 */ ++ if (data->reduced_package) ++ val |= QCA8327_PWS_PACKAGE148_EN; ++ + ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN, +- QCA8327_PWS_PACKAGE148_EN); ++ val); + if (ret) + return ret; + } +@@ -2098,7 +2105,12 @@ static int qca8k_resume(struct device *d + static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, + qca8k_suspend, qca8k_resume); + +-static const struct qca8k_match_data qca832x = { ++static const struct qca8k_match_data qca8327 = { ++ .id = QCA8K_ID_QCA8327, ++ .reduced_package = true, ++}; ++ ++static const struct qca8k_match_data qca8328 = { + .id = QCA8K_ID_QCA8327, + }; + +@@ -2107,7 +2119,8 @@ static const struct qca8k_match_data qca + }; + + static const struct of_device_id qca8k_of_match[] = { +- { .compatible = "qca,qca8327", .data = &qca832x }, ++ { .compatible = "qca,qca8327", .data = &qca8327 }, ++ { .compatible = "qca,qca8328", .data = &qca8328 }, + { .compatible = "qca,qca8334", .data = &qca833x }, + { .compatible = "qca,qca8337", .data = &qca833x }, + { /* sentinel */ }, +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -260,6 +260,7 @@ struct ar8xxx_port_status { + + struct qca8k_match_data { + u8 id; ++ bool reduced_package; + }; + + enum { diff --git a/target/linux/generic/backport-5.15/747-v5.16-13-net-dsa-qca8k-set-internal-delay-also-for-sgmii.patch b/target/linux/generic/backport-5.15/747-v5.16-13-net-dsa-qca8k-set-internal-delay-also-for-sgmii.patch new file mode 100644 index 0000000000..27f94dca02 --- /dev/null +++ b/target/linux/generic/backport-5.15/747-v5.16-13-net-dsa-qca8k-set-internal-delay-also-for-sgmii.patch @@ -0,0 +1,159 @@ +From cef08115846e581f80ff99abf7bf218da1840616 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Thu, 14 Oct 2021 00:39:18 +0200 +Subject: net: dsa: qca8k: set internal delay also for sgmii + +QCA original code report port instability and sa that SGMII also require +to set internal delay. Generalize the rgmii delay function and apply the +advised value if they are not defined in DT. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 88 +++++++++++++++++++++++++++++++++---------------- + drivers/net/dsa/qca8k.h | 2 ++ + 2 files changed, 62 insertions(+), 28 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1004,6 +1004,7 @@ qca8k_parse_port_config(struct qca8k_pri + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: ++ case PHY_INTERFACE_MODE_SGMII: + delay = 0; + + if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay)) +@@ -1036,8 +1037,13 @@ qca8k_parse_port_config(struct qca8k_pri + + priv->rgmii_rx_delay[cpu_port_index] = delay; + +- break; +- case PHY_INTERFACE_MODE_SGMII: ++ /* Skip sgmii parsing for rgmii* mode */ ++ if (mode == PHY_INTERFACE_MODE_RGMII || ++ mode == PHY_INTERFACE_MODE_RGMII_ID || ++ mode == PHY_INTERFACE_MODE_RGMII_TXID || ++ mode == PHY_INTERFACE_MODE_RGMII_RXID) ++ break; ++ + if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge")) + priv->sgmii_tx_clk_falling_edge = true; + +@@ -1261,12 +1267,53 @@ qca8k_setup(struct dsa_switch *ds) + } + + static void ++qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index, ++ u32 reg) ++{ ++ u32 delay, val = 0; ++ int ret; ++ ++ /* Delay can be declared in 3 different way. ++ * Mode to rgmii and internal-delay standard binding defined ++ * rgmii-id or rgmii-tx/rx phy mode set. ++ * The parse logic set a delay different than 0 only when one ++ * of the 3 different way is used. In all other case delay is ++ * not enabled. With ID or TX/RXID delay is enabled and set ++ * to the default and recommended value. ++ */ ++ if (priv->rgmii_tx_delay[cpu_port_index]) { ++ delay = priv->rgmii_tx_delay[cpu_port_index]; ++ ++ val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) | ++ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN; ++ } ++ ++ if (priv->rgmii_rx_delay[cpu_port_index]) { ++ delay = priv->rgmii_rx_delay[cpu_port_index]; ++ ++ val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) | ++ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN; ++ } ++ ++ /* Set RGMII delay based on the selected values */ ++ ret = qca8k_rmw(priv, reg, ++ QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK | ++ QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK | ++ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN | ++ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN, ++ val); ++ if (ret) ++ dev_err(priv->dev, "Failed to set internal delay for CPU port%d", ++ cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6); ++} ++ ++static void + qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, + const struct phylink_link_state *state) + { + struct qca8k_priv *priv = ds->priv; + int cpu_port_index, ret; +- u32 reg, val, delay; ++ u32 reg, val; + + switch (port) { + case 0: /* 1st CPU port */ +@@ -1315,32 +1362,10 @@ qca8k_phylink_mac_config(struct dsa_swit + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: +- val = QCA8K_PORT_PAD_RGMII_EN; +- +- /* Delay can be declared in 3 different way. +- * Mode to rgmii and internal-delay standard binding defined +- * rgmii-id or rgmii-tx/rx phy mode set. +- * The parse logic set a delay different than 0 only when one +- * of the 3 different way is used. In all other case delay is +- * not enabled. With ID or TX/RXID delay is enabled and set +- * to the default and recommended value. 
+- */ +- if (priv->rgmii_tx_delay[cpu_port_index]) { +- delay = priv->rgmii_tx_delay[cpu_port_index]; +- +- val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) | +- QCA8K_PORT_PAD_RGMII_TX_DELAY_EN; +- } +- +- if (priv->rgmii_rx_delay[cpu_port_index]) { +- delay = priv->rgmii_rx_delay[cpu_port_index]; +- +- val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) | +- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN; +- } ++ qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN); + +- /* Set RGMII delay based on the selected values */ +- qca8k_write(priv, reg, val); ++ /* Configure rgmii delay */ ++ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); + + /* QCA8337 requires to set rgmii rx delay for all ports. + * This is enabled through PORT5_PAD_CTRL for all ports, +@@ -1411,6 +1436,13 @@ qca8k_phylink_mac_config(struct dsa_swit + QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE | + QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE, + val); ++ ++ /* From original code is reported port instability as SGMII also ++ * require delay set. Apply advised values here or take them from DT. ++ */ ++ if (state->interface == PHY_INTERFACE_MODE_SGMII) ++ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); ++ + break; + default: + dev_err(ds->dev, "xMII mode %s not supported for port %d\n", +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -39,7 +39,9 @@ + #define QCA8K_REG_PORT5_PAD_CTRL 0x008 + #define QCA8K_REG_PORT6_PAD_CTRL 0x00c + #define QCA8K_PORT_PAD_RGMII_EN BIT(26) ++#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22) + #define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22) ++#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20) + #define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20) + #define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25) + #define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24) diff --git a/target/linux/generic/backport-5.15/747-v5.16-14-net-dsa-qca8k-move-port-config-to-dedicated-struct.patch b/target/linux/generic/backport-5.15/747-v5.16-14-net-dsa-qca8k-move-port-config-to-dedicated-struct.patch new file mode 100644 index 0000000000..b991798c87 --- /dev/null +++ b/target/linux/generic/backport-5.15/747-v5.16-14-net-dsa-qca8k-move-port-config-to-dedicated-struct.patch @@ -0,0 +1,124 @@ +From fd0bb28c547f7c8affb1691128cece38f5b626a1 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Thu, 14 Oct 2021 00:39:19 +0200 +Subject: net: dsa: qca8k: move port config to dedicated struct + +Move ports related config to dedicated struct to keep things organized. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 26 +++++++++++++------------- + drivers/net/dsa/qca8k.h | 10 +++++++--- + 2 files changed, 20 insertions(+), 16 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1019,7 +1019,7 @@ qca8k_parse_port_config(struct qca8k_pri + delay = 3; + } + +- priv->rgmii_tx_delay[cpu_port_index] = delay; ++ priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay; + + delay = 0; + +@@ -1035,7 +1035,7 @@ qca8k_parse_port_config(struct qca8k_pri + delay = 3; + } + +- priv->rgmii_rx_delay[cpu_port_index] = delay; ++ priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay; + + /* Skip sgmii parsing for rgmii* mode */ + if (mode == PHY_INTERFACE_MODE_RGMII || +@@ -1045,17 +1045,17 @@ qca8k_parse_port_config(struct qca8k_pri + break; + + if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge")) +- priv->sgmii_tx_clk_falling_edge = true; ++ priv->ports_config.sgmii_tx_clk_falling_edge = true; + + if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge")) +- priv->sgmii_rx_clk_falling_edge = true; ++ priv->ports_config.sgmii_rx_clk_falling_edge = true; + + if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) { +- priv->sgmii_enable_pll = true; ++ priv->ports_config.sgmii_enable_pll = true; + + if (priv->switch_id == QCA8K_ID_QCA8327) { + dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling"); +- priv->sgmii_enable_pll = false; ++ priv->ports_config.sgmii_enable_pll = false; + } + + if (priv->switch_revision < 2) +@@ -1281,15 +1281,15 @@ qca8k_mac_config_setup_internal_delay(st + * not enabled. With ID or TX/RXID delay is enabled and set + * to the default and recommended value. + */ +- if (priv->rgmii_tx_delay[cpu_port_index]) { +- delay = priv->rgmii_tx_delay[cpu_port_index]; ++ if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) { ++ delay = priv->ports_config.rgmii_tx_delay[cpu_port_index]; + + val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) | + QCA8K_PORT_PAD_RGMII_TX_DELAY_EN; + } + +- if (priv->rgmii_rx_delay[cpu_port_index]) { +- delay = priv->rgmii_rx_delay[cpu_port_index]; ++ if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) { ++ delay = priv->ports_config.rgmii_rx_delay[cpu_port_index]; + + val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) | + QCA8K_PORT_PAD_RGMII_RX_DELAY_EN; +@@ -1397,7 +1397,7 @@ qca8k_phylink_mac_config(struct dsa_swit + + val |= QCA8K_SGMII_EN_SD; + +- if (priv->sgmii_enable_pll) ++ if (priv->ports_config.sgmii_enable_pll) + val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | + QCA8K_SGMII_EN_TX; + +@@ -1425,10 +1425,10 @@ qca8k_phylink_mac_config(struct dsa_swit + val = 0; + + /* SGMII Clock phase configuration */ +- if (priv->sgmii_rx_clk_falling_edge) ++ if (priv->ports_config.sgmii_rx_clk_falling_edge) + val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE; + +- if (priv->sgmii_tx_clk_falling_edge) ++ if (priv->ports_config.sgmii_tx_clk_falling_edge) + val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE; + + if (val) +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -270,15 +270,19 @@ enum { + QCA8K_CPU_PORT6, + }; + +-struct qca8k_priv { +- u8 switch_id; +- u8 switch_revision; ++struct qca8k_ports_config { + bool sgmii_rx_clk_falling_edge; + bool sgmii_tx_clk_falling_edge; + bool sgmii_enable_pll; + u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ + u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ ++}; ++ ++struct qca8k_priv { ++ u8 switch_id; ++ u8 switch_revision; + bool 
legacy_phy_port_mapping; ++ struct qca8k_ports_config ports_config; + struct regmap *regmap; + struct mii_bus *bus; + struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS]; diff --git a/target/linux/generic/backport-5.15/747-v5.16-15-dt-bindings-net-ipq8064-mdio-fix-warning-with-new-qc.patch b/target/linux/generic/backport-5.15/747-v5.16-15-dt-bindings-net-ipq8064-mdio-fix-warning-with-new-qc.patch new file mode 100644 index 0000000000..f7cb514176 --- /dev/null +++ b/target/linux/generic/backport-5.15/747-v5.16-15-dt-bindings-net-ipq8064-mdio-fix-warning-with-new-qc.patch @@ -0,0 +1,26 @@ +From e52073a8e3086046a098b8a7cbeb282ff0cdb424 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Thu, 14 Oct 2021 00:39:20 +0200 +Subject: dt-bindings: net: ipq8064-mdio: fix warning with new qca8k switch + +Fix warning now that we have qca8k switch Documentation using yaml. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +--- a/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml ++++ b/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml +@@ -51,6 +51,9 @@ examples: + switch@10 { + compatible = "qca,qca8337"; + reg = <0x10>; +- /* ... */ ++ ++ ports { ++ /* ... */ ++ }; + }; + }; diff --git a/target/linux/generic/backport-5.15/747-v5.16-16-dt-bindings-net-dsa-qca8k-convert-to-YAML-schema.patch b/target/linux/generic/backport-5.15/747-v5.16-16-dt-bindings-net-dsa-qca8k-convert-to-YAML-schema.patch new file mode 100644 index 0000000000..b9bce97dd3 --- /dev/null +++ b/target/linux/generic/backport-5.15/747-v5.16-16-dt-bindings-net-dsa-qca8k-convert-to-YAML-schema.patch @@ -0,0 +1,631 @@ +From d291fbb8245d5ba04979fed85575860a5cea7196 Mon Sep 17 00:00:00 2001 +From: Matthew Hagan <mnhagan88@gmail.com> +Date: Thu, 14 Oct 2021 00:39:21 +0200 +Subject: dt-bindings: net: dsa: qca8k: convert to YAML schema + +Convert the qca8k bindings to YAML format. + +Signed-off-by: Matthew Hagan <mnhagan88@gmail.com> +Co-developed-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + .../devicetree/bindings/net/dsa/qca8k.txt | 245 -------------- + .../devicetree/bindings/net/dsa/qca8k.yaml | 362 +++++++++++++++++++++ + 2 files changed, 362 insertions(+), 245 deletions(-) + delete mode 100644 Documentation/devicetree/bindings/net/dsa/qca8k.txt + create mode 100644 Documentation/devicetree/bindings/net/dsa/qca8k.yaml + +--- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt ++++ /dev/null +@@ -1,245 +0,0 @@ +-* Qualcomm Atheros QCA8xxx switch family +- +-Required properties: +- +-- compatible: should be one of: +- "qca,qca8328": referenced as AR8328(N)-AK1(A/B) QFN 176 pin package +- "qca,qca8327": referenced as AR8327(N)-AL1A DR-QFN 148 pin package +- "qca,qca8334": referenced as QCA8334-AL3C QFN 88 pin package +- "qca,qca8337": referenced as QCA8337N-AL3(B/C) DR-QFN 148 pin package +- +-- #size-cells: must be 0 +-- #address-cells: must be 1 +- +-Optional properties: +- +-- reset-gpios: GPIO to be used to reset the whole device +-- qca,ignore-power-on-sel: Ignore power on pin strapping to configure led open +- drain or eeprom presence. This is needed for broken +- devices that have wrong configuration or when the oem +- decided to not use pin strapping and fallback to sw +- regs. 
+-- qca,led-open-drain: Set leds to open-drain mode. This requires the +- qca,ignore-power-on-sel to be set or the driver will fail +- to probe. This is needed if the oem doesn't use pin +- strapping to set this mode and prefers to set it using sw +- regs. The pin strapping related to led open drain mode is +- the pin B68 for QCA832x and B49 for QCA833x +- +-Subnodes: +- +-The integrated switch subnode should be specified according to the binding +-described in dsa/dsa.txt. If the QCA8K switch is connect to a SoC's external +-mdio-bus each subnode describing a port needs to have a valid phandle +-referencing the internal PHY it is connected to. This is because there's no +-N:N mapping of port and PHY id. +-To declare the internal mdio-bus configuration, declare a mdio node in the +-switch node and declare the phandle for the port referencing the internal +-PHY is connected to. In this config a internal mdio-bus is registered and +-the mdio MASTER is used as communication. +- +-Don't use mixed external and internal mdio-bus configurations, as this is +-not supported by the hardware. +- +-This switch support 2 CPU port. Normally and advised configuration is with +-CPU port set to port 0. It is also possible to set the CPU port to port 6 +-if the device requires it. The driver will configure the switch to the defined +-port. With both CPU port declared the first CPU port is selected as primary +-and the secondary CPU ignored. +- +-A CPU port node has the following optional node: +- +-- fixed-link : Fixed-link subnode describing a link to a non-MDIO +- managed entity. See +- Documentation/devicetree/bindings/net/fixed-link.txt +- for details. +-- qca,sgmii-rxclk-falling-edge: Set the receive clock phase to falling edge. +- Mostly used in qca8327 with CPU port 0 set to +- sgmii. +-- qca,sgmii-txclk-falling-edge: Set the transmit clock phase to falling edge. +-- qca,sgmii-enable-pll : For SGMII CPU port, explicitly enable PLL, TX and RX +- chain along with Signal Detection. +- This should NOT be enabled for qca8327. If enabled with +- qca8327 the sgmii port won't correctly init and an err +- is printed. +- This can be required for qca8337 switch with revision 2. +- A warning is displayed when used with revision greater +- 2. +- With CPU port set to sgmii and qca8337 it is advised +- to set this unless a communication problem is observed. +- +-For QCA8K the 'fixed-link' sub-node supports only the following properties: +- +-- 'speed' (integer, mandatory), to indicate the link speed. Accepted +- values are 10, 100 and 1000 +-- 'full-duplex' (boolean, optional), to indicate that full duplex is +- used. When absent, half duplex is assumed. 
+- +-Examples: +- +-for the external mdio-bus configuration: +- +- &mdio0 { +- phy_port1: phy@0 { +- reg = <0>; +- }; +- +- phy_port2: phy@1 { +- reg = <1>; +- }; +- +- phy_port3: phy@2 { +- reg = <2>; +- }; +- +- phy_port4: phy@3 { +- reg = <3>; +- }; +- +- phy_port5: phy@4 { +- reg = <4>; +- }; +- +- switch@10 { +- compatible = "qca,qca8337"; +- #address-cells = <1>; +- #size-cells = <0>; +- +- reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>; +- reg = <0x10>; +- +- ports { +- #address-cells = <1>; +- #size-cells = <0>; +- port@0 { +- reg = <0>; +- label = "cpu"; +- ethernet = <&gmac1>; +- phy-mode = "rgmii"; +- fixed-link { +- speed = 1000; +- full-duplex; +- }; +- }; +- +- port@1 { +- reg = <1>; +- label = "lan1"; +- phy-handle = <&phy_port1>; +- }; +- +- port@2 { +- reg = <2>; +- label = "lan2"; +- phy-handle = <&phy_port2>; +- }; +- +- port@3 { +- reg = <3>; +- label = "lan3"; +- phy-handle = <&phy_port3>; +- }; +- +- port@4 { +- reg = <4>; +- label = "lan4"; +- phy-handle = <&phy_port4>; +- }; +- +- port@5 { +- reg = <5>; +- label = "wan"; +- phy-handle = <&phy_port5>; +- }; +- }; +- }; +- }; +- +-for the internal master mdio-bus configuration: +- +- &mdio0 { +- switch@10 { +- compatible = "qca,qca8337"; +- #address-cells = <1>; +- #size-cells = <0>; +- +- reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>; +- reg = <0x10>; +- +- ports { +- #address-cells = <1>; +- #size-cells = <0>; +- +- port@0 { +- reg = <0>; +- label = "cpu"; +- ethernet = <&gmac1>; +- phy-mode = "rgmii"; +- fixed-link { +- speed = 1000; +- full-duplex; +- }; +- }; +- +- port@1 { +- reg = <1>; +- label = "lan1"; +- phy-mode = "internal"; +- phy-handle = <&phy_port1>; +- }; +- +- port@2 { +- reg = <2>; +- label = "lan2"; +- phy-mode = "internal"; +- phy-handle = <&phy_port2>; +- }; +- +- port@3 { +- reg = <3>; +- label = "lan3"; +- phy-mode = "internal"; +- phy-handle = <&phy_port3>; +- }; +- +- port@4 { +- reg = <4>; +- label = "lan4"; +- phy-mode = "internal"; +- phy-handle = <&phy_port4>; +- }; +- +- port@5 { +- reg = <5>; +- label = "wan"; +- phy-mode = "internal"; +- phy-handle = <&phy_port5>; +- }; +- }; +- +- mdio { +- #address-cells = <1>; +- #size-cells = <0>; +- +- phy_port1: phy@0 { +- reg = <0>; +- }; +- +- phy_port2: phy@1 { +- reg = <1>; +- }; +- +- phy_port3: phy@2 { +- reg = <2>; +- }; +- +- phy_port4: phy@3 { +- reg = <3>; +- }; +- +- phy_port5: phy@4 { +- reg = <4>; +- }; +- }; +- }; +- }; +--- /dev/null ++++ b/Documentation/devicetree/bindings/net/dsa/qca8k.yaml +@@ -0,0 +1,362 @@ ++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/net/dsa/qca8k.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: Qualcomm Atheros QCA83xx switch family ++ ++maintainers: ++ - John Crispin <john@phrozen.org> ++ ++description: ++ If the QCA8K switch is connect to an SoC's external mdio-bus, each subnode ++ describing a port needs to have a valid phandle referencing the internal PHY ++ it is connected to. This is because there is no N:N mapping of port and PHY ++ ID. To declare the internal mdio-bus configuration, declare an MDIO node in ++ the switch node and declare the phandle for the port, referencing the internal ++ PHY it is connected to. In this config, an internal mdio-bus is registered and ++ the MDIO master is used for communication. Mixed external and internal ++ mdio-bus configurations are not supported by the hardware. 
++ ++properties: ++ compatible: ++ oneOf: ++ - enum: ++ - qca,qca8327 ++ - qca,qca8328 ++ - qca,qca8334 ++ - qca,qca8337 ++ description: | ++ qca,qca8328: referenced as AR8328(N)-AK1(A/B) QFN 176 pin package ++ qca,qca8327: referenced as AR8327(N)-AL1A DR-QFN 148 pin package ++ qca,qca8334: referenced as QCA8334-AL3C QFN 88 pin package ++ qca,qca8337: referenced as QCA8337N-AL3(B/C) DR-QFN 148 pin package ++ ++ reg: ++ maxItems: 1 ++ ++ reset-gpios: ++ description: ++ GPIO to be used to reset the whole device ++ maxItems: 1 ++ ++ qca,ignore-power-on-sel: ++ $ref: /schemas/types.yaml#/definitions/flag ++ description: ++ Ignore power-on pin strapping to configure LED open-drain or EEPROM ++ presence. This is needed for devices with incorrect configuration or when ++ the OEM has decided not to use pin strapping and falls back to SW regs. ++ ++ qca,led-open-drain: ++ $ref: /schemas/types.yaml#/definitions/flag ++ description: ++ Set LEDs to open-drain mode. This requires the qca,ignore-power-on-sel to ++ be set, otherwise the driver will fail at probe. This is required if the ++ OEM does not use pin strapping to set this mode and prefers to set it ++ using SW regs. The pin strappings related to LED open-drain mode are ++ B68 on the QCA832x and B49 on the QCA833x. ++ ++ mdio: ++ type: object ++ description: Qca8k switch have an internal mdio to access switch port. ++ If this is not present, the legacy mapping is used and the ++ internal mdio access is used. ++ With the legacy mapping the reg corresponding to the internal ++ mdio is the switch reg with an offset of -1. ++ ++ properties: ++ '#address-cells': ++ const: 1 ++ '#size-cells': ++ const: 0 ++ ++ patternProperties: ++ "^(ethernet-)?phy@[0-4]$": ++ type: object ++ ++ allOf: ++ - $ref: "http://devicetree.org/schemas/net/mdio.yaml#" ++ ++ properties: ++ reg: ++ maxItems: 1 ++ ++ required: ++ - reg ++ ++patternProperties: ++ "^(ethernet-)?ports$": ++ type: object ++ properties: ++ '#address-cells': ++ const: 1 ++ '#size-cells': ++ const: 0 ++ ++ patternProperties: ++ "^(ethernet-)?port@[0-6]$": ++ type: object ++ description: Ethernet switch ports ++ ++ properties: ++ reg: ++ description: Port number ++ ++ label: ++ description: ++ Describes the label associated with this port, which will become ++ the netdev name ++ $ref: /schemas/types.yaml#/definitions/string ++ ++ link: ++ description: ++ Should be a list of phandles to other switch's DSA port. This ++ port is used as the outgoing port towards the phandle ports. The ++ full routing information must be given, not just the one hop ++ routes to neighbouring switches ++ $ref: /schemas/types.yaml#/definitions/phandle-array ++ ++ ethernet: ++ description: ++ Should be a phandle to a valid Ethernet device node. This host ++ device is what the switch port is connected to ++ $ref: /schemas/types.yaml#/definitions/phandle ++ ++ phy-handle: true ++ ++ phy-mode: true ++ ++ fixed-link: true ++ ++ mac-address: true ++ ++ sfp: true ++ ++ qca,sgmii-rxclk-falling-edge: ++ $ref: /schemas/types.yaml#/definitions/flag ++ description: ++ Set the receive clock phase to falling edge. Mostly commonly used on ++ the QCA8327 with CPU port 0 set to SGMII. ++ ++ qca,sgmii-txclk-falling-edge: ++ $ref: /schemas/types.yaml#/definitions/flag ++ description: ++ Set the transmit clock phase to falling edge. ++ ++ qca,sgmii-enable-pll: ++ $ref: /schemas/types.yaml#/definitions/flag ++ description: ++ For SGMII CPU port, explicitly enable PLL, TX and RX chain along with ++ Signal Detection. 
On the QCA8327 this should not be enabled, otherwise ++ the SGMII port will not initialize. When used on the QCA8337, revision 3 ++ or greater, a warning will be displayed. When the CPU port is set to ++ SGMII on the QCA8337, it is advised to set this unless a communication ++ issue is observed. ++ ++ required: ++ - reg ++ ++ additionalProperties: false ++ ++oneOf: ++ - required: ++ - ports ++ - required: ++ - ethernet-ports ++ ++required: ++ - compatible ++ - reg ++ ++additionalProperties: true ++ ++examples: ++ - | ++ #include <dt-bindings/gpio/gpio.h> ++ ++ mdio { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ external_phy_port1: ethernet-phy@0 { ++ reg = <0>; ++ }; ++ ++ external_phy_port2: ethernet-phy@1 { ++ reg = <1>; ++ }; ++ ++ external_phy_port3: ethernet-phy@2 { ++ reg = <2>; ++ }; ++ ++ external_phy_port4: ethernet-phy@3 { ++ reg = <3>; ++ }; ++ ++ external_phy_port5: ethernet-phy@4 { ++ reg = <4>; ++ }; ++ ++ switch@10 { ++ compatible = "qca,qca8337"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>; ++ reg = <0x10>; ++ ++ ports { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ port@0 { ++ reg = <0>; ++ label = "cpu"; ++ ethernet = <&gmac1>; ++ phy-mode = "rgmii"; ++ ++ fixed-link { ++ speed = <1000>; ++ full-duplex; ++ }; ++ }; ++ ++ port@1 { ++ reg = <1>; ++ label = "lan1"; ++ phy-handle = <&external_phy_port1>; ++ }; ++ ++ port@2 { ++ reg = <2>; ++ label = "lan2"; ++ phy-handle = <&external_phy_port2>; ++ }; ++ ++ port@3 { ++ reg = <3>; ++ label = "lan3"; ++ phy-handle = <&external_phy_port3>; ++ }; ++ ++ port@4 { ++ reg = <4>; ++ label = "lan4"; ++ phy-handle = <&external_phy_port4>; ++ }; ++ ++ port@5 { ++ reg = <5>; ++ label = "wan"; ++ phy-handle = <&external_phy_port5>; ++ }; ++ }; ++ }; ++ }; ++ - | ++ #include <dt-bindings/gpio/gpio.h> ++ ++ mdio { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ switch@10 { ++ compatible = "qca,qca8337"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>; ++ reg = <0x10>; ++ ++ ports { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ port@0 { ++ reg = <0>; ++ label = "cpu"; ++ ethernet = <&gmac1>; ++ phy-mode = "rgmii"; ++ ++ fixed-link { ++ speed = <1000>; ++ full-duplex; ++ }; ++ }; ++ ++ port@1 { ++ reg = <1>; ++ label = "lan1"; ++ phy-mode = "internal"; ++ phy-handle = <&internal_phy_port1>; ++ }; ++ ++ port@2 { ++ reg = <2>; ++ label = "lan2"; ++ phy-mode = "internal"; ++ phy-handle = <&internal_phy_port2>; ++ }; ++ ++ port@3 { ++ reg = <3>; ++ label = "lan3"; ++ phy-mode = "internal"; ++ phy-handle = <&internal_phy_port3>; ++ }; ++ ++ port@4 { ++ reg = <4>; ++ label = "lan4"; ++ phy-mode = "internal"; ++ phy-handle = <&internal_phy_port4>; ++ }; ++ ++ port@5 { ++ reg = <5>; ++ label = "wan"; ++ phy-mode = "internal"; ++ phy-handle = <&internal_phy_port5>; ++ }; ++ ++ port@6 { ++ reg = <0>; ++ label = "cpu"; ++ ethernet = <&gmac1>; ++ phy-mode = "sgmii"; ++ ++ qca,sgmii-rxclk-falling-edge; ++ ++ fixed-link { ++ speed = <1000>; ++ full-duplex; ++ }; ++ }; ++ }; ++ ++ mdio { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ internal_phy_port1: ethernet-phy@0 { ++ reg = <0>; ++ }; ++ ++ internal_phy_port2: ethernet-phy@1 { ++ reg = <1>; ++ }; ++ ++ internal_phy_port3: ethernet-phy@2 { ++ reg = <2>; ++ }; ++ ++ internal_phy_port4: ethernet-phy@3 { ++ reg = <3>; ++ }; ++ ++ internal_phy_port5: ethernet-phy@4 { ++ reg = <4>; ++ }; ++ }; ++ }; ++ }; diff --git 
a/target/linux/generic/backport-5.15/748-v5.16-net-dsa-qca8k-fix-delay-applied-to-wrong-cpu-in-parse-p.patch b/target/linux/generic/backport-5.15/748-v5.16-net-dsa-qca8k-fix-delay-applied-to-wrong-cpu-in-parse-p.patch new file mode 100644 index 0000000000..a510cfdc18 --- /dev/null +++ b/target/linux/generic/backport-5.15/748-v5.16-net-dsa-qca8k-fix-delay-applied-to-wrong-cpu-in-parse-p.patch @@ -0,0 +1,28 @@ +From 06dd34a628ae5b6a839b757e746de165d6789ca8 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Sun, 17 Oct 2021 16:56:46 +0200 +Subject: net: dsa: qca8k: fix delay applied to wrong cpu in parse_port_config + +Fix delay settings applied to wrong cpu in parse_port_config. The delay +values is set to the wrong index as the cpu_port_index is incremented +too early. Start the cpu_port_index to -1 so the correct value is +applied to address also the case with invalid phy mode and not available +port. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -976,7 +976,7 @@ qca8k_setup_of_pws_reg(struct qca8k_priv + static int + qca8k_parse_port_config(struct qca8k_priv *priv) + { +- int port, cpu_port_index = 0, ret; ++ int port, cpu_port_index = -1, ret; + struct device_node *port_dn; + phy_interface_t mode; + struct dsa_port *dp; diff --git a/target/linux/generic/backport-5.15/749-v5.16-net-dsa-qca8k-tidy-for-loop-in-setup-and-add-cpu-port-c.patch b/target/linux/generic/backport-5.15/749-v5.16-net-dsa-qca8k-tidy-for-loop-in-setup-and-add-cpu-port-c.patch new file mode 100644 index 0000000000..71fa3022d5 --- /dev/null +++ b/target/linux/generic/backport-5.15/749-v5.16-net-dsa-qca8k-tidy-for-loop-in-setup-and-add-cpu-port-c.patch @@ -0,0 +1,151 @@ +From 040e926f5813a5f4cc18dbff7c942d1e52f368f2 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Tue, 19 Oct 2021 02:08:50 +0200 +Subject: net: dsa: qca8k: tidy for loop in setup and add cpu port check + +Tidy and organize qca8k setup function from multiple for loop. +Change for loop in bridge leave/join to scan all port and skip cpu port. +No functional change intended. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 74 +++++++++++++++++++++++++++++-------------------- + 1 file changed, 44 insertions(+), 30 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1122,28 +1122,34 @@ qca8k_setup(struct dsa_switch *ds) + if (ret) + dev_warn(priv->dev, "mib init failed"); + +- /* Enable QCA header mode on the cpu port */ +- ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(cpu_port), +- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S | +- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S); +- if (ret) { +- dev_err(priv->dev, "failed enabling QCA header mode"); +- return ret; +- } +- +- /* Disable forwarding by default on all ports */ ++ /* Initial setup of all ports */ + for (i = 0; i < QCA8K_NUM_PORTS; i++) { ++ /* Disable forwarding by default on all ports */ + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), + QCA8K_PORT_LOOKUP_MEMBER, 0); + if (ret) + return ret; +- } + +- /* Disable MAC by default on all ports */ +- for (i = 1; i < QCA8K_NUM_PORTS; i++) +- qca8k_port_set_status(priv, i, 0); ++ /* Enable QCA header mode on all cpu ports */ ++ if (dsa_is_cpu_port(ds, i)) { ++ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i), ++ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S | ++ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S); ++ if (ret) { ++ dev_err(priv->dev, "failed enabling QCA header mode"); ++ return ret; ++ } ++ } ++ ++ /* Disable MAC by default on all user ports */ ++ if (dsa_is_user_port(ds, i)) ++ qca8k_port_set_status(priv, i, 0); ++ } + +- /* Forward all unknown frames to CPU port for Linux processing */ ++ /* Forward all unknown frames to CPU port for Linux processing ++ * Notice that in multi-cpu config only one port should be set ++ * for igmp, unknown, multicast and broadcast packet ++ */ + ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, + BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S | + BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S | +@@ -1152,11 +1158,13 @@ qca8k_setup(struct dsa_switch *ds) + if (ret) + return ret; + +- /* Setup connection between CPU port & user ports */ ++ /* Setup connection between CPU port & user ports ++ * Configure specific switch configuration for ports ++ */ + for (i = 0; i < QCA8K_NUM_PORTS; i++) { + /* CPU port gets connected to all user ports of the switch */ + if (dsa_is_cpu_port(ds, i)) { +- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(cpu_port), ++ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), + QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds)); + if (ret) + return ret; +@@ -1193,16 +1201,14 @@ qca8k_setup(struct dsa_switch *ds) + if (ret) + return ret; + } +- } + +- /* The port 5 of the qca8337 have some problem in flood condition. The +- * original legacy driver had some specific buffer and priority settings +- * for the different port suggested by the QCA switch team. Add this +- * missing settings to improve switch stability under load condition. +- * This problem is limited to qca8337 and other qca8k switch are not affected. +- */ +- if (priv->switch_id == QCA8K_ID_QCA8337) { +- for (i = 0; i < QCA8K_NUM_PORTS; i++) { ++ /* The port 5 of the qca8337 have some problem in flood condition. The ++ * original legacy driver had some specific buffer and priority settings ++ * for the different port suggested by the QCA switch team. Add this ++ * missing settings to improve switch stability under load condition. ++ * This problem is limited to qca8337 and other qca8k switch are not affected. 
++ */ ++ if (priv->switch_id == QCA8K_ID_QCA8337) { + switch (i) { + /* The 2 CPU port and port 5 requires some different + * priority than any other ports. +@@ -1238,6 +1244,12 @@ qca8k_setup(struct dsa_switch *ds) + QCA8K_PORT_HOL_CTRL1_WRED_EN, + mask); + } ++ ++ /* Set initial MTU for every port. ++ * We have only have a general MTU setting. So track ++ * every port and set the max across all port. ++ */ ++ priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN; + } + + /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ +@@ -1251,8 +1263,6 @@ qca8k_setup(struct dsa_switch *ds) + } + + /* Setup our port MTUs to match power on defaults */ +- for (i = 0; i < QCA8K_NUM_PORTS; i++) +- priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN; + ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN); + if (ret) + dev_warn(priv->dev, "failed setting MTU settings"); +@@ -1728,7 +1738,9 @@ qca8k_port_bridge_join(struct dsa_switch + cpu_port = dsa_to_port(ds, port)->cpu_dp->index; + port_mask = BIT(cpu_port); + +- for (i = 1; i < QCA8K_NUM_PORTS; i++) { ++ for (i = 0; i < QCA8K_NUM_PORTS; i++) { ++ if (dsa_is_cpu_port(ds, i)) ++ continue; + if (dsa_to_port(ds, i)->bridge_dev != br) + continue; + /* Add this port to the portvlan mask of the other ports +@@ -1758,7 +1770,9 @@ qca8k_port_bridge_leave(struct dsa_switc + + cpu_port = dsa_to_port(ds, port)->cpu_dp->index; + +- for (i = 1; i < QCA8K_NUM_PORTS; i++) { ++ for (i = 0; i < QCA8K_NUM_PORTS; i++) { ++ if (dsa_is_cpu_port(ds, i)) ++ continue; + if (dsa_to_port(ds, i)->bridge_dev != br) + continue; + /* Remove this port to the portvlan mask of the other ports diff --git a/target/linux/generic/backport-5.15/750-v5.16-net-dsa-qca8k-make-sure-pad0-mac06-exchange-is-disabled.patch b/target/linux/generic/backport-5.15/750-v5.16-net-dsa-qca8k-make-sure-pad0-mac06-exchange-is-disabled.patch new file mode 100644 index 0000000000..4a61703c52 --- /dev/null +++ b/target/linux/generic/backport-5.15/750-v5.16-net-dsa-qca8k-make-sure-pad0-mac06-exchange-is-disabled.patch @@ -0,0 +1,47 @@ +From 5f15d392dcb4aa250a63d6f2c5adfc26c0aedc78 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Tue, 2 Nov 2021 19:30:41 +0100 +Subject: net: dsa: qca8k: make sure PAD0 MAC06 exchange is disabled + +Some device set MAC06 exchange in the bootloader. This cause some +problem as we don't support this strange mode and we just set the port6 +as the primary CPU port. With MAC06 exchange, PAD0 reg configure port6 +instead of port0. Add an extra check and explicitly disable MAC06 exchange +to correctly configure the port PAD config. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Fixes: 3fcf734aa482 ("net: dsa: qca8k: add support for cpu port 6") +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 8 ++++++++ + drivers/net/dsa/qca8k.h | 1 + + 2 files changed, 9 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1109,6 +1109,14 @@ qca8k_setup(struct dsa_switch *ds) + if (ret) + return ret; + ++ /* Make sure MAC06 is disabled */ ++ ret = qca8k_reg_clear(priv, QCA8K_REG_PORT0_PAD_CTRL, ++ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN); ++ if (ret) { ++ dev_err(priv->dev, "failed disabling MAC06 exchange"); ++ return ret; ++ } ++ + /* Enable CPU Port */ + ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, + QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -34,6 +34,7 @@ + #define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8) + #define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8) + #define QCA8K_REG_PORT0_PAD_CTRL 0x004 ++#define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31) + #define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19) + #define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18) + #define QCA8K_REG_PORT5_PAD_CTRL 0x008 diff --git a/target/linux/generic/backport-5.15/751-v5.16-net-dsa-qca8k-fix-internal-delay-applied-to-the-wrong-PAD.patch b/target/linux/generic/backport-5.15/751-v5.16-net-dsa-qca8k-fix-internal-delay-applied-to-the-wrong-PAD.patch new file mode 100644 index 0000000000..df9518d86c --- /dev/null +++ b/target/linux/generic/backport-5.15/751-v5.16-net-dsa-qca8k-fix-internal-delay-applied-to-the-wrong-PAD.patch @@ -0,0 +1,48 @@ +From 3b00a07c2443745d62babfe08dbb2ad8e649526e Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Fri, 19 Nov 2021 03:03:49 +0100 +Subject: [PATCH] net: dsa: qca8k: fix internal delay applied to the wrong PAD + config + +With SGMII phy the internal delay is always applied to the PAD0 config. +This is caused by the falling edge configuration that hardcode the reg +to PAD0 (as the falling edge bits are present only in PAD0 reg) +Move the delay configuration before the reg overwrite to correctly apply +the delay. + +Fixes: cef08115846e ("net: dsa: qca8k: set internal delay also for sgmii") +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1433,6 +1433,12 @@ qca8k_phylink_mac_config(struct dsa_swit + + qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val); + ++ /* From original code is reported port instability as SGMII also ++ * require delay set. Apply advised values here or take them from DT. ++ */ ++ if (state->interface == PHY_INTERFACE_MODE_SGMII) ++ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); ++ + /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and + * falling edge is set writing in the PORT0 PAD reg + */ +@@ -1455,12 +1461,6 @@ qca8k_phylink_mac_config(struct dsa_swit + QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE, + val); + +- /* From original code is reported port instability as SGMII also +- * require delay set. Apply advised values here or take them from DT. 
+- */ +- if (state->interface == PHY_INTERFACE_MODE_SGMII) +- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); +- + break; + default: + dev_err(ds->dev, "xMII mode %s not supported for port %d\n", diff --git a/target/linux/generic/backport-5.15/752-v5.16-net-dsa-qca8k-fix-MTU-calculation.patch b/target/linux/generic/backport-5.15/752-v5.16-net-dsa-qca8k-fix-MTU-calculation.patch new file mode 100644 index 0000000000..7348d93ec4 --- /dev/null +++ b/target/linux/generic/backport-5.15/752-v5.16-net-dsa-qca8k-fix-MTU-calculation.patch @@ -0,0 +1,46 @@ +From 65258b9d8cde45689bdc86ca39b50f01f983733b Mon Sep 17 00:00:00 2001 +From: Robert Marko <robert.marko@sartura.hr> +Date: Fri, 19 Nov 2021 03:03:50 +0100 +Subject: [PATCH] net: dsa: qca8k: fix MTU calculation + +qca8k has a global MTU, so its tracking the MTU per port to make sure +that the largest MTU gets applied. +Since it uses the frame size instead of MTU the driver MTU change function +will then add the size of Ethernet header and checksum on top of MTU. + +The driver currently populates the per port MTU size as Ethernet frame +length + checksum which equals 1518. + +The issue is that then MTU change function will go through all of the +ports, find the largest MTU and apply the Ethernet header + checksum on +top of it again, so for a desired MTU of 1500 you will end up with 1536. + +This is obviously incorrect, so to correct it populate the per port struct +MTU with just the MTU and not include the Ethernet header + checksum size +as those will be added by the MTU change function. + +Fixes: f58d2598cf70 ("net: dsa: qca8k: implement the port MTU callbacks") +Signed-off-by: Robert Marko <robert.marko@sartura.hr> +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1256,8 +1256,12 @@ qca8k_setup(struct dsa_switch *ds) + /* Set initial MTU for every port. + * We have only have a general MTU setting. So track + * every port and set the max across all port. ++ * Set per port MTU to 1500 as the MTU change function ++ * will add the overhead and if its set to 1518 then it ++ * will apply the overhead again and we will end up with ++ * MTU of 1536 instead of 1518 + */ +- priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN; ++ priv->port_mtu[i] = ETH_DATA_LEN; + } + + /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ diff --git a/target/linux/generic/backport-5.15/753-net-next-net-dsa-qca8k-remove-redundant-check-in-parse_port_config.patch b/target/linux/generic/backport-5.15/753-net-next-net-dsa-qca8k-remove-redundant-check-in-parse_port_config.patch new file mode 100644 index 0000000000..f477b1b929 --- /dev/null +++ b/target/linux/generic/backport-5.15/753-net-next-net-dsa-qca8k-remove-redundant-check-in-parse_port_config.patch @@ -0,0 +1,29 @@ +From b9133f3ef5a2659730cf47a74bd0a9259f1cf8ff Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Mon, 22 Nov 2021 16:23:40 +0100 +Subject: net: dsa: qca8k: remove redundant check in parse_port_config + +The very next check for port 0 and 6 already makes sure we don't go out +of bounds with the ports_config delay table. +Remove the redundant check. 
+ +Reported-by: kernel test robot <lkp@intel.com> +Reported-by: Dan Carpenter <dan.carpenter@oracle.com> +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -983,7 +983,7 @@ qca8k_parse_port_config(struct qca8k_pri + u32 delay; + + /* We have 2 CPU port. Check them */ +- for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) { ++ for (port = 0; port < QCA8K_NUM_PORTS; port++) { + /* Skip every other port */ + if (port != 0 && port != 6) + continue; diff --git a/target/linux/generic/backport-5.15/754-net-next-net-dsa-qca8k-convert-to-GENMASK_FIELD_PREP_FIELD_GET.patch b/target/linux/generic/backport-5.15/754-net-next-net-dsa-qca8k-convert-to-GENMASK_FIELD_PREP_FIELD_GET.patch new file mode 100644 index 0000000000..c1489fd9a8 --- /dev/null +++ b/target/linux/generic/backport-5.15/754-net-next-net-dsa-qca8k-convert-to-GENMASK_FIELD_PREP_FIELD_GET.patch @@ -0,0 +1,508 @@ +From 90ae68bfc2ffcb54a4ba4f64edbeb84a80cbb57c Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Mon, 22 Nov 2021 16:23:41 +0100 +Subject: net: dsa: qca8k: convert to GENMASK/FIELD_PREP/FIELD_GET + +Convert and try to standardize bit fields using +GENMASK/FIELD_PREP/FIELD_GET macros. Rework some logic to support the +standard macro and tidy things up. No functional change intended. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 98 +++++++++++++++---------------- + drivers/net/dsa/qca8k.h | 153 ++++++++++++++++++++++++++---------------------- + 2 files changed, 130 insertions(+), 121 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -9,6 +9,7 @@ + #include <linux/module.h> + #include <linux/phy.h> + #include <linux/netdevice.h> ++#include <linux/bitfield.h> + #include <net/dsa.h> + #include <linux/of_net.h> + #include <linux/of_mdio.h> +@@ -319,18 +320,18 @@ qca8k_fdb_read(struct qca8k_priv *priv, + } + + /* vid - 83:72 */ +- fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M; ++ fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]); + /* aging - 67:64 */ +- fdb->aging = reg[2] & QCA8K_ATU_STATUS_M; ++ fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]); + /* portmask - 54:48 */ +- fdb->port_mask = (reg[1] >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M; ++ fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]); + /* mac - 47:0 */ +- fdb->mac[0] = (reg[1] >> QCA8K_ATU_ADDR0_S) & 0xff; +- fdb->mac[1] = reg[1] & 0xff; +- fdb->mac[2] = (reg[0] >> QCA8K_ATU_ADDR2_S) & 0xff; +- fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff; +- fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff; +- fdb->mac[5] = reg[0] & 0xff; ++ fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]); ++ fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]); ++ fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]); ++ fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]); ++ fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]); ++ fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]); + + return 0; + } +@@ -343,18 +344,18 @@ qca8k_fdb_write(struct qca8k_priv *priv, + int i; + + /* vid - 83:72 */ +- reg[2] = (vid & QCA8K_ATU_VID_M) << QCA8K_ATU_VID_S; ++ reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid); + /* aging - 67:64 */ +- reg[2] |= 
aging & QCA8K_ATU_STATUS_M; ++ reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging); + /* portmask - 54:48 */ +- reg[1] = (port_mask & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S; ++ reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask); + /* mac - 47:0 */ +- reg[1] |= mac[0] << QCA8K_ATU_ADDR0_S; +- reg[1] |= mac[1]; +- reg[0] |= mac[2] << QCA8K_ATU_ADDR2_S; +- reg[0] |= mac[3] << QCA8K_ATU_ADDR3_S; +- reg[0] |= mac[4] << QCA8K_ATU_ADDR4_S; +- reg[0] |= mac[5]; ++ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]); ++ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]); ++ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]); ++ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]); ++ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]); ++ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]); + + /* load the array into the ARL table */ + for (i = 0; i < 3; i++) +@@ -372,7 +373,7 @@ qca8k_fdb_access(struct qca8k_priv *priv + reg |= cmd; + if (port >= 0) { + reg |= QCA8K_ATU_FUNC_PORT_EN; +- reg |= (port & QCA8K_ATU_FUNC_PORT_M) << QCA8K_ATU_FUNC_PORT_S; ++ reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port); + } + + /* Write the function register triggering the table access */ +@@ -454,7 +455,7 @@ qca8k_vlan_access(struct qca8k_priv *pri + /* Set the command and VLAN index */ + reg = QCA8K_VTU_FUNC1_BUSY; + reg |= cmd; +- reg |= vid << QCA8K_VTU_FUNC1_VID_S; ++ reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid); + + /* Write the function register triggering the table access */ + ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg); +@@ -500,13 +501,11 @@ qca8k_vlan_add(struct qca8k_priv *priv, + if (ret < 0) + goto out; + reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN; +- reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port)); ++ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); + if (untagged) +- reg |= QCA8K_VTU_FUNC0_EG_MODE_UNTAG << +- QCA8K_VTU_FUNC0_EG_MODE_S(port); ++ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port); + else +- reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG << +- QCA8K_VTU_FUNC0_EG_MODE_S(port); ++ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port); + + ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); + if (ret) +@@ -534,15 +533,13 @@ qca8k_vlan_del(struct qca8k_priv *priv, + ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®); + if (ret < 0) + goto out; +- reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port)); +- reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT << +- QCA8K_VTU_FUNC0_EG_MODE_S(port); ++ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); ++ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port); + + /* Check if we're the last member to be removed */ + del = true; + for (i = 0; i < QCA8K_NUM_PORTS; i++) { +- mask = QCA8K_VTU_FUNC0_EG_MODE_NOT; +- mask <<= QCA8K_VTU_FUNC0_EG_MODE_S(i); ++ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i); + + if ((reg & mask) != mask) { + del = false; +@@ -1014,7 +1011,7 @@ qca8k_parse_port_config(struct qca8k_pri + mode == PHY_INTERFACE_MODE_RGMII_TXID) + delay = 1; + +- if (delay > QCA8K_MAX_DELAY) { ++ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) { + dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value"); + delay = 3; + } +@@ -1030,7 +1027,7 @@ qca8k_parse_port_config(struct qca8k_pri + mode == PHY_INTERFACE_MODE_RGMII_RXID) + delay = 2; + +- if (delay > QCA8K_MAX_DELAY) { ++ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) { + dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value"); + delay = 3; + } +@@ -1141,8 +1138,8 @@ qca8k_setup(struct dsa_switch *ds) + /* Enable QCA 
header mode on all cpu ports */ + if (dsa_is_cpu_port(ds, i)) { + ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i), +- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S | +- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S); ++ FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) | ++ FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL)); + if (ret) { + dev_err(priv->dev, "failed enabling QCA header mode"); + return ret; +@@ -1159,10 +1156,10 @@ qca8k_setup(struct dsa_switch *ds) + * for igmp, unknown, multicast and broadcast packet + */ + ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, +- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S | +- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S | +- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S | +- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S); ++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) | ++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) | ++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) | ++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port))); + if (ret) + return ret; + +@@ -1180,8 +1177,6 @@ qca8k_setup(struct dsa_switch *ds) + + /* Individual user ports get connected to CPU port only */ + if (dsa_is_user_port(ds, i)) { +- int shift = 16 * (i % 2); +- + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), + QCA8K_PORT_LOOKUP_MEMBER, + BIT(cpu_port)); +@@ -1198,8 +1193,8 @@ qca8k_setup(struct dsa_switch *ds) + * default egress vid + */ + ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i), +- 0xfff << shift, +- QCA8K_PORT_VID_DEF << shift); ++ QCA8K_EGREES_VLAN_PORT_MASK(i), ++ QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF)); + if (ret) + return ret; + +@@ -1246,7 +1241,7 @@ qca8k_setup(struct dsa_switch *ds) + QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | + QCA8K_PORT_HOL_CTRL1_WRED_EN; + qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i), +- QCA8K_PORT_HOL_CTRL1_ING_BUF | ++ QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK | + QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | + QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | + QCA8K_PORT_HOL_CTRL1_WRED_EN, +@@ -1269,8 +1264,8 @@ qca8k_setup(struct dsa_switch *ds) + mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) | + QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496); + qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH, +- QCA8K_GLOBAL_FC_GOL_XON_THRES_S | +- QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S, ++ QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK | ++ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, + mask); + } + +@@ -1918,11 +1913,11 @@ qca8k_port_vlan_filtering(struct dsa_swi + + if (vlan_filtering) { + qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), +- QCA8K_PORT_LOOKUP_VLAN_MODE, ++ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, + QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE); + } else { + qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), +- QCA8K_PORT_LOOKUP_VLAN_MODE, ++ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, + QCA8K_PORT_LOOKUP_VLAN_MODE_NONE); + } + +@@ -1953,11 +1948,9 @@ qca8k_port_vlan_add(struct dsa_switch *d + dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret); + + if (pvid) { +- int shift = 16 * (port % 2); +- + qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port), +- 0xfff << shift, +- vlan->vid_end << shift); ++ QCA8K_EGREES_VLAN_PORT_MASK(port), ++ QCA8K_EGREES_VLAN_PORT(port, vlan->vid_end)); + qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port), + QCA8K_PORT_VLAN_CVID(vlan->vid_end) | + QCA8K_PORT_VLAN_SVID(vlan->vid_end)); +@@ -2050,7 +2043,7 @@ static int qca8k_read_switch_id(struct q + if (ret < 0) + return -ENODEV; + +- id = QCA8K_MASK_CTRL_DEVICE_ID(val & QCA8K_MASK_CTRL_DEVICE_ID_MASK); ++ id = QCA8K_MASK_CTRL_DEVICE_ID(val); 
+ if (id != data->id) { + dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id); + return -ENODEV; +@@ -2059,7 +2052,7 @@ static int qca8k_read_switch_id(struct q + priv->switch_id = id; + + /* Save revision to communicate to the internal PHY driver */ +- priv->switch_revision = (val & QCA8K_MASK_CTRL_REV_ID_MASK); ++ priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val); + + return 0; + } +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -30,9 +30,9 @@ + /* Global control registers */ + #define QCA8K_REG_MASK_CTRL 0x000 + #define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0) +-#define QCA8K_MASK_CTRL_REV_ID(x) ((x) >> 0) ++#define QCA8K_MASK_CTRL_REV_ID(x) FIELD_GET(QCA8K_MASK_CTRL_REV_ID_MASK, x) + #define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8) +-#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8) ++#define QCA8K_MASK_CTRL_DEVICE_ID(x) FIELD_GET(QCA8K_MASK_CTRL_DEVICE_ID_MASK, x) + #define QCA8K_REG_PORT0_PAD_CTRL 0x004 + #define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31) + #define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19) +@@ -41,12 +41,11 @@ + #define QCA8K_REG_PORT6_PAD_CTRL 0x00c + #define QCA8K_PORT_PAD_RGMII_EN BIT(26) + #define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22) +-#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22) ++#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, x) + #define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20) +-#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20) ++#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, x) + #define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25) + #define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24) +-#define QCA8K_MAX_DELAY 3 + #define QCA8K_PORT_PAD_SGMII_EN BIT(7) + #define QCA8K_REG_PWS 0x010 + #define QCA8K_PWS_POWER_ON_SEL BIT(31) +@@ -68,10 +67,12 @@ + #define QCA8K_MDIO_MASTER_READ BIT(27) + #define QCA8K_MDIO_MASTER_WRITE 0 + #define QCA8K_MDIO_MASTER_SUP_PRE BIT(26) +-#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21) +-#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16) +-#define QCA8K_MDIO_MASTER_DATA(x) (x) ++#define QCA8K_MDIO_MASTER_PHY_ADDR_MASK GENMASK(25, 21) ++#define QCA8K_MDIO_MASTER_PHY_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_PHY_ADDR_MASK, x) ++#define QCA8K_MDIO_MASTER_REG_ADDR_MASK GENMASK(20, 16) ++#define QCA8K_MDIO_MASTER_REG_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_REG_ADDR_MASK, x) + #define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0) ++#define QCA8K_MDIO_MASTER_DATA(x) FIELD_PREP(QCA8K_MDIO_MASTER_DATA_MASK, x) + #define QCA8K_MDIO_MASTER_MAX_PORTS 5 + #define QCA8K_MDIO_MASTER_MAX_REG 32 + #define QCA8K_GOL_MAC_ADDR0 0x60 +@@ -93,9 +94,7 @@ + #define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12) + #define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4)) + #define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2) +-#define QCA8K_PORT_HDR_CTRL_RX_S 2 + #define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0) +-#define QCA8K_PORT_HDR_CTRL_TX_S 0 + #define QCA8K_PORT_HDR_CTRL_ALL 2 + #define QCA8K_PORT_HDR_CTRL_MGMT 1 + #define QCA8K_PORT_HDR_CTRL_NONE 0 +@@ -105,10 +104,11 @@ + #define QCA8K_SGMII_EN_TX BIT(3) + #define QCA8K_SGMII_EN_SD BIT(4) + #define QCA8K_SGMII_CLK125M_DELAY BIT(7) +-#define QCA8K_SGMII_MODE_CTRL_MASK (BIT(22) | BIT(23)) +-#define QCA8K_SGMII_MODE_CTRL_BASEX (0 << 22) +-#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22) +-#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22) ++#define QCA8K_SGMII_MODE_CTRL_MASK GENMASK(23, 22) ++#define QCA8K_SGMII_MODE_CTRL(x) FIELD_PREP(QCA8K_SGMII_MODE_CTRL_MASK, x) ++#define 
QCA8K_SGMII_MODE_CTRL_BASEX QCA8K_SGMII_MODE_CTRL(0x0) ++#define QCA8K_SGMII_MODE_CTRL_PHY QCA8K_SGMII_MODE_CTRL(0x1) ++#define QCA8K_SGMII_MODE_CTRL_MAC QCA8K_SGMII_MODE_CTRL(0x2) + + /* MAC_PWR_SEL registers */ + #define QCA8K_REG_MAC_PWR_SEL 0x0e4 +@@ -121,100 +121,115 @@ + + /* ACL registers */ + #define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8)) +-#define QCA8K_PORT_VLAN_CVID(x) (x << 16) +-#define QCA8K_PORT_VLAN_SVID(x) x ++#define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16) ++#define QCA8K_PORT_VLAN_CVID(x) FIELD_PREP(QCA8K_PORT_VLAN_CVID_MASK, x) ++#define QCA8K_PORT_VLAN_SVID_MASK GENMASK(11, 0) ++#define QCA8K_PORT_VLAN_SVID(x) FIELD_PREP(QCA8K_PORT_VLAN_SVID_MASK, x) + #define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8)) + #define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470 + #define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474 + + /* Lookup registers */ + #define QCA8K_REG_ATU_DATA0 0x600 +-#define QCA8K_ATU_ADDR2_S 24 +-#define QCA8K_ATU_ADDR3_S 16 +-#define QCA8K_ATU_ADDR4_S 8 ++#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24) ++#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16) ++#define QCA8K_ATU_ADDR4_MASK GENMASK(15, 8) ++#define QCA8K_ATU_ADDR5_MASK GENMASK(7, 0) + #define QCA8K_REG_ATU_DATA1 0x604 +-#define QCA8K_ATU_PORT_M 0x7f +-#define QCA8K_ATU_PORT_S 16 +-#define QCA8K_ATU_ADDR0_S 8 ++#define QCA8K_ATU_PORT_MASK GENMASK(22, 16) ++#define QCA8K_ATU_ADDR0_MASK GENMASK(15, 8) ++#define QCA8K_ATU_ADDR1_MASK GENMASK(7, 0) + #define QCA8K_REG_ATU_DATA2 0x608 +-#define QCA8K_ATU_VID_M 0xfff +-#define QCA8K_ATU_VID_S 8 +-#define QCA8K_ATU_STATUS_M 0xf ++#define QCA8K_ATU_VID_MASK GENMASK(19, 8) ++#define QCA8K_ATU_STATUS_MASK GENMASK(3, 0) + #define QCA8K_ATU_STATUS_STATIC 0xf + #define QCA8K_REG_ATU_FUNC 0x60c + #define QCA8K_ATU_FUNC_BUSY BIT(31) + #define QCA8K_ATU_FUNC_PORT_EN BIT(14) + #define QCA8K_ATU_FUNC_MULTI_EN BIT(13) + #define QCA8K_ATU_FUNC_FULL BIT(12) +-#define QCA8K_ATU_FUNC_PORT_M 0xf +-#define QCA8K_ATU_FUNC_PORT_S 8 ++#define QCA8K_ATU_FUNC_PORT_MASK GENMASK(11, 8) + #define QCA8K_REG_VTU_FUNC0 0x610 + #define QCA8K_VTU_FUNC0_VALID BIT(20) + #define QCA8K_VTU_FUNC0_IVL_EN BIT(19) +-#define QCA8K_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2) +-#define QCA8K_VTU_FUNC0_EG_MODE_MASK 3 +-#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD 0 +-#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG 1 +-#define QCA8K_VTU_FUNC0_EG_MODE_TAG 2 +-#define QCA8K_VTU_FUNC0_EG_MODE_NOT 3 ++/* QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(17, 4) ++ * It does contain VLAN_MODE for each port [5:4] for port0, ++ * [7:6] for port1 ... [17:16] for port6. Use virtual port ++ * define to handle this. 
++ */ ++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i) (4 + (_i) * 2) ++#define QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(1, 0) ++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(_i) (GENMASK(1, 0) << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) ++#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x0) ++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNMOD(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNMOD << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) ++#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x1) ++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNTAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) ++#define QCA8K_VTU_FUNC0_EG_MODE_TAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x2) ++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_TAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) ++#define QCA8K_VTU_FUNC0_EG_MODE_NOT FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x3) ++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(_i) (QCA8K_VTU_FUNC0_EG_MODE_NOT << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) + #define QCA8K_REG_VTU_FUNC1 0x614 + #define QCA8K_VTU_FUNC1_BUSY BIT(31) +-#define QCA8K_VTU_FUNC1_VID_S 16 ++#define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16) + #define QCA8K_VTU_FUNC1_FULL BIT(4) + #define QCA8K_REG_GLOBAL_FW_CTRL0 0x620 + #define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10) + #define QCA8K_REG_GLOBAL_FW_CTRL1 0x624 +-#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S 24 +-#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_S 16 +-#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_S 8 +-#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_S 0 ++#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24) ++#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16) ++#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK GENMASK(14, 8) ++#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK GENMASK(6, 0) + #define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc) + #define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0) +-#define QCA8K_PORT_LOOKUP_VLAN_MODE GENMASK(9, 8) +-#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE (0 << 8) +-#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK (1 << 8) +-#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK (2 << 8) +-#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE (3 << 8) ++#define QCA8K_PORT_LOOKUP_VLAN_MODE_MASK GENMASK(9, 8) ++#define QCA8K_PORT_LOOKUP_VLAN_MODE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, x) ++#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE QCA8K_PORT_LOOKUP_VLAN_MODE(0x0) ++#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK QCA8K_PORT_LOOKUP_VLAN_MODE(0x1) ++#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK QCA8K_PORT_LOOKUP_VLAN_MODE(0x2) ++#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE QCA8K_PORT_LOOKUP_VLAN_MODE(0x3) + #define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16) +-#define QCA8K_PORT_LOOKUP_STATE_DISABLED (0 << 16) +-#define QCA8K_PORT_LOOKUP_STATE_BLOCKING (1 << 16) +-#define QCA8K_PORT_LOOKUP_STATE_LISTENING (2 << 16) +-#define QCA8K_PORT_LOOKUP_STATE_LEARNING (3 << 16) +-#define QCA8K_PORT_LOOKUP_STATE_FORWARD (4 << 16) +-#define QCA8K_PORT_LOOKUP_STATE GENMASK(18, 16) ++#define QCA8K_PORT_LOOKUP_STATE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_STATE_MASK, x) ++#define QCA8K_PORT_LOOKUP_STATE_DISABLED QCA8K_PORT_LOOKUP_STATE(0x0) ++#define QCA8K_PORT_LOOKUP_STATE_BLOCKING QCA8K_PORT_LOOKUP_STATE(0x1) ++#define QCA8K_PORT_LOOKUP_STATE_LISTENING QCA8K_PORT_LOOKUP_STATE(0x2) ++#define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3) ++#define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4) + #define QCA8K_PORT_LOOKUP_LEARN BIT(20) + + #define QCA8K_REG_GLOBAL_FC_THRESH 0x800 +-#define 
QCA8K_GLOBAL_FC_GOL_XON_THRES(x) ((x) << 16) +-#define QCA8K_GLOBAL_FC_GOL_XON_THRES_S GENMASK(24, 16) +-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) ((x) << 0) +-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S GENMASK(8, 0) ++#define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16) ++#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x) ++#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK GENMASK(8, 0) ++#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, x) + + #define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF GENMASK(3, 0) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) ((x) << 0) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF GENMASK(7, 4) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) ((x) << 4) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF GENMASK(11, 8) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) ((x) << 8) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF GENMASK(15, 12) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) ((x) << 12) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF GENMASK(19, 16) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) ((x) << 16) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF GENMASK(23, 20) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) ((x) << 20) +-#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF GENMASK(29, 24) +-#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) ((x) << 24) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK GENMASK(3, 0) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK, x) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK GENMASK(7, 4) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK, x) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK GENMASK(11, 8) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK, x) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK GENMASK(15, 12) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK, x) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK GENMASK(19, 16) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK, x) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK GENMASK(23, 20) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK, x) ++#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK GENMASK(29, 24) ++#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK, x) + + #define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8) +-#define QCA8K_PORT_HOL_CTRL1_ING_BUF GENMASK(3, 0) +-#define QCA8K_PORT_HOL_CTRL1_ING(x) ((x) << 0) ++#define QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK GENMASK(3, 0) ++#define QCA8K_PORT_HOL_CTRL1_ING(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK, x) + #define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6) + #define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7) + #define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8) + #define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16) + + /* Pkt edit registers */ ++#define QCA8K_EGREES_VLAN_PORT_SHIFT(_i) (16 * ((_i) % 2)) ++#define QCA8K_EGREES_VLAN_PORT_MASK(_i) (GENMASK(11, 0) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i)) ++#define QCA8K_EGREES_VLAN_PORT(_i, x) ((x) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i)) + #define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2))) + + /* L3 registers */ diff --git a/target/linux/generic/backport-5.15/755-net-next-net-dsa-qca8k-remove-extra-mutex_init-in-qca8k_setup.patch 
b/target/linux/generic/backport-5.15/755-net-next-net-dsa-qca8k-remove-extra-mutex_init-in-qca8k_setup.patch new file mode 100644 index 0000000000..8c39b8ea29 --- /dev/null +++ b/target/linux/generic/backport-5.15/755-net-next-net-dsa-qca8k-remove-extra-mutex_init-in-qca8k_setup.patch @@ -0,0 +1,25 @@ +From 994c28b6f971fa5db8ae977daea37eee87d93d51 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Mon, 22 Nov 2021 16:23:42 +0100 +Subject: net: dsa: qca8k: remove extra mutex_init in qca8k_setup + +Mutex is already init in sw_probe. Remove the extra init in qca8k_setup. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 2 -- + 1 file changed, 2 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1086,8 +1086,6 @@ qca8k_setup(struct dsa_switch *ds) + if (ret) + return ret; + +- mutex_init(&priv->reg_mutex); +- + /* Start by setting up the register mapping */ + priv->regmap = devm_regmap_init(ds->dev, NULL, priv, + &qca8k_regmap_config); diff --git a/target/linux/generic/backport-5.15/756-net-next-net-dsa-qca8k-move-regmap-init-in-probe-and-set-it.patch b/target/linux/generic/backport-5.15/756-net-next-net-dsa-qca8k-move-regmap-init-in-probe-and-set-it.patch new file mode 100644 index 0000000000..9fcc74a7ce --- /dev/null +++ b/target/linux/generic/backport-5.15/756-net-next-net-dsa-qca8k-move-regmap-init-in-probe-and-set-it.patch @@ -0,0 +1,46 @@ +From 36b8af12f424e7a7f60a935c60a0fd4aa0822378 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Mon, 22 Nov 2021 16:23:43 +0100 +Subject: net: dsa: qca8k: move regmap init in probe and set it mandatory + +In preparation for regmap conversion, move regmap init in the probe +function and make it mandatory as any read/write/rmw operation will be +converted to regmap API. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 14 ++++++++------ + 1 file changed, 8 insertions(+), 6 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1086,12 +1086,6 @@ qca8k_setup(struct dsa_switch *ds) + if (ret) + return ret; + +- /* Start by setting up the register mapping */ +- priv->regmap = devm_regmap_init(ds->dev, NULL, priv, +- &qca8k_regmap_config); +- if (IS_ERR(priv->regmap)) +- dev_warn(priv->dev, "regmap initialization failed"); +- + ret = qca8k_setup_mdio_bus(priv); + if (ret) + return ret; +@@ -2085,6 +2079,14 @@ qca8k_sw_probe(struct mdio_device *mdiod + gpiod_set_value_cansleep(priv->reset_gpio, 0); + } + ++ /* Start by setting up the register mapping */ ++ priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv, ++ &qca8k_regmap_config); ++ if (IS_ERR(priv->regmap)) { ++ dev_err(priv->dev, "regmap initialization failed"); ++ return PTR_ERR(priv->regmap); ++ } ++ + /* Check the detected switch id */ + ret = qca8k_read_switch_id(priv); + if (ret) diff --git a/target/linux/generic/backport-5.15/757-net-next-net-dsa-qca8k-initial-conversion-to-regmap-heper.patch b/target/linux/generic/backport-5.15/757-net-next-net-dsa-qca8k-initial-conversion-to-regmap-heper.patch new file mode 100644 index 0000000000..4ca9c8ba41 --- /dev/null +++ b/target/linux/generic/backport-5.15/757-net-next-net-dsa-qca8k-initial-conversion-to-regmap-heper.patch @@ -0,0 +1,249 @@ +From 8b5f3f29a81a71934d004e21a1292c1148b05926 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Mon, 22 Nov 2021 16:23:44 +0100 +Subject: net: dsa: qca8k: initial conversion to regmap helper + +Convert any qca8k set/clear/pool to regmap helper and add +missing config to regmap_config struct. +Read/write/rmw operation are reworked to use the regmap helper +internally to keep the delta of this patch low. These additional +function will then be dropped when the code split will be proposed. + +Ipq40xx SoC have the internal switch based on the qca8k regmap but use +mmio for read/write/rmw operation instead of mdio. +In preparation for the support of this internal switch, convert the +driver to regmap API to later split the driver to common and specific +code. The overhead introduced by the use of regamp API is marginal as the +internal mdio will bypass it by using its direct access and regmap will be +used only by configuration functions or fdb access. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 107 +++++++++++++++++++++--------------------------- + 1 file changed, 47 insertions(+), 60 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -10,6 +10,7 @@ + #include <linux/phy.h> + #include <linux/netdevice.h> + #include <linux/bitfield.h> ++#include <linux/regmap.h> + #include <net/dsa.h> + #include <linux/of_net.h> + #include <linux/of_mdio.h> +@@ -152,6 +153,25 @@ qca8k_set_page(struct mii_bus *bus, u16 + static int + qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val) + { ++ return regmap_read(priv->regmap, reg, val); ++} ++ ++static int ++qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val) ++{ ++ return regmap_write(priv->regmap, reg, val); ++} ++ ++static int ++qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) ++{ ++ return regmap_update_bits(priv->regmap, reg, mask, write_val); ++} ++ ++static int ++qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) ++{ ++ struct qca8k_priv *priv = (struct qca8k_priv *)ctx; + struct mii_bus *bus = priv->bus; + u16 r1, r2, page; + int ret; +@@ -172,8 +192,9 @@ exit: + } + + static int +-qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val) ++qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val) + { ++ struct qca8k_priv *priv = (struct qca8k_priv *)ctx; + struct mii_bus *bus = priv->bus; + u16 r1, r2, page; + int ret; +@@ -194,8 +215,9 @@ exit: + } + + static int +-qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) ++qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val) + { ++ struct qca8k_priv *priv = (struct qca8k_priv *)ctx; + struct mii_bus *bus = priv->bus; + u16 r1, r2, page; + u32 val; +@@ -223,34 +245,6 @@ exit: + return ret; + } + +-static int +-qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val) +-{ +- return qca8k_rmw(priv, reg, 0, val); +-} +- +-static int +-qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val) +-{ +- return qca8k_rmw(priv, reg, val, 0); +-} +- +-static int +-qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) +-{ +- struct qca8k_priv *priv = (struct qca8k_priv *)ctx; +- +- return qca8k_read(priv, reg, val); +-} +- +-static int +-qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val) +-{ +- struct qca8k_priv *priv = (struct qca8k_priv *)ctx; +- +- return qca8k_write(priv, reg, val); +-} +- + static const struct regmap_range qca8k_readable_ranges[] = { + regmap_reg_range(0x0000, 0x00e4), /* Global control */ + regmap_reg_range(0x0100, 0x0168), /* EEE control */ +@@ -282,26 +276,19 @@ static struct regmap_config qca8k_regmap + .max_register = 0x16ac, /* end MIB - Port6 range */ + .reg_read = qca8k_regmap_read, + .reg_write = qca8k_regmap_write, ++ .reg_update_bits = qca8k_regmap_update_bits, + .rd_table = &qca8k_readable_table, ++ .disable_locking = true, /* Locking is handled by qca8k read/write */ ++ .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */ + }; + + static int + qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask) + { +- int ret, ret1; + u32 val; + +- ret = read_poll_timeout(qca8k_read, ret1, !(val & mask), +- 0, QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, +- priv, reg, &val); +- +- /* Check if qca8k_read has failed for a different reason +- * before returning -ETIMEDOUT +- */ +- if (ret < 0 && ret1 < 0) +- return ret1; +- +- return ret; ++ return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0, ++ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC); + } + + static int +@@ -568,7 +555,7 @@ 
qca8k_mib_init(struct qca8k_priv *priv) + int ret; + + mutex_lock(&priv->reg_mutex); +- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY); ++ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY); + if (ret) + goto exit; + +@@ -576,7 +563,7 @@ qca8k_mib_init(struct qca8k_priv *priv) + if (ret) + goto exit; + +- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP); ++ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP); + if (ret) + goto exit; + +@@ -597,9 +584,9 @@ qca8k_port_set_status(struct qca8k_priv + mask |= QCA8K_PORT_STATUS_LINK_AUTO; + + if (enable) +- qca8k_reg_set(priv, QCA8K_REG_PORT_STATUS(port), mask); ++ regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); + else +- qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask); ++ regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); + } + + static u32 +@@ -861,8 +848,8 @@ qca8k_setup_mdio_bus(struct qca8k_priv * + * a dt-overlay and driver reload changed the configuration + */ + +- return qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL, +- QCA8K_MDIO_MASTER_EN); ++ return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL, ++ QCA8K_MDIO_MASTER_EN); + } + + /* Check if the devicetree declare the port:phy mapping */ +@@ -1099,16 +1086,16 @@ qca8k_setup(struct dsa_switch *ds) + return ret; + + /* Make sure MAC06 is disabled */ +- ret = qca8k_reg_clear(priv, QCA8K_REG_PORT0_PAD_CTRL, +- QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN); ++ ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL, ++ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN); + if (ret) { + dev_err(priv->dev, "failed disabling MAC06 exchange"); + return ret; + } + + /* Enable CPU Port */ +- ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, +- QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); ++ ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, ++ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); + if (ret) { + dev_err(priv->dev, "failed enabling CPU port"); + return ret; +@@ -1176,8 +1163,8 @@ qca8k_setup(struct dsa_switch *ds) + return ret; + + /* Enable ARP Auto-learning by default */ +- ret = qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i), +- QCA8K_PORT_LOOKUP_LEARN); ++ ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i), ++ QCA8K_PORT_LOOKUP_LEARN); + if (ret) + return ret; + +@@ -1745,9 +1732,9 @@ qca8k_port_bridge_join(struct dsa_switch + /* Add this port to the portvlan mask of the other ports + * in the bridge + */ +- ret = qca8k_reg_set(priv, +- QCA8K_PORT_LOOKUP_CTRL(i), +- BIT(port)); ++ ret = regmap_set_bits(priv->regmap, ++ QCA8K_PORT_LOOKUP_CTRL(i), ++ BIT(port)); + if (ret) + return ret; + if (i != port) +@@ -1777,9 +1764,9 @@ qca8k_port_bridge_leave(struct dsa_switc + /* Remove this port to the portvlan mask of the other ports + * in the bridge + */ +- qca8k_reg_clear(priv, +- QCA8K_PORT_LOOKUP_CTRL(i), +- BIT(port)); ++ regmap_clear_bits(priv->regmap, ++ QCA8K_PORT_LOOKUP_CTRL(i), ++ BIT(port)); + } + + /* Set the cpu port to be the only one in the portvlan mask of diff --git a/target/linux/generic/backport-5.15/758-net-next-net-dsa-qca8k-add-additional-MIB-counter-and-.patch b/target/linux/generic/backport-5.15/758-net-next-net-dsa-qca8k-add-additional-MIB-counter-and-.patch new file mode 100644 index 0000000000..78bdf7f77d --- /dev/null +++ b/target/linux/generic/backport-5.15/758-net-next-net-dsa-qca8k-add-additional-MIB-counter-and-.patch @@ -0,0 +1,120 @@ +From c126f118b330ccf0db0dda4a4bd6c729865a205f Mon Sep 17 00:00:00 2001 +From: Ansuel Smith 
<ansuelsmth@gmail.com> +Date: Mon, 22 Nov 2021 16:23:45 +0100 +Subject: net: dsa: qca8k: add additional MIB counter and make it dynamic + +We are currently missing 2 additionals MIB counter present in QCA833x +switch. +QC832x switch have 39 MIB counter and QCA833X have 41 MIB counter. +Add the additional MIB counter and rework the MIB function to print the +correct supported counter from the match_data struct. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 23 ++++++++++++++++++++--- + drivers/net/dsa/qca8k.h | 4 ++++ + 2 files changed, 24 insertions(+), 3 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -70,6 +70,8 @@ static const struct qca8k_mib_desc ar832 + MIB_DESC(1, 0x9c, "TxExcDefer"), + MIB_DESC(1, 0xa0, "TxDefer"), + MIB_DESC(1, 0xa4, "TxLateCol"), ++ MIB_DESC(1, 0xa8, "RXUnicast"), ++ MIB_DESC(1, 0xac, "TXUnicast"), + }; + + /* The 32bit switch registers are accessed indirectly. To achieve this we need +@@ -1605,12 +1607,16 @@ qca8k_phylink_mac_link_up(struct dsa_swi + static void + qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) + { ++ const struct qca8k_match_data *match_data; ++ struct qca8k_priv *priv = ds->priv; + int i; + + if (stringset != ETH_SS_STATS) + return; + +- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) ++ match_data = of_device_get_match_data(priv->dev); ++ ++ for (i = 0; i < match_data->mib_count; i++) + strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name, + ETH_GSTRING_LEN); + } +@@ -1620,12 +1626,15 @@ qca8k_get_ethtool_stats(struct dsa_switc + uint64_t *data) + { + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; ++ const struct qca8k_match_data *match_data; + const struct qca8k_mib_desc *mib; + u32 reg, i, val; + u32 hi = 0; + int ret; + +- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) { ++ match_data = of_device_get_match_data(priv->dev); ++ ++ for (i = 0; i < match_data->mib_count; i++) { + mib = &ar8327_mib[i]; + reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset; + +@@ -1648,10 +1657,15 @@ qca8k_get_ethtool_stats(struct dsa_switc + static int + qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset) + { ++ const struct qca8k_match_data *match_data; ++ struct qca8k_priv *priv = ds->priv; ++ + if (sset != ETH_SS_STATS) + return 0; + +- return ARRAY_SIZE(ar8327_mib); ++ match_data = of_device_get_match_data(priv->dev); ++ ++ return match_data->mib_count; + } + + static int +@@ -2146,14 +2160,17 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, + static const struct qca8k_match_data qca8327 = { + .id = QCA8K_ID_QCA8327, + .reduced_package = true, ++ .mib_count = QCA8K_QCA832X_MIB_COUNT, + }; + + static const struct qca8k_match_data qca8328 = { + .id = QCA8K_ID_QCA8327, ++ .mib_count = QCA8K_QCA832X_MIB_COUNT, + }; + + static const struct qca8k_match_data qca833x = { + .id = QCA8K_ID_QCA8337, ++ .mib_count = QCA8K_QCA833X_MIB_COUNT, + }; + + static const struct of_device_id qca8k_of_match[] = { +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -21,6 +21,9 @@ + #define PHY_ID_QCA8337 0x004dd036 + #define QCA8K_ID_QCA8337 0x13 + ++#define QCA8K_QCA832X_MIB_COUNT 39 ++#define QCA8K_QCA833X_MIB_COUNT 41 ++ + #define QCA8K_BUSY_WAIT_TIMEOUT 2000 + + #define QCA8K_NUM_FDB_RECORDS 2048 +@@ -279,6 +282,7 @@ struct ar8xxx_port_status { + struct qca8k_match_data { + u8 id; + bool reduced_package; ++ u8 mib_count; + }; + + enum { diff --git 
a/target/linux/generic/backport-5.15/759-net-next-net-dsa-qca8k-add-support-for-port-fast-aging.patch b/target/linux/generic/backport-5.15/759-net-next-net-dsa-qca8k-add-support-for-port-fast-aging.patch new file mode 100644 index 0000000000..41efa89b5e --- /dev/null +++ b/target/linux/generic/backport-5.15/759-net-next-net-dsa-qca8k-add-support-for-port-fast-aging.patch @@ -0,0 +1,53 @@ +From 4592538bfb0d5d3c3c8a1d7071724d081412ac91 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Mon, 22 Nov 2021 16:23:46 +0100 +Subject: net: dsa: qca8k: add support for port fast aging + +The switch supports fast aging by flushing any rule in the ARL +table for a specific port. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 11 +++++++++++ + drivers/net/dsa/qca8k.h | 1 + + 2 files changed, 12 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1790,6 +1790,16 @@ qca8k_port_bridge_leave(struct dsa_switc + QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port)); + } + ++static void ++qca8k_port_fast_age(struct dsa_switch *ds, int port) ++{ ++ struct qca8k_priv *priv = ds->priv; ++ ++ mutex_lock(&priv->reg_mutex); ++ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port); ++ mutex_unlock(&priv->reg_mutex); ++} ++ + static int + qca8k_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phy) +@@ -2005,6 +2015,7 @@ static const struct dsa_switch_ops qca8k + .port_stp_state_set = qca8k_port_stp_state_set, + .port_bridge_join = qca8k_port_bridge_join, + .port_bridge_leave = qca8k_port_bridge_leave, ++ .port_fast_age = qca8k_port_fast_age, + .port_fdb_add = qca8k_port_fdb_add, + .port_fdb_del = qca8k_port_fdb_del, + .port_fdb_dump = qca8k_port_fdb_dump, +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -262,6 +262,7 @@ enum qca8k_fdb_cmd { + QCA8K_FDB_FLUSH = 1, + QCA8K_FDB_LOAD = 2, + QCA8K_FDB_PURGE = 3, ++ QCA8K_FDB_FLUSH_PORT = 5, + QCA8K_FDB_NEXT = 6, + QCA8K_FDB_SEARCH = 7, + }; diff --git a/target/linux/generic/backport-5.15/760-net-next-net-dsa-qca8k-add-set_ageing_time-support.patch b/target/linux/generic/backport-5.15/760-net-next-net-dsa-qca8k-add-set_ageing_time-support.patch new file mode 100644 index 0000000000..f32e6ae93a --- /dev/null +++ b/target/linux/generic/backport-5.15/760-net-next-net-dsa-qca8k-add-set_ageing_time-support.patch @@ -0,0 +1,78 @@ +From 6a3bdc5209f45d2af83aa92433ab6e5cf2297aa4 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Mon, 22 Nov 2021 16:23:47 +0100 +Subject: net: dsa: qca8k: add set_ageing_time support + +qca8k support setting ageing time in step of 7s. Add support for it and +set the max value accepted of 7645m. +Documentation talks about support for 10000m but that values doesn't +make sense as the value doesn't match the max value in the reg. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 25 +++++++++++++++++++++++++ + drivers/net/dsa/qca8k.h | 3 +++ + 2 files changed, 28 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1261,6 +1261,10 @@ qca8k_setup(struct dsa_switch *ds) + /* We don't have interrupts for link changes, so we need to poll */ + ds->pcs_poll = true; + ++ /* Set min a max ageing value supported */ ++ ds->ageing_time_min = 7000; ++ ds->ageing_time_max = 458745000; ++ + return 0; + } + +@@ -1801,6 +1805,26 @@ qca8k_port_fast_age(struct dsa_switch *d + } + + static int ++qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) ++{ ++ struct qca8k_priv *priv = ds->priv; ++ unsigned int secs = msecs / 1000; ++ u32 val; ++ ++ /* AGE_TIME reg is set in 7s step */ ++ val = secs / 7; ++ ++ /* Handle case with 0 as val to NOT disable ++ * learning ++ */ ++ if (!val) ++ val = 1; ++ ++ return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK, ++ QCA8K_ATU_AGE_TIME(val)); ++} ++ ++static int + qca8k_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phy) + { +@@ -2006,6 +2030,7 @@ static const struct dsa_switch_ops qca8k + .get_strings = qca8k_get_strings, + .get_ethtool_stats = qca8k_get_ethtool_stats, + .get_sset_count = qca8k_get_sset_count, ++ .set_ageing_time = qca8k_set_ageing_time, + .get_mac_eee = qca8k_get_mac_eee, + .set_mac_eee = qca8k_set_mac_eee, + .port_enable = qca8k_port_enable, +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -175,6 +175,9 @@ + #define QCA8K_VTU_FUNC1_BUSY BIT(31) + #define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16) + #define QCA8K_VTU_FUNC1_FULL BIT(4) ++#define QCA8K_REG_ATU_CTRL 0x618 ++#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0) ++#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x)) + #define QCA8K_REG_GLOBAL_FW_CTRL0 0x620 + #define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10) + #define QCA8K_REG_GLOBAL_FW_CTRL1 0x624 diff --git a/target/linux/generic/backport-5.15/761-net-next-net-dsa-qca8k-add-support-for-mdb_add-del.patch b/target/linux/generic/backport-5.15/761-net-next-net-dsa-qca8k-add-support-for-mdb_add-del.patch new file mode 100644 index 0000000000..e0daa88c31 --- /dev/null +++ b/target/linux/generic/backport-5.15/761-net-next-net-dsa-qca8k-add-support-for-mdb_add-del.patch @@ -0,0 +1,142 @@ +From ba8f870dfa635113ce6e8095a5eb1835ecde2e9e Mon Sep 17 00:00:00 2001 +From: Ansuel Smith <ansuelsmth@gmail.com> +Date: Mon, 22 Nov 2021 16:23:48 +0100 +Subject: net: dsa: qca8k: add support for mdb_add/del + +Add support for mdb add/del function. The ARL table is used to insert +the rule. The rule will be searched, deleted and reinserted with the +port mask updated. The function will check if the rule has to be updated +or insert directly with no deletion of the old rule. +If every port is removed from the port mask, the rule is removed. +The rule is set STATIC in the ARL table (aka it doesn't age) to not be +flushed by fast age function. + +Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/qca8k.c | 99 +++++++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 99 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -436,6 +436,81 @@ qca8k_fdb_flush(struct qca8k_priv *priv) + } + + static int ++qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask, ++ const u8 *mac, u16 vid) ++{ ++ struct qca8k_fdb fdb = { 0 }; ++ int ret; ++ ++ mutex_lock(&priv->reg_mutex); ++ ++ qca8k_fdb_write(priv, vid, 0, mac, 0); ++ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); ++ if (ret < 0) ++ goto exit; ++ ++ ret = qca8k_fdb_read(priv, &fdb); ++ if (ret < 0) ++ goto exit; ++ ++ /* Rule exist. Delete first */ ++ if (!fdb.aging) { ++ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); ++ if (ret) ++ goto exit; ++ } ++ ++ /* Add port to fdb portmask */ ++ fdb.port_mask |= port_mask; ++ ++ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); ++ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); ++ ++exit: ++ mutex_unlock(&priv->reg_mutex); ++ return ret; ++} ++ ++static int ++qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask, ++ const u8 *mac, u16 vid) ++{ ++ struct qca8k_fdb fdb = { 0 }; ++ int ret; ++ ++ mutex_lock(&priv->reg_mutex); ++ ++ qca8k_fdb_write(priv, vid, 0, mac, 0); ++ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); ++ if (ret < 0) ++ goto exit; ++ ++ /* Rule doesn't exist. Why delete? */ ++ if (!fdb.aging) { ++ ret = -EINVAL; ++ goto exit; ++ } ++ ++ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); ++ if (ret) ++ goto exit; ++ ++ /* Only port in the rule is this port. Don't re insert */ ++ if (fdb.port_mask == port_mask) ++ goto exit; ++ ++ /* Remove port from port mask */ ++ fdb.port_mask &= ~port_mask; ++ ++ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); ++ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); ++ ++exit: ++ mutex_unlock(&priv->reg_mutex); ++ return ret; ++} ++ ++static int + qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid) + { + u32 reg; +@@ -1929,6 +2004,28 @@ qca8k_port_fdb_dump(struct dsa_switch *d + return 0; + } + ++static void ++qca8k_port_mdb_add(struct dsa_switch *ds, int port, ++ const struct switchdev_obj_port_mdb *mdb) ++{ ++ struct qca8k_priv *priv = ds->priv; ++ const u8 *addr = mdb->addr; ++ u16 vid = mdb->vid; ++ ++ qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid); ++} ++ ++static int ++qca8k_port_mdb_del(struct dsa_switch *ds, int port, ++ const struct switchdev_obj_port_mdb *mdb) ++{ ++ struct qca8k_priv *priv = ds->priv; ++ const u8 *addr = mdb->addr; ++ u16 vid = mdb->vid; ++ ++ return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid); ++} ++ + static int + qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, + struct switchdev_trans *trans) +@@ -2044,6 +2141,8 @@ static const struct dsa_switch_ops qca8k + .port_fdb_add = qca8k_port_fdb_add, + .port_fdb_del = qca8k_port_fdb_del, + .port_fdb_dump = qca8k_port_fdb_dump, ++ .port_mdb_add = qca8k_port_mdb_add, ++ .port_mdb_del = qca8k_port_mdb_del, + .port_vlan_filtering = qca8k_port_vlan_filtering, + .port_vlan_prepare = qca8k_port_vlan_prepare, + .port_vlan_add = qca8k_port_vlan_add, diff --git a/target/linux/generic/backport-5.15/762-v5.11-net-dsa-mt7530-support-setting-MTU.patch b/target/linux/generic/backport-5.15/762-v5.11-net-dsa-mt7530-support-setting-MTU.patch new file mode 100644 index 0000000000..eb487cdfb5 --- /dev/null +++ 
b/target/linux/generic/backport-5.15/762-v5.11-net-dsa-mt7530-support-setting-MTU.patch @@ -0,0 +1,112 @@ +From 9470174e7581e75a8ebd78964997314dfc2e706c Mon Sep 17 00:00:00 2001 +From: DENG Qingfang <dqfext@gmail.com> +Date: Tue, 3 Nov 2020 13:06:18 +0800 +Subject: [PATCH] net: dsa: mt7530: support setting MTU + +MT7530/7531 has a global RX packet length register, which can be used +to set MTU. + +Supported packet length values are 1522 (1518 if untagged), 1536, +1552, and multiple of 1024 (from 2048 to 15360). + +Signed-off-by: DENG Qingfang <dqfext@gmail.com> +Link: https://lore.kernel.org/r/20201103050618.11419-1-dqfext@gmail.com +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/dsa/mt7530.c | 49 ++++++++++++++++++++++++++++++++++++++++ + drivers/net/dsa/mt7530.h | 12 ++++++++++ + 2 files changed, 61 insertions(+) + +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -1015,6 +1015,53 @@ mt7530_port_disable(struct dsa_switch *d + mutex_unlock(&priv->reg_mutex); + } + ++static int ++mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) ++{ ++ struct mt7530_priv *priv = ds->priv; ++ struct mii_bus *bus = priv->bus; ++ int length; ++ u32 val; ++ ++ /* When a new MTU is set, DSA always set the CPU port's MTU to the ++ * largest MTU of the slave ports. Because the switch only has a global ++ * RX length register, only allowing CPU port here is enough. ++ */ ++ if (!dsa_is_cpu_port(ds, port)) ++ return 0; ++ ++ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); ++ ++ val = mt7530_mii_read(priv, MT7530_GMACCR); ++ val &= ~MAX_RX_PKT_LEN_MASK; ++ ++ /* RX length also includes Ethernet header, MTK tag, and FCS length */ ++ length = new_mtu + ETH_HLEN + MTK_HDR_LEN + ETH_FCS_LEN; ++ if (length <= 1522) { ++ val |= MAX_RX_PKT_LEN_1522; ++ } else if (length <= 1536) { ++ val |= MAX_RX_PKT_LEN_1536; ++ } else if (length <= 1552) { ++ val |= MAX_RX_PKT_LEN_1552; ++ } else { ++ val &= ~MAX_RX_JUMBO_MASK; ++ val |= MAX_RX_JUMBO(DIV_ROUND_UP(length, 1024)); ++ val |= MAX_RX_PKT_LEN_JUMBO; ++ } ++ ++ mt7530_mii_write(priv, MT7530_GMACCR, val); ++ ++ mutex_unlock(&bus->mdio_lock); ++ ++ return 0; ++} ++ ++static int ++mt7530_port_max_mtu(struct dsa_switch *ds, int port) ++{ ++ return MT7530_MAX_MTU; ++} ++ + static void + mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state) + { +@@ -2652,6 +2699,8 @@ static const struct dsa_switch_ops mt753 + .get_sset_count = mt7530_get_sset_count, + .port_enable = mt7530_port_enable, + .port_disable = mt7530_port_disable, ++ .port_change_mtu = mt7530_port_change_mtu, ++ .port_max_mtu = mt7530_port_max_mtu, + .port_stp_state_set = mt7530_stp_state_set, + .port_bridge_join = mt7530_port_bridge_join, + .port_bridge_leave = mt7530_port_bridge_leave, +--- a/drivers/net/dsa/mt7530.h ++++ b/drivers/net/dsa/mt7530.h +@@ -11,6 +11,9 @@ + #define MT7530_NUM_FDB_RECORDS 2048 + #define MT7530_ALL_MEMBERS 0xff + ++#define MTK_HDR_LEN 4 ++#define MT7530_MAX_MTU (15 * 1024 - ETH_HLEN - ETH_FCS_LEN - MTK_HDR_LEN) ++ + enum mt753x_id { + ID_MT7530 = 0, + ID_MT7621 = 1, +@@ -301,6 +304,15 @@ enum mt7530_vlan_port_attr { + #define MT7531_DBG_CNT(x) (0x3018 + (x) * 0x100) + #define MT7531_DIS_CLR BIT(31) + ++#define MT7530_GMACCR 0x30e0 ++#define MAX_RX_JUMBO(x) ((x) << 2) ++#define MAX_RX_JUMBO_MASK GENMASK(5, 2) ++#define MAX_RX_PKT_LEN_MASK GENMASK(1, 0) ++#define MAX_RX_PKT_LEN_1522 0x0 ++#define MAX_RX_PKT_LEN_1536 0x1 ++#define MAX_RX_PKT_LEN_1552 0x2 ++#define MAX_RX_PKT_LEN_JUMBO 0x3 ++ + /* Register for MIB */ + 
#define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100) + #define MT7530_MIB_CCR 0x4fe0 diff --git a/target/linux/generic/backport-5.15/763-v5.11-net-dsa-mt7530-enable-MTU-normalization.patch b/target/linux/generic/backport-5.15/763-v5.11-net-dsa-mt7530-enable-MTU-normalization.patch new file mode 100644 index 0000000000..b0ad7b9f87 --- /dev/null +++ b/target/linux/generic/backport-5.15/763-v5.11-net-dsa-mt7530-enable-MTU-normalization.patch @@ -0,0 +1,36 @@ +From 771c8901568dd8776a260aa93db41be88a60389e Mon Sep 17 00:00:00 2001 +From: DENG Qingfang <dqfext@gmail.com> +Date: Fri, 11 Dec 2020 01:03:22 +0800 +Subject: [PATCH] net: dsa: mt7530: enable MTU normalization + +MT7530 has a global RX length register, so we are actually changing its +MRU. +Enable MTU normalization for this reason. + +Signed-off-by: DENG Qingfang <dqfext@gmail.com> +Acked-by: Landen Chao <landen.chao@mediatek.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Link: https://lore.kernel.org/r/20201210170322.3433-1-dqfext@gmail.com +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/dsa/mt7530.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -1703,6 +1703,7 @@ mt7530_setup(struct dsa_switch *ds) + */ + dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent; + ds->configure_vlan_while_not_filtering = true; ++ ds->mtu_enforcement_ingress = true; + + if (priv->id == ID_MT7530) { + regulator_set_voltage(priv->core_pwr, 1000000, 1000000); +@@ -1947,6 +1948,7 @@ mt7531_setup(struct dsa_switch *ds) + } + + ds->configure_vlan_while_not_filtering = true; ++ ds->mtu_enforcement_ingress = true; + + /* Flush the FDB table */ + ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL); diff --git a/target/linux/generic/backport-5.15/764-v5.11-net-dsa-mt7530-support-setting-ageing-time.patch b/target/linux/generic/backport-5.15/764-v5.11-net-dsa-mt7530-support-setting-ageing-time.patch new file mode 100644 index 0000000000..44fa40eabe --- /dev/null +++ b/target/linux/generic/backport-5.15/764-v5.11-net-dsa-mt7530-support-setting-ageing-time.patch @@ -0,0 +1,99 @@ +From ea6d5c924e391872d402acac38461a5f8261e57f Mon Sep 17 00:00:00 2001 +From: DENG Qingfang <dqfext@gmail.com> +Date: Tue, 8 Dec 2020 15:00:28 +0800 +Subject: [PATCH] net: dsa: mt7530: support setting ageing time + +MT7530 has a global address age control register, so use it to set +ageing time. + +The applied timer is (AGE_CNT + 1) * (AGE_UNIT + 1) seconds + +Signed-off-by: DENG Qingfang <dqfext@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/mt7530.c | 41 ++++++++++++++++++++++++++++++++++++++++ + drivers/net/dsa/mt7530.h | 13 +++++++++++++ + 2 files changed, 54 insertions(+) + +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -870,6 +870,46 @@ mt7530_get_sset_count(struct dsa_switch + return ARRAY_SIZE(mt7530_mib); + } + ++static int ++mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) ++{ ++ struct mt7530_priv *priv = ds->priv; ++ unsigned int secs = msecs / 1000; ++ unsigned int tmp_age_count; ++ unsigned int error = -1; ++ unsigned int age_count; ++ unsigned int age_unit; ++ ++ /* Applied timer is (AGE_CNT + 1) * (AGE_UNIT + 1) seconds */ ++ if (secs < 1 || secs > (AGE_CNT_MAX + 1) * (AGE_UNIT_MAX + 1)) ++ return -ERANGE; ++ ++ /* iterate through all possible age_count to find the closest pair */ ++ for (tmp_age_count = 0; tmp_age_count <= AGE_CNT_MAX; ++tmp_age_count) { ++ unsigned int tmp_age_unit = secs / (tmp_age_count + 1) - 1; ++ ++ if (tmp_age_unit <= AGE_UNIT_MAX) { ++ unsigned int tmp_error = secs - ++ (tmp_age_count + 1) * (tmp_age_unit + 1); ++ ++ /* found a closer pair */ ++ if (error > tmp_error) { ++ error = tmp_error; ++ age_count = tmp_age_count; ++ age_unit = tmp_age_unit; ++ } ++ ++ /* found the exact match, so break the loop */ ++ if (!error) ++ break; ++ } ++ } ++ ++ mt7530_write(priv, MT7530_AAC, AGE_CNT(age_count) | AGE_UNIT(age_unit)); ++ ++ return 0; ++} ++ + static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface) + { + struct mt7530_priv *priv = ds->priv; +@@ -2699,6 +2739,7 @@ static const struct dsa_switch_ops mt753 + .phy_write = mt753x_phy_write, + .get_ethtool_stats = mt7530_get_ethtool_stats, + .get_sset_count = mt7530_get_sset_count, ++ .set_ageing_time = mt7530_set_ageing_time, + .port_enable = mt7530_port_enable, + .port_disable = mt7530_port_disable, + .port_change_mtu = mt7530_port_change_mtu, +--- a/drivers/net/dsa/mt7530.h ++++ b/drivers/net/dsa/mt7530.h +@@ -161,6 +161,19 @@ enum mt7530_vlan_egress_attr { + MT7530_VLAN_EGRESS_STACK = 3, + }; + ++/* Register for address age control */ ++#define MT7530_AAC 0xa0 ++/* Disable ageing */ ++#define AGE_DIS BIT(20) ++/* Age count */ ++#define AGE_CNT_MASK GENMASK(19, 12) ++#define AGE_CNT_MAX 0xff ++#define AGE_CNT(x) (AGE_CNT_MASK & ((x) << 12)) ++/* Age unit */ ++#define AGE_UNIT_MASK GENMASK(11, 0) ++#define AGE_UNIT_MAX 0xfff ++#define AGE_UNIT(x) (AGE_UNIT_MASK & (x)) ++ + /* Register for port STP state control */ + #define MT7530_SSP_P(x) (0x2000 + ((x) * 0x100)) + #define FID_PST(x) ((x) & 0x3) diff --git a/target/linux/generic/backport-5.15/765-v5.15-net-dsa-mt7530-disable-learning-on-standalone-ports.patch b/target/linux/generic/backport-5.15/765-v5.15-net-dsa-mt7530-disable-learning-on-standalone-ports.patch new file mode 100644 index 0000000000..a0bb7fa888 --- /dev/null +++ b/target/linux/generic/backport-5.15/765-v5.15-net-dsa-mt7530-disable-learning-on-standalone-ports.patch @@ -0,0 +1,65 @@ +From ba2203f36b981235556504fb7b62baee28512a40 Mon Sep 17 00:00:00 2001 +From: DENG Qingfang <dqfext@gmail.com> +Date: Tue, 24 Aug 2021 11:37:50 +0800 +Subject: [PATCH] net: dsa: mt7530: disable learning on standalone ports + +This is a partial backport of commit 5a30833b9a16f8d1aa15de06636f9317ca51f9df +("net: dsa: mt7530: support MDB and bridge flag operations") upstream. + +Make sure that the standalone ports start up with learning disabled. 
+ +Signed-off-by: DENG Qingfang <dqfext@gmail.com> +--- + drivers/net/dsa/mt7530.c | 16 ++++++++++++++-- + 1 file changed, 14 insertions(+), 2 deletions(-) + +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -1163,6 +1163,8 @@ mt7530_port_bridge_join(struct dsa_switc + PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap)); + priv->ports[port].pm |= PCR_MATRIX(port_bitmap); + ++ mt7530_clear(priv, MT7530_PSC_P(port), SA_DIS); ++ + mutex_unlock(&priv->reg_mutex); + + return 0; +@@ -1260,6 +1262,8 @@ mt7530_port_bridge_leave(struct dsa_swit + PCR_MATRIX(BIT(MT7530_CPU_PORT))); + priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT)); + ++ mt7530_set(priv, MT7530_PSC_P(port), SA_DIS); ++ + mutex_unlock(&priv->reg_mutex); + } + +@@ -1817,9 +1821,13 @@ mt7530_setup(struct dsa_switch *ds) + ret = mt753x_cpu_port_enable(ds, i); + if (ret) + return ret; +- } else ++ } else { + mt7530_port_disable(ds, i); + ++ /* Disable learning by default on all user ports */ ++ mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); ++ } ++ + /* Enable consistent egress tag */ + mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK, + PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); +@@ -1979,9 +1987,13 @@ mt7531_setup(struct dsa_switch *ds) + ret = mt753x_cpu_port_enable(ds, i); + if (ret) + return ret; +- } else ++ } else { + mt7530_port_disable(ds, i); + ++ /* Disable learning by default on all user ports */ ++ mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); ++ } ++ + /* Enable consistent egress tag */ + mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK, + PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); diff --git a/target/linux/generic/backport-5.15/766-v5.15-net-dsa-mt7530-enable-assisted-learning-on-CPU-port.patch b/target/linux/generic/backport-5.15/766-v5.15-net-dsa-mt7530-enable-assisted-learning-on-CPU-port.patch new file mode 100644 index 0000000000..f376ff949a --- /dev/null +++ b/target/linux/generic/backport-5.15/766-v5.15-net-dsa-mt7530-enable-assisted-learning-on-CPU-port.patch @@ -0,0 +1,102 @@ +From 59c8adbc8e2c7f6b46385f36962eadaad3ea2daa Mon Sep 17 00:00:00 2001 +From: DENG Qingfang <dqfext@gmail.com> +Date: Wed, 4 Aug 2021 00:04:01 +0800 +Subject: [PATCH] net: dsa: mt7530: enable assisted learning on CPU port + +Consider the following bridge configuration, where bond0 is not +offloaded: + + +-- br0 --+ + / / | \ + / / | \ + / | | bond0 + / | | / \ + swp0 swp1 swp2 swp3 swp4 + . . . + . . . + A B C + +Address learning is enabled on offloaded ports (swp0~2) and the CPU +port, so when client A sends a packet to C, the following will happen: + +1. The switch learns that client A can be reached at swp0. +2. The switch probably already knows that client C can be reached at the + CPU port, so it forwards the packet to the CPU. +3. The bridge core knows client C can be reached at bond0, so it + forwards the packet back to the switch. +4. The switch learns that client A can be reached at the CPU port. +5. The switch forwards the packet to either swp3 or swp4, according to + the packet's tag. + +That makes client A's MAC address flap between swp0 and the CPU port. If +client B sends a packet to A, it is possible that the packet is +forwarded to the CPU. With offload_fwd_mark = 1, the bridge core won't +forward it back to the switch, resulting in packet loss. + +As we have the assisted_learning_on_cpu_port in DSA core now, enable +that and disable hardware learning on the CPU port. + +Signed-off-by: DENG Qingfang <dqfext@gmail.com> +Reviewed-by: Vladimir Oltean <oltean@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/mt7530.c | 14 ++++++++------ + 1 file changed, 8 insertions(+), 6 deletions(-) + +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -1747,6 +1747,7 @@ mt7530_setup(struct dsa_switch *ds) + */ + dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent; + ds->configure_vlan_while_not_filtering = true; ++ ds->assisted_learning_on_cpu_port = true; + ds->mtu_enforcement_ingress = true; + + if (priv->id == ID_MT7530) { +@@ -1817,15 +1818,15 @@ mt7530_setup(struct dsa_switch *ds) + mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, + PCR_MATRIX_CLR); + ++ /* Disable learning by default on all ports */ ++ mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); ++ + if (dsa_is_cpu_port(ds, i)) { + ret = mt753x_cpu_port_enable(ds, i); + if (ret) + return ret; + } else { + mt7530_port_disable(ds, i); +- +- /* Disable learning by default on all user ports */ +- mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); + } + + /* Enable consistent egress tag */ +@@ -1981,6 +1982,9 @@ mt7531_setup(struct dsa_switch *ds) + mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, + PCR_MATRIX_CLR); + ++ /* Disable learning by default on all ports */ ++ mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); ++ + mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR); + + if (dsa_is_cpu_port(ds, i)) { +@@ -1989,9 +1993,6 @@ mt7531_setup(struct dsa_switch *ds) + return ret; + } else { + mt7530_port_disable(ds, i); +- +- /* Disable learning by default on all user ports */ +- mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); + } + + /* Enable consistent egress tag */ +@@ -2000,6 +2001,7 @@ mt7531_setup(struct dsa_switch *ds) + } + + ds->configure_vlan_while_not_filtering = true; ++ ds->assisted_learning_on_cpu_port = true; + ds->mtu_enforcement_ingress = true; + + /* Flush the FDB table */ diff --git a/target/linux/generic/backport-5.15/767-v5.15-net-dsa-mt7530-use-independent-VLAN-learning-on-VLAN.patch b/target/linux/generic/backport-5.15/767-v5.15-net-dsa-mt7530-use-independent-VLAN-learning-on-VLAN.patch new file mode 100644 index 0000000000..f9fe0ef858 --- /dev/null +++ b/target/linux/generic/backport-5.15/767-v5.15-net-dsa-mt7530-use-independent-VLAN-learning-on-VLAN.patch @@ -0,0 +1,262 @@ +From e3a402764c5753698e7a9e45d4d21f093faa7852 Mon Sep 17 00:00:00 2001 +From: DENG Qingfang <dqfext@gmail.com> +Date: Wed, 4 Aug 2021 00:04:02 +0800 +Subject: [PATCH] net: dsa: mt7530: use independent VLAN learning on + VLAN-unaware bridges + +Consider the following bridge configuration, where bond0 is not +offloaded: + + +-- br0 --+ + / / | \ + / / | \ + / | | bond0 + / | | / \ + swp0 swp1 swp2 swp3 swp4 + . . . + . . . + A B C + +Ideally, when the switch receives a packet from swp3 or swp4, it should +forward the packet to the CPU, according to the port matrix and unknown +unicast flood settings. + +But packet loss will happen if the destination address is at one of the +offloaded ports (swp0~2). For example, when client C sends a packet to +A, the FDB lookup will indicate that it should be forwarded to swp0, but +the port matrix of swp3 and swp4 is configured to only allow the CPU to +be its destination, so it is dropped. + +However, this issue does not happen if the bridge is VLAN-aware. That is +because VLAN-aware bridges use independent VLAN learning, i.e. use VID +for FDB lookup, on offloaded ports. As swp3 and swp4 are not offloaded, +shared VLAN learning with default filter ID of 0 is used instead. 
So the +lookup for A with filter ID 0 never hits and the packet can be forwarded +to the CPU. + +In the current code, only two combinations were used to toggle user +ports' VLAN awareness: one is PCR.PORT_VLAN set to port matrix mode with +PVC.VLAN_ATTR set to transparent port, the other is PCR.PORT_VLAN set to +security mode with PVC.VLAN_ATTR set to user port. + +It turns out that only PVC.VLAN_ATTR contributes to VLAN awareness, and +port matrix mode just skips the VLAN table lookup. The reference manual +is somehow misleading when describing PORT_VLAN modes. It states that +PORT_MEM (VLAN port member) is used for destination if the VLAN table +lookup hits, but actually **PORT_MEM & PORT_MATRIX** (bitwise AND of +VLAN port member and port matrix) is used instead, which means we can +have two or more separate VLAN-aware bridges with the same PVID and +traffic won't leak between them. + +Therefore, to solve this, enable independent VLAN learning with PVID 0 +on VLAN-unaware bridges, by setting their PCR.PORT_VLAN to fallback +mode, while leaving standalone ports in port matrix mode. The CPU port +is always set to fallback mode to serve those bridges. + +During testing, it is found that FDB lookup with filter ID of 0 will +also hit entries with VID 0 even with independent VLAN learning. To +avoid that, install all VLANs with filter ID of 1. + +Signed-off-by: DENG Qingfang <dqfext@gmail.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/mt7530.c | 72 +++++++++++++++++++++++++++++----------- + drivers/net/dsa/mt7530.h | 9 ++++- + 2 files changed, 60 insertions(+), 21 deletions(-) + +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -1011,6 +1011,10 @@ mt753x_cpu_port_enable(struct dsa_switch + mt7530_write(priv, MT7530_PCR_P(port), + PCR_MATRIX(dsa_user_ports(priv->ds))); + ++ /* Set to fallback mode for independent VLAN learning */ ++ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, ++ MT7530_PORT_FALLBACK_MODE); ++ + return 0; + } + +@@ -1165,6 +1169,10 @@ mt7530_port_bridge_join(struct dsa_switc + + mt7530_clear(priv, MT7530_PSC_P(port), SA_DIS); + ++ /* Set to fallback mode for independent VLAN learning */ ++ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, ++ MT7530_PORT_FALLBACK_MODE); ++ + mutex_unlock(&priv->reg_mutex); + + return 0; +@@ -1177,16 +1185,21 @@ mt7530_port_set_vlan_unaware(struct dsa_ + bool all_user_ports_removed = true; + int i; + +- /* When a port is removed from the bridge, the port would be set up +- * back to the default as is at initial boot which is a VLAN-unaware +- * port. ++ /* This is called after .port_bridge_leave when leaving a VLAN-aware ++ * bridge. Don't set standalone ports to fallback mode. 
+ */ +- mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, +- MT7530_PORT_MATRIX_MODE); ++ if (dsa_to_port(ds, port)->bridge_dev) ++ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, ++ MT7530_PORT_FALLBACK_MODE); ++ + mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK, + VLAN_ATTR(MT7530_VLAN_TRANSPARENT) | + PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); + ++ /* Set PVID to 0 */ ++ mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, ++ G0_PORT_VID_DEF); ++ + for (i = 0; i < MT7530_NUM_PORTS; i++) { + if (dsa_is_user_port(ds, i) && + dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) { +@@ -1212,15 +1225,14 @@ mt7530_port_set_vlan_aware(struct dsa_sw + struct mt7530_priv *priv = ds->priv; + + /* Trapped into security mode allows packet forwarding through VLAN +- * table lookup. CPU port is set to fallback mode to let untagged +- * frames pass through. ++ * table lookup. + */ +- if (dsa_is_cpu_port(ds, port)) +- mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, +- MT7530_PORT_FALLBACK_MODE); +- else ++ if (dsa_is_user_port(ds, port)) { + mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, + MT7530_PORT_SECURITY_MODE); ++ mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, ++ G0_PORT_VID(priv->ports[port].pvid)); ++ } + + /* Set the port as a user port which is to be able to recognize VID + * from incoming packets before fetching entry within the VLAN table. +@@ -1264,6 +1276,13 @@ mt7530_port_bridge_leave(struct dsa_swit + + mt7530_set(priv, MT7530_PSC_P(port), SA_DIS); + ++ /* When a port is removed from the bridge, the port would be set up ++ * back to the default as is at initial boot which is a VLAN-unaware ++ * port. ++ */ ++ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, ++ MT7530_PORT_MATRIX_MODE); ++ + mutex_unlock(&priv->reg_mutex); + } + +@@ -1406,7 +1425,8 @@ mt7530_hw_vlan_add(struct mt7530_priv *p + /* Validate the entry with independent learning, create egress tag per + * VLAN and joining the port as one of the port members. + */ +- val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | VLAN_VALID; ++ val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | FID(FID_BRIDGED) | ++ VLAN_VALID; + mt7530_write(priv, MT7530_VAWD1, val); + + /* Decide whether adding tag or not for those outgoing packets from the +@@ -1499,9 +1519,13 @@ mt7530_port_vlan_add(struct dsa_switch * + } + + if (pvid) { +- mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, +- G0_PORT_VID(vlan->vid_end)); + priv->ports[port].pvid = vlan->vid_end; ++ ++ /* Only configure PVID if VLAN filtering is enabled */ ++ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) ++ mt7530_rmw(priv, MT7530_PPBV1_P(port), ++ G0_PORT_VID_MASK, ++ G0_PORT_VID(vlan->vid_end)); + } + + mutex_unlock(&priv->reg_mutex); +@@ -1513,11 +1537,10 @@ mt7530_port_vlan_del(struct dsa_switch * + { + struct mt7530_hw_vlan_entry target_entry; + struct mt7530_priv *priv = ds->priv; +- u16 vid, pvid; ++ u16 vid; + + mutex_lock(&priv->reg_mutex); + +- pvid = priv->ports[port].pvid; + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + mt7530_hw_vlan_entry_init(&target_entry, port, 0); + mt7530_hw_vlan_update(priv, vid, &target_entry, +@@ -1526,12 +1549,13 @@ mt7530_port_vlan_del(struct dsa_switch * + /* PVID is being restored to the default whenever the PVID port + * is being removed from the VLAN. 
+ */ +- if (pvid == vid) +- pvid = G0_PORT_VID_DEF; ++ if (priv->ports[port].pvid == vid) { ++ priv->ports[port].pvid = G0_PORT_VID_DEF; ++ mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, ++ G0_PORT_VID_DEF); ++ } + } + +- mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, pvid); +- priv->ports[port].pvid = pvid; + + mutex_unlock(&priv->reg_mutex); + +@@ -1827,6 +1851,10 @@ mt7530_setup(struct dsa_switch *ds) + return ret; + } else { + mt7530_port_disable(ds, i); ++ ++ /* Set default PVID to 0 on all user ports */ ++ mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK, ++ G0_PORT_VID_DEF); + } + + /* Enable consistent egress tag */ +@@ -1993,6 +2021,10 @@ mt7531_setup(struct dsa_switch *ds) + return ret; + } else { + mt7530_port_disable(ds, i); ++ ++ /* Set default PVID to 0 on all user ports */ ++ mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK, ++ G0_PORT_VID_DEF); + } + + /* Enable consistent egress tag */ +--- a/drivers/net/dsa/mt7530.h ++++ b/drivers/net/dsa/mt7530.h +@@ -145,11 +145,18 @@ enum mt7530_vlan_cmd { + #define VTAG_EN BIT(28) + /* VLAN Member Control */ + #define PORT_MEM(x) (((x) & 0xff) << 16) ++/* Filter ID */ ++#define FID(x) (((x) & 0x7) << 1) + /* VLAN Entry Valid */ + #define VLAN_VALID BIT(0) + #define PORT_MEM_SHFT 16 + #define PORT_MEM_MASK 0xff + ++enum mt7530_fid { ++ FID_STANDALONE = 0, ++ FID_BRIDGED = 1, ++}; ++ + #define MT7530_VAWD2 0x98 + /* Egress Tag Control */ + #define ETAG_CTRL_P(p, x) (((x) & 0x3) << ((p) << 1)) +@@ -244,7 +251,7 @@ enum mt7530_vlan_port_attr { + #define MT7530_PPBV1_P(x) (0x2014 + ((x) * 0x100)) + #define G0_PORT_VID(x) (((x) & 0xfff) << 0) + #define G0_PORT_VID_MASK G0_PORT_VID(0xfff) +-#define G0_PORT_VID_DEF G0_PORT_VID(1) ++#define G0_PORT_VID_DEF G0_PORT_VID(0) + + /* Register for port MAC control register */ + #define MT7530_PMCR_P(x) (0x3000 + ((x) * 0x100)) diff --git a/target/linux/generic/backport-5.15/768-v5.15-net-dsa-mt7530-set-STP-state-on-filter-ID-1.patch b/target/linux/generic/backport-5.15/768-v5.15-net-dsa-mt7530-set-STP-state-on-filter-ID-1.patch new file mode 100644 index 0000000000..2c1958bd43 --- /dev/null +++ b/target/linux/generic/backport-5.15/768-v5.15-net-dsa-mt7530-set-STP-state-on-filter-ID-1.patch @@ -0,0 +1,40 @@ +From c5ffcefcb40420528d04c63e7dfc88f2845c9831 Mon Sep 17 00:00:00 2001 +From: DENG Qingfang <dqfext@gmail.com> +Date: Wed, 4 Aug 2021 00:04:03 +0800 +Subject: [PATCH] net: dsa: mt7530: set STP state on filter ID 1 + +As filter ID 1 is the only one used for bridges, set STP state on it. + +Signed-off-by: DENG Qingfang <dqfext@gmail.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/mt7530.c | 3 ++- + drivers/net/dsa/mt7530.h | 4 ++-- + 2 files changed, 4 insertions(+), 3 deletions(-) + +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -1131,7 +1131,8 @@ mt7530_stp_state_set(struct dsa_switch * + break; + } + +- mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK, stp_state); ++ mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK(FID_BRIDGED), ++ FID_PST(FID_BRIDGED, stp_state)); + } + + static int +--- a/drivers/net/dsa/mt7530.h ++++ b/drivers/net/dsa/mt7530.h +@@ -183,8 +183,8 @@ enum mt7530_vlan_egress_attr { + + /* Register for port STP state control */ + #define MT7530_SSP_P(x) (0x2000 + ((x) * 0x100)) +-#define FID_PST(x) ((x) & 0x3) +-#define FID_PST_MASK FID_PST(0x3) ++#define FID_PST(fid, state) (((state) & 0x3) << ((fid) * 2)) ++#define FID_PST_MASK(fid) FID_PST(fid, 0x3) + + enum mt7530_stp_state { + MT7530_STP_DISABLED = 0, diff --git a/target/linux/generic/backport-5.15/769-v5.15-net-dsa-mt7530-always-install-FDB-entries-with-IVL-a.patch b/target/linux/generic/backport-5.15/769-v5.15-net-dsa-mt7530-always-install-FDB-entries-with-IVL-a.patch new file mode 100644 index 0000000000..97824068f0 --- /dev/null +++ b/target/linux/generic/backport-5.15/769-v5.15-net-dsa-mt7530-always-install-FDB-entries-with-IVL-a.patch @@ -0,0 +1,54 @@ +From 138c126a33f7564edb66b1da5b847e4a60740bfc Mon Sep 17 00:00:00 2001 +From: DENG Qingfang <dqfext@gmail.com> +Date: Wed, 4 Aug 2021 00:04:04 +0800 +Subject: [PATCH] net: dsa: mt7530: always install FDB entries with IVL and FID + 1 + +This reverts commit 7e777021780e ("mt7530 mt7530_fdb_write only set ivl +bit vid larger than 1"). + +Before this series, the default value of all ports' PVID is 1, which is +copied into the FDB entry, even if the ports are VLAN unaware. So +`bridge fdb show` will show entries like `dev swp0 vlan 1 self` even on +a VLAN-unaware bridge. + +The blamed commit does not solve that issue completely, instead it may +cause a new issue that FDB is inaccessible in a VLAN-aware bridge with +PVID 1. + +This series sets PVID to 0 on VLAN-unaware ports, so `bridge fdb show` +will no longer print `vlan 1` on VLAN-unaware bridges, and that special +case in fdb_write is not required anymore. + +Set FDB entries' filter ID to 1 to match the VLAN table. + +Signed-off-by: DENG Qingfang <dqfext@gmail.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/dsa/mt7530.c | 2 ++ + drivers/net/dsa/mt7530.h | 2 ++ + 2 files changed, 4 insertions(+) + +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -361,6 +361,8 @@ mt7530_fdb_write(struct mt7530_priv *pri + int i; + + reg[1] |= vid & CVID_MASK; ++ reg[1] |= ATA2_IVL; ++ reg[1] |= ATA2_FID(FID_BRIDGED); + reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER; + reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP; + /* STATIC_ENT indicate that entry is static wouldn't +--- a/drivers/net/dsa/mt7530.h ++++ b/drivers/net/dsa/mt7530.h +@@ -77,6 +77,8 @@ enum mt753x_bpdu_port_fw { + #define STATIC_EMP 0 + #define STATIC_ENT 3 + #define MT7530_ATA2 0x78 ++#define ATA2_IVL BIT(15) ++#define ATA2_FID(x) (((x) & 0x7) << 12) + + /* Register for address table write data */ + #define MT7530_ATWD 0x7c diff --git a/target/linux/generic/backport-5.15/770-v5.15-net-dsa-mt7530-support-MDB-operations.patch b/target/linux/generic/backport-5.15/770-v5.15-net-dsa-mt7530-support-MDB-operations.patch new file mode 100644 index 0000000000..5d91d5a657 --- /dev/null +++ b/target/linux/generic/backport-5.15/770-v5.15-net-dsa-mt7530-support-MDB-operations.patch @@ -0,0 +1,171 @@ +From 1f11a07a33bc26997c18b633d63f088bf75d11f2 Mon Sep 17 00:00:00 2001 +From: DENG Qingfang <dqfext@gmail.com> +Date: Tue, 24 Aug 2021 11:37:50 +0800 +Subject: [PATCH] net: dsa: mt7530: support MDB operations + +This is a partial backport of commit 5a30833b9a16f8d1aa15de06636f9317ca51f9df +("net: dsa: mt7530: support MDB and bridge flag operations") upstream. + +Signed-off-by: DENG Qingfang <dqfext@gmail.com> +--- + drivers/net/dsa/mt7530.c | 78 ++++++++++++++++++++++++++++++++++++++-- + net/dsa/tag_mtk.c | 14 +------- + 2 files changed, 76 insertions(+), 16 deletions(-) + +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -1000,9 +1000,6 @@ mt753x_cpu_port_enable(struct dsa_switch + mt7530_write(priv, MT7530_PVC_P(port), + PORT_SPEC_TAG); + +- /* Unknown multicast frame forwarding to the cpu port */ +- mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port))); +- + /* Set CPU port number */ + if (priv->id == ID_MT7621) + mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port)); +@@ -1138,6 +1135,20 @@ mt7530_stp_state_set(struct dsa_switch * + } + + static int ++mt7530_port_egress_floods(struct dsa_switch *ds, int port, ++ bool unicast, bool multicast) ++{ ++ struct mt7530_priv *priv = ds->priv; ++ ++ mt7530_rmw(priv, MT7530_MFC, ++ UNU_FFP(BIT(port)) | UNM_FFP(BIT(port)), ++ (unicast ? UNU_FFP(BIT(port)) : 0) | ++ (multicast ? 
UNM_FFP(BIT(port)) : 0)); ++ ++ return 0; ++} ++ ++static int + mt7530_port_bridge_join(struct dsa_switch *ds, int port, + struct net_device *bridge) + { +@@ -1357,6 +1368,63 @@ err: + } + + static int ++mt7530_port_mdb_prepare(struct dsa_switch *ds, int port, ++ const struct switchdev_obj_port_mdb *mdb) ++{ ++ return 0; ++} ++ ++static void ++mt7530_port_mdb_add(struct dsa_switch *ds, int port, ++ const struct switchdev_obj_port_mdb *mdb) ++{ ++ struct mt7530_priv *priv = ds->priv; ++ const u8 *addr = mdb->addr; ++ u16 vid = mdb->vid; ++ u8 port_mask = 0; ++ ++ mutex_lock(&priv->reg_mutex); ++ ++ mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP); ++ if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL)) ++ port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP) ++ & PORT_MAP_MASK; ++ ++ port_mask |= BIT(port); ++ mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT); ++ mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); ++ ++ mutex_unlock(&priv->reg_mutex); ++} ++ ++static int ++mt7530_port_mdb_del(struct dsa_switch *ds, int port, ++ const struct switchdev_obj_port_mdb *mdb) ++{ ++ struct mt7530_priv *priv = ds->priv; ++ const u8 *addr = mdb->addr; ++ u16 vid = mdb->vid; ++ u8 port_mask = 0; ++ int ret; ++ ++ mutex_lock(&priv->reg_mutex); ++ ++ mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP); ++ if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL)) ++ port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP) ++ & PORT_MAP_MASK; ++ ++ port_mask &= ~BIT(port); ++ mt7530_fdb_write(priv, vid, port_mask, addr, -1, ++ port_mask ? STATIC_ENT : STATIC_EMP); ++ ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); ++ ++ mutex_unlock(&priv->reg_mutex); ++ ++ return ret; ++} ++ ++static int + mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid) + { + struct mt7530_dummy_poll p; +@@ -2794,11 +2862,15 @@ static const struct dsa_switch_ops mt753 + .port_change_mtu = mt7530_port_change_mtu, + .port_max_mtu = mt7530_port_max_mtu, + .port_stp_state_set = mt7530_stp_state_set, ++ .port_egress_floods = mt7530_port_egress_floods, + .port_bridge_join = mt7530_port_bridge_join, + .port_bridge_leave = mt7530_port_bridge_leave, + .port_fdb_add = mt7530_port_fdb_add, + .port_fdb_del = mt7530_port_fdb_del, + .port_fdb_dump = mt7530_port_fdb_dump, ++ .port_mdb_prepare = mt7530_port_mdb_prepare, ++ .port_mdb_add = mt7530_port_mdb_add, ++ .port_mdb_del = mt7530_port_mdb_del, + .port_vlan_filtering = mt7530_port_vlan_filtering, + .port_vlan_prepare = mt7530_port_vlan_prepare, + .port_vlan_add = mt7530_port_vlan_add, +--- a/net/dsa/tag_mtk.c ++++ b/net/dsa/tag_mtk.c +@@ -24,9 +24,6 @@ static struct sk_buff *mtk_tag_xmit(stru + struct dsa_port *dp = dsa_slave_to_port(dev); + u8 xmit_tpid; + u8 *mtk_tag; +- unsigned char *dest = eth_hdr(skb)->h_dest; +- bool is_multicast_skb = is_multicast_ether_addr(dest) && +- !is_broadcast_ether_addr(dest); + + /* Build the special tag after the MAC Source Address. 
If VLAN header + * is present, it's required that VLAN header and special tag is +@@ -55,10 +52,6 @@ static struct sk_buff *mtk_tag_xmit(stru + mtk_tag[0] = xmit_tpid; + mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK; + +- /* Disable SA learning for multicast frames */ +- if (unlikely(is_multicast_skb)) +- mtk_tag[1] |= MTK_HDR_XMIT_SA_DIS; +- + /* Tag control information is kept for 802.1Q */ + if (xmit_tpid == MTK_HDR_XMIT_UNTAGGED) { + mtk_tag[2] = 0; +@@ -74,9 +67,6 @@ static struct sk_buff *mtk_tag_rcv(struc + u16 hdr; + int port; + __be16 *phdr; +- unsigned char *dest = eth_hdr(skb)->h_dest; +- bool is_multicast_skb = is_multicast_ether_addr(dest) && +- !is_broadcast_ether_addr(dest); + + if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN))) + return NULL; +@@ -102,9 +92,7 @@ static struct sk_buff *mtk_tag_rcv(struc + if (!skb->dev) + return NULL; + +- /* Only unicast or broadcast frames are offloaded */ +- if (likely(!is_multicast_skb)) +- dsa_default_offload_fwd_mark(skb); ++ dsa_default_offload_fwd_mark(skb); + + return skb; + } diff --git a/target/linux/generic/backport-5.15/771-v5.14-net-phy-add-MediaTek-Gigabit-Ethernet-PHY-driver.patch b/target/linux/generic/backport-5.15/771-v5.14-net-phy-add-MediaTek-Gigabit-Ethernet-PHY-driver.patch new file mode 100644 index 0000000000..67e3ca91ed --- /dev/null +++ b/target/linux/generic/backport-5.15/771-v5.14-net-phy-add-MediaTek-Gigabit-Ethernet-PHY-driver.patch @@ -0,0 +1,159 @@ +From e40d2cca01893c1941f5959b14bb0cd0d4f4d099 Mon Sep 17 00:00:00 2001 +From: DENG Qingfang <dqfext@gmail.com> +Date: Wed, 19 May 2021 11:31:59 +0800 +Subject: [PATCH] net: phy: add MediaTek Gigabit Ethernet PHY driver + +Add support for MediaTek Gigabit Ethernet PHYs found in MT7530 and +MT7531 switches. +The initialization procedure is from the vendor driver, but due to lack +of documentation, the function of some register values remains unknown. + +Signed-off-by: DENG Qingfang <dqfext@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/phy/Kconfig | 5 ++ + drivers/net/phy/Makefile | 1 + + drivers/net/phy/mediatek-ge.c | 116 ++++++++++++++++++++++++++++++++++ + 3 files changed, 122 insertions(+) + create mode 100644 drivers/net/phy/mediatek-ge.c + +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -201,6 +201,11 @@ config MARVELL_10G_PHY + help + Support for the Marvell Alaska MV88X3310 and compatible PHYs. + ++config MEDIATEK_GE_PHY ++ tristate "MediaTek PHYs" ++ help ++ Supports the MediaTek switch integrated PHYs. 
++ + config MICREL_PHY + tristate "Micrel PHYs" + help +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -63,6 +63,7 @@ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c + obj-$(CONFIG_LXT_PHY) += lxt.o + obj-$(CONFIG_MARVELL_10G_PHY) += marvell10g.o + obj-$(CONFIG_MARVELL_PHY) += marvell.o ++obj-$(CONFIG_MEDIATEK_GE_PHY) += mediatek-ge.o + obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o + obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o + obj-$(CONFIG_MICREL_PHY) += micrel.o +--- /dev/null ++++ b/drivers/net/phy/mediatek-ge.c +@@ -0,0 +1,113 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++#include <linux/bitfield.h> ++#include <linux/module.h> ++#include <linux/phy.h> ++ ++#define MTK_T10_TEST_CONTROL 0x145 ++#define MTK_PHY_TP_MASK GENMASK(4, 3) ++#define MTK_PHY_TP_AUTO 0 ++#define MTK_PHY_TP_MDI 2 ++#define MTK_PHY_TP_MDIX 3 ++ ++#define MTK_EXT_PAGE_ACCESS 0x1f ++#define MTK_PHY_PAGE_STANDARD 0x0000 ++#define MTK_PHY_PAGE_EXTENDED 0x0001 ++#define MTK_PHY_PAGE_EXTENDED_2 0x0002 ++#define MTK_PHY_PAGE_EXTENDED_3 0x0003 ++#define MTK_PHY_PAGE_EXTENDED_2A30 0x2a30 ++#define MTK_PHY_PAGE_EXTENDED_52B5 0x52b5 ++ ++static int mtk_gephy_read_page(struct phy_device *phydev) ++{ ++ return __phy_read(phydev, MTK_EXT_PAGE_ACCESS); ++} ++ ++static int mtk_gephy_write_page(struct phy_device *phydev, int page) ++{ ++ return __phy_write(phydev, MTK_EXT_PAGE_ACCESS, page); ++} ++ ++static void mtk_gephy_config_init(struct phy_device *phydev) ++{ ++ /* Disable EEE */ ++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0); ++ ++ /* Enable HW auto downshift */ ++ phy_modify_paged(phydev, MTK_PHY_PAGE_EXTENDED, 0x14, 0, BIT(4)); ++ ++ /* Increase SlvDPSready time */ ++ phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5); ++ __phy_write(phydev, 0x10, 0xafae); ++ __phy_write(phydev, 0x12, 0x2f); ++ __phy_write(phydev, 0x10, 0x8fae); ++ phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); ++ ++ /* Adjust 100_mse_threshold */ ++ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x123, 0xffff); ++ ++ /* Disable mcc */ ++ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xa6, 0x300); ++} ++ ++static int mt7530_phy_config_init(struct phy_device *phydev) ++{ ++ mtk_gephy_config_init(phydev); ++ ++ /* Increase post_update_timer */ ++ phy_write_paged(phydev, MTK_PHY_PAGE_EXTENDED_3, 0x11, 0x4b); ++ ++ return 0; ++} ++ ++static int mt7531_phy_config_init(struct phy_device *phydev) ++{ ++ mtk_gephy_config_init(phydev); ++ ++ /* PHY link down power saving enable */ ++ phy_set_bits(phydev, 0x17, BIT(4)); ++ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, 0xc6, 0x300); ++ ++ /* Set TX Pair delay selection */ ++ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x13, 0x404); ++ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x14, 0x404); ++ ++ return 0; ++} ++ ++static struct phy_driver mtk_gephy_driver[] = { ++ { ++ PHY_ID_MATCH_EXACT(0x03a29412), ++ .name = "MediaTek MT7530 PHY", ++ .config_init = mt7530_phy_config_init, ++ /* Interrupts are handled by the switch, not the PHY ++ * itself. ++ */ ++ .config_intr = genphy_no_config_intr, ++ .ack_interrupt = genphy_no_ack_interrupt, ++ .suspend = genphy_suspend, ++ .resume = genphy_resume, ++ .read_page = mtk_gephy_read_page, ++ .write_page = mtk_gephy_write_page, ++ }, ++ { ++ PHY_ID_MATCH_EXACT(0x03a29441), ++ .name = "MediaTek MT7531 PHY", ++ .config_init = mt7531_phy_config_init, ++ /* Interrupts are handled by the switch, not the PHY ++ * itself. 
++ */ ++ .config_intr = genphy_no_config_intr, ++ .ack_interrupt = genphy_no_ack_interrupt, ++ .suspend = genphy_suspend, ++ .resume = genphy_resume, ++ .read_page = mtk_gephy_read_page, ++ .write_page = mtk_gephy_write_page, ++ }, ++}; ++ ++module_phy_driver(mtk_gephy_driver); ++ ++static struct mdio_device_id __maybe_unused mtk_gephy_tbl[] = { ++ { PHY_ID_MATCH_VENDOR(0x03a29400) }, ++ { } ++}; diff --git a/target/linux/generic/backport-5.15/772-v5.14-net-dsa-mt7530-add-interrupt-support.patch b/target/linux/generic/backport-5.15/772-v5.14-net-dsa-mt7530-add-interrupt-support.patch new file mode 100644 index 0000000000..9890015301 --- /dev/null +++ b/target/linux/generic/backport-5.15/772-v5.14-net-dsa-mt7530-add-interrupt-support.patch @@ -0,0 +1,425 @@ +From ba751e28d44255744a30190faad0ca09b455c44d Mon Sep 17 00:00:00 2001 +From: DENG Qingfang <dqfext@gmail.com> +Date: Wed, 19 May 2021 11:32:00 +0800 +Subject: [PATCH] net: dsa: mt7530: add interrupt support + +Add support for MT7530 interrupt controller to handle internal PHYs. +In order to assign an IRQ number to each PHY, the registration of MDIO bus +is also done in this driver. + +Signed-off-by: DENG Qingfang <dqfext@gmail.com> +Reviewed-by: Andrew Lunn <andrew@lunn.ch> +Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/dsa/mt7530.c | 264 +++++++++++++++++++++++++++++++++++---- + drivers/net/dsa/mt7530.h | 20 ++- + 2 files changed, 256 insertions(+), 28 deletions(-) + +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -10,6 +10,7 @@ + #include <linux/mfd/syscon.h> + #include <linux/module.h> + #include <linux/netdevice.h> ++#include <linux/of_irq.h> + #include <linux/of_mdio.h> + #include <linux/of_net.h> + #include <linux/of_platform.h> +@@ -602,18 +603,14 @@ mt7530_mib_reset(struct dsa_switch *ds) + mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE); + } + +-static int mt7530_phy_read(struct dsa_switch *ds, int port, int regnum) ++static int mt7530_phy_read(struct mt7530_priv *priv, int port, int regnum) + { +- struct mt7530_priv *priv = ds->priv; +- + return mdiobus_read_nested(priv->bus, port, regnum); + } + +-static int mt7530_phy_write(struct dsa_switch *ds, int port, int regnum, ++static int mt7530_phy_write(struct mt7530_priv *priv, int port, int regnum, + u16 val) + { +- struct mt7530_priv *priv = ds->priv; +- + return mdiobus_write_nested(priv->bus, port, regnum, val); + } + +@@ -791,9 +788,8 @@ out: + } + + static int +-mt7531_ind_phy_read(struct dsa_switch *ds, int port, int regnum) ++mt7531_ind_phy_read(struct mt7530_priv *priv, int port, int regnum) + { +- struct mt7530_priv *priv = ds->priv; + int devad; + int ret; + +@@ -809,10 +805,9 @@ mt7531_ind_phy_read(struct dsa_switch *d + } + + static int +-mt7531_ind_phy_write(struct dsa_switch *ds, int port, int regnum, ++mt7531_ind_phy_write(struct mt7530_priv *priv, int port, int regnum, + u16 data) + { +- struct mt7530_priv *priv = ds->priv; + int devad; + int ret; + +@@ -828,6 +823,22 @@ mt7531_ind_phy_write(struct dsa_switch * + return ret; + } + ++static int ++mt753x_phy_read(struct mii_bus *bus, int port, int regnum) ++{ ++ struct mt7530_priv *priv = bus->priv; ++ ++ return priv->info->phy_read(priv, port, regnum); ++} ++ ++static int ++mt753x_phy_write(struct mii_bus *bus, int port, int regnum, u16 val) ++{ ++ struct mt7530_priv *priv = bus->priv; ++ ++ return priv->info->phy_write(priv, port, regnum, val); ++} ++ + static 
void + mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) +@@ -1824,6 +1835,210 @@ mt7530_setup_gpio(struct mt7530_priv *pr + return devm_gpiochip_add_data(dev, gc, priv); + } + ++static irqreturn_t ++mt7530_irq_thread_fn(int irq, void *dev_id) ++{ ++ struct mt7530_priv *priv = dev_id; ++ bool handled = false; ++ u32 val; ++ int p; ++ ++ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED); ++ val = mt7530_mii_read(priv, MT7530_SYS_INT_STS); ++ mt7530_mii_write(priv, MT7530_SYS_INT_STS, val); ++ mutex_unlock(&priv->bus->mdio_lock); ++ ++ for (p = 0; p < MT7530_NUM_PHYS; p++) { ++ if (BIT(p) & val) { ++ unsigned int irq; ++ ++ irq = irq_find_mapping(priv->irq_domain, p); ++ handle_nested_irq(irq); ++ handled = true; ++ } ++ } ++ ++ return IRQ_RETVAL(handled); ++} ++ ++static void ++mt7530_irq_mask(struct irq_data *d) ++{ ++ struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); ++ ++ priv->irq_enable &= ~BIT(d->hwirq); ++} ++ ++static void ++mt7530_irq_unmask(struct irq_data *d) ++{ ++ struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); ++ ++ priv->irq_enable |= BIT(d->hwirq); ++} ++ ++static void ++mt7530_irq_bus_lock(struct irq_data *d) ++{ ++ struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); ++ ++ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED); ++} ++ ++static void ++mt7530_irq_bus_sync_unlock(struct irq_data *d) ++{ ++ struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); ++ ++ mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable); ++ mutex_unlock(&priv->bus->mdio_lock); ++} ++ ++static struct irq_chip mt7530_irq_chip = { ++ .name = KBUILD_MODNAME, ++ .irq_mask = mt7530_irq_mask, ++ .irq_unmask = mt7530_irq_unmask, ++ .irq_bus_lock = mt7530_irq_bus_lock, ++ .irq_bus_sync_unlock = mt7530_irq_bus_sync_unlock, ++}; ++ ++static int ++mt7530_irq_map(struct irq_domain *domain, unsigned int irq, ++ irq_hw_number_t hwirq) ++{ ++ irq_set_chip_data(irq, domain->host_data); ++ irq_set_chip_and_handler(irq, &mt7530_irq_chip, handle_simple_irq); ++ irq_set_nested_thread(irq, true); ++ irq_set_noprobe(irq); ++ ++ return 0; ++} ++ ++static const struct irq_domain_ops mt7530_irq_domain_ops = { ++ .map = mt7530_irq_map, ++ .xlate = irq_domain_xlate_onecell, ++}; ++ ++static void ++mt7530_setup_mdio_irq(struct mt7530_priv *priv) ++{ ++ struct dsa_switch *ds = priv->ds; ++ int p; ++ ++ for (p = 0; p < MT7530_NUM_PHYS; p++) { ++ if (BIT(p) & ds->phys_mii_mask) { ++ unsigned int irq; ++ ++ irq = irq_create_mapping(priv->irq_domain, p); ++ ds->slave_mii_bus->irq[p] = irq; ++ } ++ } ++} ++ ++static int ++mt7530_setup_irq(struct mt7530_priv *priv) ++{ ++ struct device *dev = priv->dev; ++ struct device_node *np = dev->of_node; ++ int ret; ++ ++ if (!of_property_read_bool(np, "interrupt-controller")) { ++ dev_info(dev, "no interrupt support\n"); ++ return 0; ++ } ++ ++ priv->irq = of_irq_get(np, 0); ++ if (priv->irq <= 0) { ++ dev_err(dev, "failed to get parent IRQ: %d\n", priv->irq); ++ return priv->irq ? 
: -EINVAL; ++ } ++ ++ priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS, ++ &mt7530_irq_domain_ops, priv); ++ if (!priv->irq_domain) { ++ dev_err(dev, "failed to create IRQ domain\n"); ++ return -ENOMEM; ++ } ++ ++ /* This register must be set for MT7530 to properly fire interrupts */ ++ if (priv->id != ID_MT7531) ++ mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL); ++ ++ ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn, ++ IRQF_ONESHOT, KBUILD_MODNAME, priv); ++ if (ret) { ++ irq_domain_remove(priv->irq_domain); ++ dev_err(dev, "failed to request IRQ: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void ++mt7530_free_mdio_irq(struct mt7530_priv *priv) ++{ ++ int p; ++ ++ for (p = 0; p < MT7530_NUM_PHYS; p++) { ++ if (BIT(p) & priv->ds->phys_mii_mask) { ++ unsigned int irq; ++ ++ irq = irq_find_mapping(priv->irq_domain, p); ++ irq_dispose_mapping(irq); ++ } ++ } ++} ++ ++static void ++mt7530_free_irq_common(struct mt7530_priv *priv) ++{ ++ free_irq(priv->irq, priv); ++ irq_domain_remove(priv->irq_domain); ++} ++ ++static void ++mt7530_free_irq(struct mt7530_priv *priv) ++{ ++ mt7530_free_mdio_irq(priv); ++ mt7530_free_irq_common(priv); ++} ++ ++static int ++mt7530_setup_mdio(struct mt7530_priv *priv) ++{ ++ struct dsa_switch *ds = priv->ds; ++ struct device *dev = priv->dev; ++ struct mii_bus *bus; ++ static int idx; ++ int ret; ++ ++ bus = devm_mdiobus_alloc(dev); ++ if (!bus) ++ return -ENOMEM; ++ ++ ds->slave_mii_bus = bus; ++ bus->priv = priv; ++ bus->name = KBUILD_MODNAME "-mii"; ++ snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++); ++ bus->read = mt753x_phy_read; ++ bus->write = mt753x_phy_write; ++ bus->parent = dev; ++ bus->phy_mask = ~ds->phys_mii_mask; ++ ++ if (priv->irq) ++ mt7530_setup_mdio_irq(priv); ++ ++ ret = devm_mdiobus_register(dev, bus); ++ if (ret) { ++ dev_err(dev, "failed to register MDIO bus: %d\n", ret); ++ if (priv->irq) ++ mt7530_free_mdio_irq(priv); ++ } ++ ++ return ret; ++} ++ + static int + mt7530_setup(struct dsa_switch *ds) + { +@@ -2798,24 +3013,20 @@ static int + mt753x_setup(struct dsa_switch *ds) + { + struct mt7530_priv *priv = ds->priv; ++ int ret = priv->info->sw_setup(ds); + +- return priv->info->sw_setup(ds); +-} +- +-static int +-mt753x_phy_read(struct dsa_switch *ds, int port, int regnum) +-{ +- struct mt7530_priv *priv = ds->priv; ++ if (ret) ++ return ret; + +- return priv->info->phy_read(ds, port, regnum); +-} ++ ret = mt7530_setup_irq(priv); ++ if (ret) ++ return ret; + +-static int +-mt753x_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) +-{ +- struct mt7530_priv *priv = ds->priv; ++ ret = mt7530_setup_mdio(priv); ++ if (ret && priv->irq) ++ mt7530_free_irq_common(priv); + +- return priv->info->phy_write(ds, port, regnum, val); ++ return ret; + } + + static int mt753x_get_mac_eee(struct dsa_switch *ds, int port, +@@ -2852,8 +3063,6 @@ static const struct dsa_switch_ops mt753 + .get_tag_protocol = mtk_get_tag_protocol, + .setup = mt753x_setup, + .get_strings = mt7530_get_strings, +- .phy_read = mt753x_phy_read, +- .phy_write = mt753x_phy_write, + .get_ethtool_stats = mt7530_get_ethtool_stats, + .get_sset_count = mt7530_get_sset_count, + .set_ageing_time = mt7530_set_ageing_time, +@@ -3036,6 +3245,9 @@ mt7530_remove(struct mdio_device *mdiode + dev_err(priv->dev, "Failed to disable io pwr: %d\n", + ret); + ++ if (priv->irq) ++ mt7530_free_irq(priv); ++ + dsa_unregister_switch(priv->ds); + mutex_destroy(&priv->reg_mutex); + } +--- a/drivers/net/dsa/mt7530.h 
++++ b/drivers/net/dsa/mt7530.h +@@ -7,6 +7,7 @@ + #define __MT7530_H + + #define MT7530_NUM_PORTS 7 ++#define MT7530_NUM_PHYS 5 + #define MT7530_CPU_PORT 6 + #define MT7530_NUM_FDB_RECORDS 2048 + #define MT7530_ALL_MEMBERS 0xff +@@ -401,6 +402,12 @@ enum mt7531_sgmii_force_duplex { + #define SYS_CTRL_SW_RST BIT(1) + #define SYS_CTRL_REG_RST BIT(0) + ++/* Register for system interrupt */ ++#define MT7530_SYS_INT_EN 0x7008 ++ ++/* Register for system interrupt status */ ++#define MT7530_SYS_INT_STS 0x700c ++ + /* Register for PHY Indirect Access Control */ + #define MT7531_PHY_IAC 0x701C + #define MT7531_PHY_ACS_ST BIT(31) +@@ -722,6 +729,8 @@ static const char *p5_intf_modes(unsigne + } + } + ++struct mt7530_priv; ++ + /* struct mt753x_info - This is the main data structure for holding the specific + * part for each supported device + * @sw_setup: Holding the handler to a device initialization +@@ -746,8 +755,8 @@ struct mt753x_info { + enum mt753x_id id; + + int (*sw_setup)(struct dsa_switch *ds); +- int (*phy_read)(struct dsa_switch *ds, int port, int regnum); +- int (*phy_write)(struct dsa_switch *ds, int port, int regnum, u16 val); ++ int (*phy_read)(struct mt7530_priv *priv, int port, int regnum); ++ int (*phy_write)(struct mt7530_priv *priv, int port, int regnum, u16 val); + int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface); + int (*cpu_port_config)(struct dsa_switch *ds, int port); + bool (*phy_mode_supported)(struct dsa_switch *ds, int port, +@@ -781,6 +790,10 @@ struct mt753x_info { + * registers + * @p6_interface Holding the current port 6 interface + * @p5_intf_sel: Holding the current port 5 interface select ++ * ++ * @irq: IRQ number of the switch ++ * @irq_domain: IRQ domain of the switch irq_chip ++ * @irq_enable: IRQ enable bits, synced to SYS_INT_EN + */ + struct mt7530_priv { + struct device *dev; +@@ -802,6 +815,9 @@ struct mt7530_priv { + struct mt7530_port ports[MT7530_NUM_PORTS]; + /* protect among processes for registers access*/ + struct mutex reg_mutex; ++ int irq; ++ struct irq_domain *irq_domain; ++ u32 irq_enable; + }; + + struct mt7530_hw_vlan_entry { diff --git a/target/linux/generic/backport-5.15/773-v5.18-1-net-dsa-Move-VLAN-filtering-syncing-out-of-dsa_switc.patch b/target/linux/generic/backport-5.15/773-v5.18-1-net-dsa-Move-VLAN-filtering-syncing-out-of-dsa_switc.patch new file mode 100644 index 0000000000..eb60134a1e --- /dev/null +++ b/target/linux/generic/backport-5.15/773-v5.18-1-net-dsa-Move-VLAN-filtering-syncing-out-of-dsa_switc.patch @@ -0,0 +1,83 @@ +From 7164a8cde4b42f76474088ccaf53f1e463d4e2f6 Mon Sep 17 00:00:00 2001 +From: Tobias Waldekranz <tobias@waldekranz.com> +Date: Mon, 24 Jan 2022 22:09:43 +0100 +Subject: [PATCH 5.10 1/2] net: dsa: Move VLAN filtering syncing out of + dsa_switch_bridge_leave +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +commit 381a730182f1d174e1950cd4e63e885b1c302051 upstream. + +Most of dsa_switch_bridge_leave was, in fact, dealing with the syncing +of VLAN filtering for switches on which that is a global +setting. Separate the two phases to prepare for the cross-chip related +bugfix in the following commit. + +Signed-off-by: Tobias Waldekranz <tobias@waldekranz.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + net/dsa/switch.c | 39 ++++++++++++++++++++++++++------------- + 1 file changed, 26 insertions(+), 13 deletions(-) + +--- a/net/dsa/switch.c ++++ b/net/dsa/switch.c +@@ -104,23 +104,12 @@ static int dsa_switch_bridge_join(struct + return 0; + } + +-static int dsa_switch_bridge_leave(struct dsa_switch *ds, +- struct dsa_notifier_bridge_info *info) ++static int dsa_switch_sync_vlan_filtering(struct dsa_switch *ds, ++ struct dsa_notifier_bridge_info *info) + { + bool unset_vlan_filtering = br_vlan_enabled(info->br); +- struct dsa_switch_tree *dst = ds->dst; + int err, i; + +- if (dst->index == info->tree_index && ds->index == info->sw_index && +- ds->ops->port_bridge_leave) +- ds->ops->port_bridge_leave(ds, info->port, info->br); +- +- if ((dst->index != info->tree_index || ds->index != info->sw_index) && +- ds->ops->crosschip_bridge_leave) +- ds->ops->crosschip_bridge_leave(ds, info->tree_index, +- info->sw_index, info->port, +- info->br); +- + /* If the bridge was vlan_filtering, the bridge core doesn't trigger an + * event for changing vlan_filtering setting upon slave ports leaving + * it. That is a good thing, because that lets us handle it and also +@@ -153,6 +142,30 @@ static int dsa_switch_bridge_leave(struc + if (err && err != EOPNOTSUPP) + return err; + } ++ ++ return 0; ++} ++ ++static int dsa_switch_bridge_leave(struct dsa_switch *ds, ++ struct dsa_notifier_bridge_info *info) ++{ ++ struct dsa_switch_tree *dst = ds->dst; ++ int err; ++ ++ if (dst->index == info->tree_index && ds->index == info->sw_index && ++ ds->ops->port_bridge_leave) ++ ds->ops->port_bridge_leave(ds, info->port, info->br); ++ ++ if ((dst->index != info->tree_index || ds->index != info->sw_index) && ++ ds->ops->crosschip_bridge_leave) ++ ds->ops->crosschip_bridge_leave(ds, info->tree_index, ++ info->sw_index, info->port, ++ info->br); ++ ++ err = dsa_switch_sync_vlan_filtering(ds, info); ++ if (err) ++ return err; ++ + return 0; + } + diff --git a/target/linux/generic/backport-5.15/773-v5.18-2-net-dsa-Avoid-cross-chip-syncing-of-VLAN-filtering.patch b/target/linux/generic/backport-5.15/773-v5.18-2-net-dsa-Avoid-cross-chip-syncing-of-VLAN-filtering.patch new file mode 100644 index 0000000000..0b36ef7cec --- /dev/null +++ b/target/linux/generic/backport-5.15/773-v5.18-2-net-dsa-Avoid-cross-chip-syncing-of-VLAN-filtering.patch @@ -0,0 +1,58 @@ +From 6948a6654ffc878fc0258b363da77e7fd775b2d9 Mon Sep 17 00:00:00 2001 +From: Tobias Waldekranz <tobias@waldekranz.com> +Date: Mon, 24 Jan 2022 22:09:44 +0100 +Subject: [PATCH 5.10 2/2] net: dsa: Avoid cross-chip syncing of VLAN filtering +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +commit 108dc8741c203e9d6ce4e973367f1bac20c7192b upstream. + +Changes to VLAN filtering are not applicable to cross-chip +notifications. + +On a system like this: + +.-----. .-----. .-----. +| sw1 +---+ sw2 +---+ sw3 | +'-1-2-' '-1-2-' '-1-2-' + +Before this change, upon sw1p1 leaving a bridge, a call to +dsa_port_vlan_filtering would also be made to sw2p1 and sw3p1. + +In this scenario: + +.---------. .-----. .-----. +| sw1 +---+ sw2 +---+ sw3 | +'-1-2-3-4-' '-1-2-' '-1-2-' + +When sw1p4 would leave a bridge, dsa_port_vlan_filtering would be +called for sw2 and sw3 with a non-existing port - leading to array +out-of-bounds accesses and crashes on mv88e6xxx. 
+ +Fixes: d371b7c92d19 ("net: dsa: Unset vlan_filtering when ports leave the bridge") +Signed-off-by: Tobias Waldekranz <tobias@waldekranz.com> +Reviewed-by: Vladimir Oltean <olteanv@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +Signed-off-by: Marek Behún <kabel@kernel.org> +--- + net/dsa/switch.c | 8 +++++--- + 1 file changed, 5 insertions(+), 3 deletions(-) + +--- a/net/dsa/switch.c ++++ b/net/dsa/switch.c +@@ -162,9 +162,11 @@ static int dsa_switch_bridge_leave(struc + info->sw_index, info->port, + info->br); + +- err = dsa_switch_sync_vlan_filtering(ds, info); +- if (err) +- return err; ++ if (dst->index == info->tree_index && ds->index == info->sw_index) { ++ err = dsa_switch_sync_vlan_filtering(ds, info); ++ if (err) ++ return err; ++ } + + return 0; + } diff --git a/target/linux/generic/backport-5.15/774-v5.15-1-igc-remove-_I_PHY_ID-checking.patch b/target/linux/generic/backport-5.15/774-v5.15-1-igc-remove-_I_PHY_ID-checking.patch new file mode 100644 index 0000000000..d7df0685dd --- /dev/null +++ b/target/linux/generic/backport-5.15/774-v5.15-1-igc-remove-_I_PHY_ID-checking.patch @@ -0,0 +1,73 @@ +From 7c496de538eebd8212dc2a3c9a468386b264d0d4 Mon Sep 17 00:00:00 2001 +From: Sasha Neftin <sasha.neftin@intel.com> +Date: Wed, 7 Jul 2021 08:14:40 +0300 +Subject: igc: Remove _I_PHY_ID checking + +i225 devices have only one PHY vendor. There is no point checking +_I_PHY_ID during the link establishment and auto-negotiation process. +This patch comes to clean up these pointless checkings. + +Signed-off-by: Sasha Neftin <sasha.neftin@intel.com> +Tested-by: Dvora Fuxbrumer <dvorax.fuxbrumer@linux.intel.com> +Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com> +--- + drivers/net/ethernet/intel/igc/igc_base.c | 10 +--------- + drivers/net/ethernet/intel/igc/igc_main.c | 3 +-- + drivers/net/ethernet/intel/igc/igc_phy.c | 6 ++---- + 3 files changed, 4 insertions(+), 15 deletions(-) + +(limited to 'drivers/net/ethernet/intel/igc') + +--- a/drivers/net/ethernet/intel/igc/igc_base.c ++++ b/drivers/net/ethernet/intel/igc/igc_base.c +@@ -187,15 +187,7 @@ static s32 igc_init_phy_params_base(stru + + igc_check_for_copper_link(hw); + +- /* Verify phy id and set remaining function pointers */ +- switch (phy->id) { +- case I225_I_PHY_ID: +- phy->type = igc_phy_i225; +- break; +- default: +- ret_val = -IGC_ERR_PHY; +- goto out; +- } ++ phy->type = igc_phy_i225; + + out: + return ret_val; +--- a/drivers/net/ethernet/intel/igc/igc_main.c ++++ b/drivers/net/ethernet/intel/igc/igc_main.c +@@ -4189,8 +4189,7 @@ bool igc_has_link(struct igc_adapter *ad + break; + } + +- if (hw->mac.type == igc_i225 && +- hw->phy.id == I225_I_PHY_ID) { ++ if (hw->mac.type == igc_i225) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { +--- a/drivers/net/ethernet/intel/igc/igc_phy.c ++++ b/drivers/net/ethernet/intel/igc/igc_phy.c +@@ -249,8 +249,7 @@ static s32 igc_phy_setup_autoneg(struct + return ret_val; + } + +- if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && +- hw->phy.id == I225_I_PHY_ID) { ++ if (phy->autoneg_mask & ADVERTISE_2500_FULL) { + /* Read the MULTI GBT AN Control Register - reg 7.32 */ + ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | +@@ -390,8 +389,7 @@ static s32 igc_phy_setup_autoneg(struct + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + +- if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && +- hw->phy.id == I225_I_PHY_ID) ++ 
if (phy->autoneg_mask & ADVERTISE_2500_FULL) + ret_val = phy->ops.write_reg(hw, + (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | diff --git a/target/linux/generic/backport-5.15/774-v5.15-2-igc-remove-phy-type-checking.patch b/target/linux/generic/backport-5.15/774-v5.15-2-igc-remove-phy-type-checking.patch new file mode 100644 index 0000000000..ad4d1bb0dc --- /dev/null +++ b/target/linux/generic/backport-5.15/774-v5.15-2-igc-remove-phy-type-checking.patch @@ -0,0 +1,43 @@ +From 47bca7de6a4fb8dcb564c7ca14d885c91ed19e03 Mon Sep 17 00:00:00 2001 +From: Sasha Neftin <sasha.neftin@intel.com> +Date: Sat, 10 Jul 2021 20:57:50 +0300 +Subject: igc: Remove phy->type checking + +i225 devices have only one phy->type: copper. There is no point checking +phy->type during the igc_has_link method from the watchdog that +invoked every 2 seconds. +This patch comes to clean up these pointless checkings. + +Signed-off-by: Sasha Neftin <sasha.neftin@intel.com> +Tested-by: Dvora Fuxbrumer <dvorax.fuxbrumer@linux.intel.com> +Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com> +--- + drivers/net/ethernet/intel/igc/igc_main.c | 15 ++++----------- + 1 file changed, 4 insertions(+), 11 deletions(-) + +(limited to 'drivers/net/ethernet/intel/igc') + +--- a/drivers/net/ethernet/intel/igc/igc_main.c ++++ b/drivers/net/ethernet/intel/igc/igc_main.c +@@ -4177,17 +4177,10 @@ bool igc_has_link(struct igc_adapter *ad + * false until the igc_check_for_link establishes link + * for copper adapters ONLY + */ +- switch (hw->phy.media_type) { +- case igc_media_type_copper: +- if (!hw->mac.get_link_status) +- return true; +- hw->mac.ops.check_for_link(hw); +- link_active = !hw->mac.get_link_status; +- break; +- default: +- case igc_media_type_unknown: +- break; +- } ++ if (!hw->mac.get_link_status) ++ return true; ++ hw->mac.ops.check_for_link(hw); ++ link_active = !hw->mac.get_link_status; + + if (hw->mac.type == igc_i225) { + if (!netif_carrier_ok(adapter->netdev)) { diff --git a/target/linux/generic/backport-5.15/780-v5.11-net-usb-r8152-Provide-missing-documentation-for-some.patch b/target/linux/generic/backport-5.15/780-v5.11-net-usb-r8152-Provide-missing-documentation-for-some.patch new file mode 100644 index 0000000000..bec2867a48 --- /dev/null +++ b/target/linux/generic/backport-5.15/780-v5.11-net-usb-r8152-Provide-missing-documentation-for-some.patch @@ -0,0 +1,72 @@ +From 586f04ce6a391419ca3cc9cef6b6f38570cede88 Mon Sep 17 00:00:00 2001 +From: Lee Jones <lee.jones@linaro.org> +Date: Mon, 2 Nov 2020 11:45:04 +0000 +Subject: [PATCH] net: usb: r8152: Provide missing documentation for + some struct members + +commit 34e653efb602e0651867fb5ab14369b555a61dcd upstream. 
+ +Fixes the following W=1 kernel build warning(s): + + drivers/net/usb/r8152.c:934: warning: Function parameter or member 'blk_hdr' not described in 'fw_mac' + drivers/net/usb/r8152.c:934: warning: Function parameter or member 'reserved' not described in 'fw_mac' + drivers/net/usb/r8152.c:947: warning: Function parameter or member 'blk_hdr' not described in 'fw_phy_patch_key' + drivers/net/usb/r8152.c:947: warning: Function parameter or member 'reserved' not described in 'fw_phy_patch_key' + drivers/net/usb/r8152.c:986: warning: Function parameter or member 'blk_hdr' not described in 'fw_phy_nc' + drivers/net/usb/r8152.c:986: warning: Function parameter or member 'mode_pre' not described in 'fw_phy_nc' + drivers/net/usb/r8152.c:986: warning: Function parameter or member 'mode_post' not described in 'fw_phy_nc' + drivers/net/usb/r8152.c:986: warning: Function parameter or member 'reserved' not described in 'fw_phy_nc' + +Signed-off-by: Lee Jones <lee.jones@linaro.org> +Acked-by: Hayes Wang <hayeswang@realtek.com> +Link: https://lore.kernel.org/r/20201102114512.1062724-23-lee.jones@linaro.org +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/usb/r8152.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -899,6 +899,7 @@ struct fw_header { + * struct fw_mac - a firmware block used by RTL_FW_PLA and RTL_FW_USB. + * The layout of the firmware block is: + * <struct fw_mac> + <info> + <firmware data>. ++ * @blk_hdr: firmware descriptor (type, length) + * @fw_offset: offset of the firmware binary data. The start address of + * the data would be the address of struct fw_mac + @fw_offset. + * @fw_reg: the register to load the firmware. Depends on chip. +@@ -912,6 +913,7 @@ struct fw_header { + * @bp_num: the break point number which needs to be set for this firmware. + * Depends on the firmware. + * @bp: break points. Depends on firmware. ++ * @reserved: reserved space (unused) + * @fw_ver_reg: the register to store the fw version. + * @fw_ver_data: the firmware version of the current type. + * @info: additional information for debugging, and is followed by the +@@ -937,8 +939,10 @@ struct fw_mac { + /** + * struct fw_phy_patch_key - a firmware block used by RTL_FW_PHY_START. + * This is used to set patch key when loading the firmware of PHY. ++ * @blk_hdr: firmware descriptor (type, length) + * @key_reg: the register to write the patch key. + * @key_data: patch key. ++ * @reserved: reserved space (unused) + */ + struct fw_phy_patch_key { + struct fw_block blk_hdr; +@@ -951,6 +955,7 @@ struct fw_phy_patch_key { + * struct fw_phy_nc - a firmware block used by RTL_FW_PHY_NC. + * The layout of the firmware block is: + * <struct fw_phy_nc> + <info> + <firmware data>. ++ * @blk_hdr: firmware descriptor (type, length) + * @fw_offset: offset of the firmware binary data. The start address of + * the data would be the address of struct fw_phy_nc + @fw_offset. + * @fw_reg: the register to load the firmware. Depends on chip. +@@ -961,6 +966,7 @@ struct fw_phy_patch_key { + * @mode_reg: the regitster of switching the mode. + * @mod_pre: the mode needing to be set before loading the firmware. + * @mod_post: the mode to be set when finishing to load the firmware. ++ * @reserved: reserved space (unused) + * @bp_start: the start register of break points. Depends on chip. + * @bp_num: the break point number which needs to be set for this firmware. + * Depends on the firmware. 
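The warnings quoted in this patch (and in the spelling fix that follows) come
from the kernel-doc checker, which expects every struct member to be described
by a matching "@member:" line in the comment block above the definition. As a
minimal sketch of the expected form (using a made-up struct here rather than
the real fw_mac/fw_phy_nc layouts), a kernel-doc comment looks like this:

    #include <linux/types.h>

    /**
     * struct fw_example - hypothetical firmware block header.
     * @type: block type identifier.
     * @length: total block length in bytes.
     * @reserved: reserved space (unused).
     */
    struct fw_example {
        __le32 type;
        __le32 length;
        __le32 reserved;
    };

Each member needs exactly one "@name:" line spelled the same way as the field
itself; that is what the added @blk_hdr/@reserved lines above and the
@mod_pre/@mod_post to @mode_pre/@mode_post renames below are fixing.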
diff --git a/target/linux/generic/backport-5.15/781-v5.11-net-usb-r8152-Fix-a-couple-of-spelling-errors-in-fw_.patch b/target/linux/generic/backport-5.15/781-v5.11-net-usb-r8152-Fix-a-couple-of-spelling-errors-in-fw_.patch new file mode 100644 index 0000000000..aa2763e7de --- /dev/null +++ b/target/linux/generic/backport-5.15/781-v5.11-net-usb-r8152-Fix-a-couple-of-spelling-errors-in-fw_.patch @@ -0,0 +1,34 @@ +From 5fcfa846181de6676509696c4cd7b60a22e74077 Mon Sep 17 00:00:00 2001 +From: Lee Jones <lee.jones@linaro.org> +Date: Mon, 2 Nov 2020 11:45:09 +0000 +Subject: [PATCH] net: usb: r8152: Fix a couple of spelling errors in + fw_phy_nc's docs + +commit 9f07814d01ad085b2d9f1d55b4ce532fb2c27110 upstream. + +Fixes the following W=1 kernel build warning(s): + + drivers/net/usb/r8152.c:992: warning: Function parameter or member 'mode_pre' not described in 'fw_phy_nc' + drivers/net/usb/r8152.c:992: warning: Function parameter or member 'mode_post' not described in 'fw_phy_nc' + +Signed-off-by: Lee Jones <lee.jones@linaro.org> +Acked-by: Hayes Wang <hayeswang@realtek.com> +Link: https://lore.kernel.org/r/20201102114512.1062724-28-lee.jones@linaro.org +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/usb/r8152.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -964,8 +964,8 @@ struct fw_phy_patch_key { + * @patch_en_addr: the register of enabling patch mode. Depends on chip. + * @patch_en_value: patch mode enabled mask. Depends on the firmware. + * @mode_reg: the regitster of switching the mode. +- * @mod_pre: the mode needing to be set before loading the firmware. +- * @mod_post: the mode to be set when finishing to load the firmware. ++ * @mode_pre: the mode needing to be set before loading the firmware. ++ * @mode_post: the mode to be set when finishing to load the firmware. + * @reserved: reserved space (unused) + * @bp_start: the start register of break points. Depends on chip. + * @bp_num: the break point number which needs to be set for this firmware. diff --git a/target/linux/generic/backport-5.15/782-v5.11-net-usb-r8153_ecm-support-ECM-mode-for-RTL8153.patch b/target/linux/generic/backport-5.15/782-v5.11-net-usb-r8153_ecm-support-ECM-mode-for-RTL8153.patch new file mode 100644 index 0000000000..789de8e6fd --- /dev/null +++ b/target/linux/generic/backport-5.15/782-v5.11-net-usb-r8153_ecm-support-ECM-mode-for-RTL8153.patch @@ -0,0 +1,320 @@ +From 0ef50460f7f053bd2a911ec53e01bfda646a5574 Mon Sep 17 00:00:00 2001 +From: Hayes Wang <hayeswang@realtek.com> +Date: Wed, 4 Nov 2020 10:19:22 +0800 +Subject: [PATCH] net/usb/r8153_ecm: support ECM mode for RTL8153 + +commit c1aedf015ebdd0232757a66e2daccf1246bd609c upstream. + +Support ECM mode based on cdc_ether with relative mii functions, +when CONFIG_USB_RTL8152 is not set, or the device is not supported +by r8152 driver. + +Both r8152 and r8153_ecm would check the return value of +rtl8152_get_version() in porbe(). If rtl8152_get_version() +return none zero value, the r8152 is used for the device +with vendor mode. Otherwise, the r8153_ecm is used for the +device with ECM mode. 
+ +Signed-off-by: Hayes Wang <hayeswang@realtek.com> +Link: https://lore.kernel.org/r/1394712342-15778-392-Taiwan-albertk@realtek.com +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/usb/Makefile | 2 +- + drivers/net/usb/r8152.c | 30 +------ + drivers/net/usb/r8153_ecm.c | 162 ++++++++++++++++++++++++++++++++++++ + include/linux/usb/r8152.h | 37 ++++++++ + 4 files changed, 204 insertions(+), 27 deletions(-) + create mode 100644 drivers/net/usb/r8153_ecm.c + create mode 100644 include/linux/usb/r8152.h + +--- a/drivers/net/usb/Makefile ++++ b/drivers/net/usb/Makefile +@@ -13,7 +13,7 @@ obj-$(CONFIG_USB_LAN78XX) += lan78xx.o + obj-$(CONFIG_USB_NET_AX8817X) += asix.o + asix-y := asix_devices.o asix_common.o ax88172a.o + obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o +-obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o ++obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r8153_ecm.o + obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o + obj-$(CONFIG_USB_NET_DM9601) += dm9601.o + obj-$(CONFIG_USB_NET_SR9700) += sr9700.o +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -27,6 +27,7 @@ + #include <linux/acpi.h> + #include <linux/firmware.h> + #include <crypto/hash.h> ++#include <linux/usb/r8152.h> + + /* Information for net-next */ + #define NETNEXT_VERSION "11" +@@ -654,18 +655,6 @@ enum rtl_register_content { + + #define INTR_LINK 0x0004 + +-#define RTL8152_REQT_READ 0xc0 +-#define RTL8152_REQT_WRITE 0x40 +-#define RTL8152_REQ_GET_REGS 0x05 +-#define RTL8152_REQ_SET_REGS 0x05 +- +-#define BYTE_EN_DWORD 0xff +-#define BYTE_EN_WORD 0x33 +-#define BYTE_EN_BYTE 0x11 +-#define BYTE_EN_SIX_BYTES 0x3f +-#define BYTE_EN_START_MASK 0x0f +-#define BYTE_EN_END_MASK 0xf0 +- + #define RTL8153_MAX_PACKET 9216 /* 9K */ + #define RTL8153_MAX_MTU (RTL8153_MAX_PACKET - VLAN_ETH_HLEN - \ + ETH_FCS_LEN) +@@ -690,21 +679,9 @@ enum rtl8152_flags { + LENOVO_MACPASSTHRU, + }; + +-/* Define these values to match your device */ +-#define VENDOR_ID_REALTEK 0x0bda +-#define VENDOR_ID_MICROSOFT 0x045e +-#define VENDOR_ID_SAMSUNG 0x04e8 +-#define VENDOR_ID_LENOVO 0x17ef +-#define VENDOR_ID_LINKSYS 0x13b1 +-#define VENDOR_ID_NVIDIA 0x0955 +-#define VENDOR_ID_TPLINK 0x2357 +- + #define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082 + #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387 + +-#define MCU_TYPE_PLA 0x0100 +-#define MCU_TYPE_USB 0x0000 +- + struct tally_counter { + __le64 tx_packets; + __le64 rx_packets; +@@ -6625,7 +6602,7 @@ static int rtl_fw_init(struct r8152 *tp) + return 0; + } + +-static u8 rtl_get_version(struct usb_interface *intf) ++u8 rtl8152_get_version(struct usb_interface *intf) + { + struct usb_device *udev = interface_to_usbdev(intf); + u32 ocp_data = 0; +@@ -6683,12 +6660,13 @@ static u8 rtl_get_version(struct usb_int + + return version; + } ++EXPORT_SYMBOL_GPL(rtl8152_get_version); + + static int rtl8152_probe(struct usb_interface *intf, + const struct usb_device_id *id) + { + struct usb_device *udev = interface_to_usbdev(intf); +- u8 version = rtl_get_version(intf); ++ u8 version = rtl8152_get_version(intf); + struct r8152 *tp; + struct net_device *netdev; + int ret; +--- /dev/null ++++ b/drivers/net/usb/r8153_ecm.c +@@ -0,0 +1,162 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++#include <linux/module.h> ++#include <linux/netdevice.h> ++#include <linux/mii.h> ++#include <linux/usb.h> ++#include <linux/usb/cdc.h> ++#include <linux/usb/usbnet.h> ++#include <linux/usb/r8152.h> ++ ++#define OCP_BASE 0xe86c ++ ++static int pla_read_word(struct usbnet *dev, u16 index) ++{ ++ 
u16 byen = BYTE_EN_WORD; ++ u8 shift = index & 2; ++ __le32 tmp; ++ int ret; ++ ++ if (shift) ++ byen <<= shift; ++ ++ index &= ~3; ++ ++ ret = usbnet_read_cmd(dev, RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, index, ++ MCU_TYPE_PLA | byen, &tmp, sizeof(tmp)); ++ if (ret < 0) ++ goto out; ++ ++ ret = __le32_to_cpu(tmp); ++ ret >>= (shift * 8); ++ ret &= 0xffff; ++ ++out: ++ return ret; ++} ++ ++static int pla_write_word(struct usbnet *dev, u16 index, u32 data) ++{ ++ u32 mask = 0xffff; ++ u16 byen = BYTE_EN_WORD; ++ u8 shift = index & 2; ++ __le32 tmp; ++ int ret; ++ ++ data &= mask; ++ ++ if (shift) { ++ byen <<= shift; ++ mask <<= (shift * 8); ++ data <<= (shift * 8); ++ } ++ ++ index &= ~3; ++ ++ ret = usbnet_read_cmd(dev, RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, index, ++ MCU_TYPE_PLA | byen, &tmp, sizeof(tmp)); ++ ++ if (ret < 0) ++ goto out; ++ ++ data |= __le32_to_cpu(tmp) & ~mask; ++ tmp = __cpu_to_le32(data); ++ ++ ret = usbnet_write_cmd(dev, RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, index, ++ MCU_TYPE_PLA | byen, &tmp, sizeof(tmp)); ++ ++out: ++ return ret; ++} ++ ++static int r8153_ecm_mdio_read(struct net_device *netdev, int phy_id, int reg) ++{ ++ struct usbnet *dev = netdev_priv(netdev); ++ int ret; ++ ++ ret = pla_write_word(dev, OCP_BASE, 0xa000); ++ if (ret < 0) ++ goto out; ++ ++ ret = pla_read_word(dev, 0xb400 + reg * 2); ++ ++out: ++ return ret; ++} ++ ++static void r8153_ecm_mdio_write(struct net_device *netdev, int phy_id, int reg, int val) ++{ ++ struct usbnet *dev = netdev_priv(netdev); ++ int ret; ++ ++ ret = pla_write_word(dev, OCP_BASE, 0xa000); ++ if (ret < 0) ++ return; ++ ++ ret = pla_write_word(dev, 0xb400 + reg * 2, val); ++} ++ ++static int r8153_bind(struct usbnet *dev, struct usb_interface *intf) ++{ ++ int status; ++ ++ status = usbnet_cdc_bind(dev, intf); ++ if (status < 0) ++ return status; ++ ++ dev->mii.dev = dev->net; ++ dev->mii.mdio_read = r8153_ecm_mdio_read; ++ dev->mii.mdio_write = r8153_ecm_mdio_write; ++ dev->mii.reg_num_mask = 0x1f; ++ dev->mii.supports_gmii = 1; ++ ++ return status; ++} ++ ++static const struct driver_info r8153_info = { ++ .description = "RTL8153 ECM Device", ++ .flags = FLAG_ETHER, ++ .bind = r8153_bind, ++ .unbind = usbnet_cdc_unbind, ++ .status = usbnet_cdc_status, ++ .manage_power = usbnet_manage_power, ++}; ++ ++static const struct usb_device_id products[] = { ++{ ++ USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_REALTEK, 0x8153, USB_CLASS_COMM, ++ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), ++ .driver_info = (unsigned long)&r8153_info, ++}, ++ ++ { }, /* END */ ++}; ++MODULE_DEVICE_TABLE(usb, products); ++ ++static int rtl8153_ecm_probe(struct usb_interface *intf, ++ const struct usb_device_id *id) ++{ ++#if IS_REACHABLE(CONFIG_USB_RTL8152) ++ if (rtl8152_get_version(intf)) ++ return -ENODEV; ++#endif ++ ++ return usbnet_probe(intf, id); ++} ++ ++static struct usb_driver r8153_ecm_driver = { ++ .name = "r8153_ecm", ++ .id_table = products, ++ .probe = rtl8153_ecm_probe, ++ .disconnect = usbnet_disconnect, ++ .suspend = usbnet_suspend, ++ .resume = usbnet_resume, ++ .reset_resume = usbnet_resume, ++ .supports_autosuspend = 1, ++ .disable_hub_initiated_lpm = 1, ++}; ++ ++module_usb_driver(r8153_ecm_driver); ++ ++MODULE_AUTHOR("Hayes Wang"); ++MODULE_DESCRIPTION("Realtek USB ECM device"); ++MODULE_LICENSE("GPL"); +--- /dev/null ++++ b/include/linux/usb/r8152.h +@@ -0,0 +1,37 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright (c) 2020 Realtek Semiconductor Corp. All rights reserved. 
++ */ ++ ++#ifndef __LINUX_R8152_H ++#define __LINUX_R8152_H ++ ++#define RTL8152_REQT_READ 0xc0 ++#define RTL8152_REQT_WRITE 0x40 ++#define RTL8152_REQ_GET_REGS 0x05 ++#define RTL8152_REQ_SET_REGS 0x05 ++ ++#define BYTE_EN_DWORD 0xff ++#define BYTE_EN_WORD 0x33 ++#define BYTE_EN_BYTE 0x11 ++#define BYTE_EN_SIX_BYTES 0x3f ++#define BYTE_EN_START_MASK 0x0f ++#define BYTE_EN_END_MASK 0xf0 ++ ++#define MCU_TYPE_PLA 0x0100 ++#define MCU_TYPE_USB 0x0000 ++ ++/* Define these values to match your device */ ++#define VENDOR_ID_REALTEK 0x0bda ++#define VENDOR_ID_MICROSOFT 0x045e ++#define VENDOR_ID_SAMSUNG 0x04e8 ++#define VENDOR_ID_LENOVO 0x17ef ++#define VENDOR_ID_LINKSYS 0x13b1 ++#define VENDOR_ID_NVIDIA 0x0955 ++#define VENDOR_ID_TPLINK 0x2357 ++ ++#if IS_REACHABLE(CONFIG_USB_RTL8152) ++extern u8 rtl8152_get_version(struct usb_interface *intf); ++#endif ++ ++#endif /* __LINUX_R8152_H */ diff --git a/target/linux/generic/backport-5.15/783-v5.12-net-usb-r8152-use-new-tasklet-API.patch b/target/linux/generic/backport-5.15/783-v5.12-net-usb-r8152-use-new-tasklet-API.patch new file mode 100644 index 0000000000..b625d5ea99 --- /dev/null +++ b/target/linux/generic/backport-5.15/783-v5.12-net-usb-r8152-use-new-tasklet-API.patch @@ -0,0 +1,41 @@ +From 90f1afc7f96c8f7cf19c82e5f4b39e61a63b053d Mon Sep 17 00:00:00 2001 +From: Emil Renner Berthing <kernel@esmil.dk> +Date: Sun, 31 Jan 2021 00:47:29 +0100 +Subject: [PATCH] net: usb: r8152: use new tasklet API + +commit f3163f1cb87141c7a41a15a5d4c98b353f807b04 upstream. + +This converts the driver to use the new tasklet API introduced in +commit 12cc923f1ccc ("tasklet: Introduce new initialization API") + +Signed-off-by: Emil Renner Berthing <kernel@esmil.dk> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/usb/r8152.c | 8 +++----- + 1 file changed, 3 insertions(+), 5 deletions(-) + +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -2394,11 +2394,9 @@ static void tx_bottom(struct r8152 *tp) + } while (res == 0); + } + +-static void bottom_half(unsigned long data) ++static void bottom_half(struct tasklet_struct *t) + { +- struct r8152 *tp; +- +- tp = (struct r8152 *)data; ++ struct r8152 *tp = from_tasklet(tp, t, tx_tl); + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; +@@ -6718,7 +6716,7 @@ static int rtl8152_probe(struct usb_inte + mutex_init(&tp->control); + INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); + INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t); +- tasklet_init(&tp->tx_tl, bottom_half, (unsigned long)tp); ++ tasklet_setup(&tp->tx_tl, bottom_half); + tasklet_disable(&tp->tx_tl); + + netdev->netdev_ops = &rtl8152_netdev_ops; diff --git a/target/linux/generic/backport-5.15/784-v5.12-r8152-replace-several-functions-about-phy-patch-requ.patch b/target/linux/generic/backport-5.15/784-v5.12-r8152-replace-several-functions-about-phy-patch-requ.patch new file mode 100644 index 0000000000..bbb2b5c0c9 --- /dev/null +++ b/target/linux/generic/backport-5.15/784-v5.12-r8152-replace-several-functions-about-phy-patch-requ.patch @@ -0,0 +1,198 @@ +From 86b98abf4f8c691c260c5113d6a2d32f5377caca Mon Sep 17 00:00:00 2001 +From: Hayes Wang <hayeswang@realtek.com> +Date: Wed, 3 Feb 2021 17:14:28 +0800 +Subject: [PATCH] r8152: replace several functions about phy patch + request + +commit a08c0d309d8c078d22717d815cf9853f6f2c07bd upstream. + +Replace r8153_patch_request() with rtl_phy_patch_request(). +Replace r8153_pre_ram_code() with rtl_pre_ram_code(). +Replace r8153_post_ram_code() with rtl_post_ram_code(). 
+Add rtl_patch_key_set(). + +The new functions have an additional parameter. It is used to wait +the patch request command finished. When the PHY is resumed from +the state of power cut, the PHY is at a safe mode and the +OCP_PHY_PATCH_STAT wouldn't be updated. For this situation, it is +safe to set patch request command without waiting OCP_PHY_PATCH_STAT. + +Signed-off-by: Hayes Wang <hayeswang@realtek.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/usb/r8152.c | 84 ++++++++++++++++++++++++----------------- + 1 file changed, 50 insertions(+), 34 deletions(-) + +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -3444,59 +3444,76 @@ static void rtl_clear_bp(struct r8152 *t + ocp_write_word(tp, type, PLA_BP_BA, 0); + } + +-static int r8153_patch_request(struct r8152 *tp, bool request) ++static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait) + { +- u16 data; ++ u16 data, check; + int i; + + data = ocp_reg_read(tp, OCP_PHY_PATCH_CMD); +- if (request) ++ if (request) { + data |= PATCH_REQUEST; +- else ++ check = 0; ++ } else { + data &= ~PATCH_REQUEST; ++ check = PATCH_READY; ++ } + ocp_reg_write(tp, OCP_PHY_PATCH_CMD, data); + +- for (i = 0; request && i < 5000; i++) { ++ for (i = 0; wait && i < 5000; i++) { ++ u32 ocp_data; ++ + usleep_range(1000, 2000); +- if (ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY) ++ ocp_data = ocp_reg_read(tp, OCP_PHY_PATCH_STAT); ++ if ((ocp_data & PATCH_READY) ^ check) + break; + } + +- if (request && !(ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)) { +- netif_err(tp, drv, tp->netdev, "patch request fail\n"); +- r8153_patch_request(tp, false); ++ if (request && wait && ++ !(ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)) { ++ dev_err(&tp->intf->dev, "PHY patch request fail\n"); ++ rtl_phy_patch_request(tp, false, false); + return -ETIME; + } else { + return 0; + } + } + +-static int r8153_pre_ram_code(struct r8152 *tp, u16 key_addr, u16 patch_key) ++static void rtl_patch_key_set(struct r8152 *tp, u16 key_addr, u16 patch_key) + { +- if (r8153_patch_request(tp, true)) { +- dev_err(&tp->intf->dev, "patch request fail\n"); +- return -ETIME; +- } ++ if (patch_key && key_addr) { ++ sram_write(tp, key_addr, patch_key); ++ sram_write(tp, SRAM_PHY_LOCK, PHY_PATCH_LOCK); ++ } else if (key_addr) { ++ u16 data; + +- sram_write(tp, key_addr, patch_key); +- sram_write(tp, SRAM_PHY_LOCK, PHY_PATCH_LOCK); ++ sram_write(tp, 0x0000, 0x0000); + +- return 0; ++ data = ocp_reg_read(tp, OCP_PHY_LOCK); ++ data &= ~PATCH_LOCK; ++ ocp_reg_write(tp, OCP_PHY_LOCK, data); ++ ++ sram_write(tp, key_addr, 0x0000); ++ } else { ++ WARN_ON_ONCE(1); ++ } + } + +-static int r8153_post_ram_code(struct r8152 *tp, u16 key_addr) ++static int ++rtl_pre_ram_code(struct r8152 *tp, u16 key_addr, u16 patch_key, bool wait) + { +- u16 data; ++ if (rtl_phy_patch_request(tp, true, wait)) ++ return -ETIME; + +- sram_write(tp, 0x0000, 0x0000); ++ rtl_patch_key_set(tp, key_addr, patch_key); + +- data = ocp_reg_read(tp, OCP_PHY_LOCK); +- data &= ~PATCH_LOCK; +- ocp_reg_write(tp, OCP_PHY_LOCK, data); ++ return 0; ++} + +- sram_write(tp, key_addr, 0x0000); ++static int rtl_post_ram_code(struct r8152 *tp, u16 key_addr, bool wait) ++{ ++ rtl_patch_key_set(tp, key_addr, 0); + +- r8153_patch_request(tp, false); ++ rtl_phy_patch_request(tp, false, wait); + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base); + +@@ -3981,7 +3998,7 @@ static void rtl8152_fw_mac_apply(struct + dev_dbg(&tp->intf->dev, "successfully applied %s\n", 
mac->info); + } + +-static void rtl8152_apply_firmware(struct r8152 *tp) ++static void rtl8152_apply_firmware(struct r8152 *tp, bool power_cut) + { + struct rtl_fw *rtl_fw = &tp->rtl_fw; + const struct firmware *fw; +@@ -4012,12 +4029,11 @@ static void rtl8152_apply_firmware(struc + case RTL_FW_PHY_START: + key = (struct fw_phy_patch_key *)block; + key_addr = __le16_to_cpu(key->key_reg); +- r8153_pre_ram_code(tp, key_addr, +- __le16_to_cpu(key->key_data)); ++ rtl_pre_ram_code(tp, key_addr, __le16_to_cpu(key->key_data), !power_cut); + break; + case RTL_FW_PHY_STOP: + WARN_ON(!key_addr); +- r8153_post_ram_code(tp, key_addr); ++ rtl_post_ram_code(tp, key_addr, !power_cut); + break; + case RTL_FW_PHY_NC: + rtl8152_fw_phy_nc_apply(tp, (struct fw_phy_nc *)block); +@@ -4222,7 +4238,7 @@ static void rtl8152_disable(struct r8152 + + static void r8152b_hw_phy_cfg(struct r8152 *tp) + { +- rtl8152_apply_firmware(tp); ++ rtl8152_apply_firmware(tp, false); + rtl_eee_enable(tp, tp->eee_en); + r8152_aldps_en(tp, true); + r8152b_enable_fc(tp); +@@ -4504,7 +4520,7 @@ static void r8153_hw_phy_cfg(struct r815 + /* disable EEE before updating the PHY parameters */ + rtl_eee_enable(tp, false); + +- rtl8152_apply_firmware(tp); ++ rtl8152_apply_firmware(tp, false); + + if (tp->version == RTL_VER_03) { + data = ocp_reg_read(tp, OCP_EEE_CFG); +@@ -4578,7 +4594,7 @@ static void r8153b_hw_phy_cfg(struct r81 + /* disable EEE before updating the PHY parameters */ + rtl_eee_enable(tp, false); + +- rtl8152_apply_firmware(tp); ++ rtl8152_apply_firmware(tp, false); + + r8153b_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags)); + +@@ -4619,7 +4635,7 @@ static void r8153b_hw_phy_cfg(struct r81 + ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); + + /* Advnace EEE */ +- if (!r8153_patch_request(tp, true)) { ++ if (!rtl_phy_patch_request(tp, true, true)) { + data = ocp_reg_read(tp, OCP_POWER_CFG); + data |= EEE_CLKDIV_EN; + ocp_reg_write(tp, OCP_POWER_CFG, data); +@@ -4636,7 +4652,7 @@ static void r8153b_hw_phy_cfg(struct r81 + ocp_reg_write(tp, OCP_SYSCLK_CFG, clk_div_expo(5)); + tp->ups_info._250m_ckdiv = true; + +- r8153_patch_request(tp, false); ++ rtl_phy_patch_request(tp, false, true); + } + + if (tp->eee_en) diff --git a/target/linux/generic/backport-5.15/785-v5.12-r8152-adjust-the-flow-of-power-cut-for-RTL8153B.patch b/target/linux/generic/backport-5.15/785-v5.12-r8152-adjust-the-flow-of-power-cut-for-RTL8153B.patch new file mode 100644 index 0000000000..b28e89a43f --- /dev/null +++ b/target/linux/generic/backport-5.15/785-v5.12-r8152-adjust-the-flow-of-power-cut-for-RTL8153B.patch @@ -0,0 +1,134 @@ +From 29a61d8564ad3439d03c7ec135016a4e70072af1 Mon Sep 17 00:00:00 2001 +From: Hayes Wang <hayeswang@realtek.com> +Date: Wed, 3 Feb 2021 17:14:29 +0800 +Subject: [PATCH] r8152: adjust the flow of power cut for RTL8153B + +commit 80fd850b31f09263ad175b2f640d5c5c6f76ed41 upstream. + +For runtime resuming, the RTL8153B may be resumed from the state +of power cut, when enabling the feature of UPS. Then, the PHY +would be reset, so it is necessary to be initailized again. + +Besides, the USB_U1U2_TIMER also has to be set again, so I move +it from r8153b_init() to r8153b_hw_phy_cfg(). 
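Taken together with the previous patch, the idea is: rtl8152_apply_firmware()
now takes a power_cut flag, and when it is set the patch-request helpers run
with wait disabled, because a PHY that has just come back from power cut sits
in a safe mode and never updates OCP_PHY_PATCH_STAT, so polling it could only
time out. A toy user-space model of that wait flag (not driver code, just the
control flow):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool patch_ready;            /* stands in for OCP_PHY_PATCH_STAT */

    static int phy_patch_request(bool wait)
    {
        int i;

        /* the request itself is always issued ... */
        for (i = 0; wait && i < 5000; i++) {
            if (patch_ready)            /* real code sleeps 1-2 ms per loop */
                return 0;
        }
        return (wait && !patch_ready) ? -ETIME : 0;
    }

    int main(void)
    {
        patch_ready = false;                            /* resumed from power cut */
        printf("%d\n", phy_patch_request(false));       /* 0: request issued, no polling */
        printf("%d\n", phy_patch_request(true));        /* -ETIME: polling can never succeed */
        return 0;
    }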
+ +Signed-off-by: Hayes Wang <hayeswang@realtek.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/usb/r8152.c | 68 ++++++++++++++++++++++++----------------- + 1 file changed, 40 insertions(+), 28 deletions(-) + +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -1372,6 +1372,10 @@ void write_mii_word(struct net_device *n + static int + r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags); + ++static int ++rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, ++ u32 advertising); ++ + static int rtl8152_set_mac_address(struct net_device *netdev, void *p) + { + struct r8152 *tp = netdev_priv(netdev); +@@ -3183,8 +3187,6 @@ static void r8153b_ups_en(struct r8152 * + ocp_data |= BIT(0); + ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data); + } else { +- u16 data; +- + ocp_data &= ~(UPS_EN | USP_PREWAKE); + ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); + +@@ -3192,31 +3194,20 @@ static void r8153b_ups_en(struct r8152 * + ocp_data &= ~BIT(0); + ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data); + +- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); +- ocp_data &= ~PCUT_STATUS; +- ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); ++ if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { ++ int i; + +- data = r8153_phy_status(tp, 0); ++ for (i = 0; i < 500; i++) { ++ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & ++ AUTOLOAD_DONE) ++ break; ++ msleep(20); ++ } + +- switch (data) { +- case PHY_STAT_PWRDN: +- case PHY_STAT_EXT_INIT: +- r8153b_green_en(tp, +- test_bit(GREEN_ETHERNET, &tp->flags)); +- +- data = r8152_mdio_read(tp, MII_BMCR); +- data &= ~BMCR_PDOWN; +- data |= BMCR_RESET; +- r8152_mdio_write(tp, MII_BMCR, data); ++ tp->rtl_ops.hw_phy_cfg(tp); + +- data = r8153_phy_status(tp, PHY_STAT_LAN_ON); +- fallthrough; +- +- default: +- if (data != PHY_STAT_LAN_ON) +- netif_warn(tp, link, tp->netdev, +- "PHY not ready"); +- break; ++ rtl8152_set_speed(tp, tp->autoneg, tp->speed, ++ tp->duplex, tp->advertising); + } + } + } +@@ -4588,13 +4579,37 @@ static void r8153b_hw_phy_cfg(struct r81 + u32 ocp_data; + u16 data; + ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); ++ if (ocp_data & PCUT_STATUS) { ++ ocp_data &= ~PCUT_STATUS; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); ++ } ++ + /* disable ALDPS before updating the PHY parameters */ + r8153_aldps_en(tp, false); + + /* disable EEE before updating the PHY parameters */ + rtl_eee_enable(tp, false); + +- rtl8152_apply_firmware(tp, false); ++ /* U1/U2/L1 idle timer. 500 us */ ++ ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500); ++ ++ data = r8153_phy_status(tp, 0); ++ ++ switch (data) { ++ case PHY_STAT_PWRDN: ++ case PHY_STAT_EXT_INIT: ++ rtl8152_apply_firmware(tp, true); ++ ++ data = r8152_mdio_read(tp, MII_BMCR); ++ data &= ~BMCR_PDOWN; ++ r8152_mdio_write(tp, MII_BMCR, data); ++ break; ++ case PHY_STAT_LAN_ON: ++ default: ++ rtl8152_apply_firmware(tp, false); ++ break; ++ } + + r8153b_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags)); + +@@ -5543,9 +5558,6 @@ static void r8153b_init(struct r8152 *tp + /* MSC timer = 0xfff * 8ms = 32760 ms */ + ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff); + +- /* U1/U2/L1 idle timer. 
500 us */ +- ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500); +- + r8153b_power_cut_en(tp, false); + r8153b_ups_en(tp, false); + r8153_queue_wake(tp, false); diff --git a/target/linux/generic/backport-5.15/786-v5.12-r8152-enable-U1-U2-for-USB_SPEED_SUPER.patch b/target/linux/generic/backport-5.15/786-v5.12-r8152-enable-U1-U2-for-USB_SPEED_SUPER.patch new file mode 100644 index 0000000000..8cf91c3893 --- /dev/null +++ b/target/linux/generic/backport-5.15/786-v5.12-r8152-enable-U1-U2-for-USB_SPEED_SUPER.patch @@ -0,0 +1,47 @@ +From 69b4339c0b9f3edc6a8f681f05efaaf4add1bb0e Mon Sep 17 00:00:00 2001 +From: Hayes Wang <hayeswang@realtek.com> +Date: Fri, 19 Feb 2021 17:04:40 +0800 +Subject: [PATCH] r8152: enable U1/U2 for USB_SPEED_SUPER + +commit 7a0ae61acde2cebd69665837170405eced86a6c7 upstream. + +U1/U2 shoued be enabled for USB 3.0 or later. The USB 2.0 doesn't +support it. + +Signed-off-by: Hayes Wang <hayeswang@realtek.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/usb/r8152.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -3336,7 +3336,7 @@ static void rtl8153b_runtime_enable(stru + r8153b_ups_en(tp, false); + r8153_queue_wake(tp, false); + rtl_runtime_suspend_enable(tp, false); +- if (tp->udev->speed != USB_SPEED_HIGH) ++ if (tp->udev->speed >= USB_SPEED_SUPER) + r8153b_u1u2en(tp, true); + } + } +@@ -5029,7 +5029,7 @@ static void rtl8153b_up(struct r8152 *tp + + r8153_aldps_en(tp, true); + +- if (tp->udev->speed != USB_SPEED_HIGH) ++ if (tp->udev->speed >= USB_SPEED_SUPER) + r8153b_u1u2en(tp, true); + } + +@@ -5571,8 +5571,9 @@ static void r8153b_init(struct r8152 *tp + ocp_data |= POLL_LINK_CHG; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); + +- if (tp->udev->speed != USB_SPEED_HIGH) ++ if (tp->udev->speed >= USB_SPEED_SUPER) + r8153b_u1u2en(tp, true); ++ + usb_enable_lpm(tp->udev); + + /* MAC clock speed down */ diff --git a/target/linux/generic/backport-5.15/787-v5.12-r8152-check-if-the-pointer-of-the-function-exists.patch b/target/linux/generic/backport-5.15/787-v5.12-r8152-check-if-the-pointer-of-the-function-exists.patch new file mode 100644 index 0000000000..5bc4e2b0a1 --- /dev/null +++ b/target/linux/generic/backport-5.15/787-v5.12-r8152-check-if-the-pointer-of-the-function-exists.patch @@ -0,0 +1,51 @@ +From e78b75f5be204a0a235da995d01c778dc282bb42 Mon Sep 17 00:00:00 2001 +From: Hayes Wang <hayeswang@realtek.com> +Date: Fri, 19 Feb 2021 17:04:41 +0800 +Subject: [PATCH] r8152: check if the pointer of the function exists + +commit c79515e47935c747282c6ed2ee5b2ef039756eeb upstream. + +Return error code if autosuspend_en, eee_get, or eee_set don't exist. 
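The callbacks sit in the per-chip rtl_ops table, which may leave some slots
unset, so callers have to treat them as optional. A minimal generic sketch of
the same guard (hypothetical types, not the r8152 ones; the real hunks below
return -EBUSY for autosuspend_en and -EOPNOTSUPP for the EEE hooks):

    #include <errno.h>

    struct chip_ops {
        int (*eee_get)(void *priv);     /* may be NULL on chips without EEE */
    };

    static int chip_eee_get(const struct chip_ops *ops, void *priv)
    {
        if (!ops->eee_get)
            return -EOPNOTSUPP;         /* report "not supported", don't crash */
        return ops->eee_get(priv);
    }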
+ +Signed-off-by: Hayes Wang <hayeswang@realtek.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/usb/r8152.c | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -5758,6 +5758,9 @@ static int rtl8152_runtime_suspend(struc + struct net_device *netdev = tp->netdev; + int ret = 0; + ++ if (!tp->rtl_ops.autosuspend_en) ++ return -EBUSY; ++ + set_bit(SELECTIVE_SUSPEND, &tp->flags); + smp_mb__after_atomic(); + +@@ -6157,6 +6160,11 @@ rtl_ethtool_get_eee(struct net_device *n + struct r8152 *tp = netdev_priv(net); + int ret; + ++ if (!tp->rtl_ops.eee_get) { ++ ret = -EOPNOTSUPP; ++ goto out; ++ } ++ + ret = usb_autopm_get_interface(tp->intf); + if (ret < 0) + goto out; +@@ -6179,6 +6187,11 @@ rtl_ethtool_set_eee(struct net_device *n + struct r8152 *tp = netdev_priv(net); + int ret; + ++ if (!tp->rtl_ops.eee_set) { ++ ret = -EOPNOTSUPP; ++ goto out; ++ } ++ + ret = usb_autopm_get_interface(tp->intf); + if (ret < 0) + goto out; diff --git a/target/linux/generic/backport-5.15/788-v5.12-r8152-replace-netif_err-with-dev_err.patch b/target/linux/generic/backport-5.15/788-v5.12-r8152-replace-netif_err-with-dev_err.patch new file mode 100644 index 0000000000..967b4ccc16 --- /dev/null +++ b/target/linux/generic/backport-5.15/788-v5.12-r8152-replace-netif_err-with-dev_err.patch @@ -0,0 +1,36 @@ +From 38e44c7926512cff0b2809dc329de2a8e769e523 Mon Sep 17 00:00:00 2001 +From: Hayes Wang <hayeswang@realtek.com> +Date: Fri, 19 Feb 2021 17:04:42 +0800 +Subject: [PATCH] r8152: replace netif_err with dev_err + +commit 156c3207611262266f0eea589ac3f00c5657320e upstream. + +Some messages are before calling register_netdev(), so replace +netif_err() with dev_err(). + +Signed-off-by: Hayes Wang <hayeswang@realtek.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/usb/r8152.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -6594,7 +6594,7 @@ static int rtl_ops_init(struct r8152 *tp + + default: + ret = -ENODEV; +- netif_err(tp, probe, tp->netdev, "Unknown Device\n"); ++ dev_err(&tp->intf->dev, "Unknown Device\n"); + break; + } + +@@ -6851,7 +6851,7 @@ static int rtl8152_probe(struct usb_inte + + ret = register_netdev(netdev); + if (ret != 0) { +- netif_err(tp, probe, netdev, "couldn't register the device\n"); ++ dev_err(&intf->dev, "couldn't register the device\n"); + goto out1; + } + diff --git a/target/linux/generic/backport-5.15/789-v5.12-r8152-spilt-rtl_set_eee_plus-and-r8153b_green_en.patch b/target/linux/generic/backport-5.15/789-v5.12-r8152-spilt-rtl_set_eee_plus-and-r8153b_green_en.patch new file mode 100644 index 0000000000..c49a4ce5b6 --- /dev/null +++ b/target/linux/generic/backport-5.15/789-v5.12-r8152-spilt-rtl_set_eee_plus-and-r8153b_green_en.patch @@ -0,0 +1,89 @@ +From 260814de2d6cb958767785ffcb2e76915d1be32b Mon Sep 17 00:00:00 2001 +From: Hayes Wang <hayeswang@realtek.com> +Date: Fri, 19 Feb 2021 17:04:43 +0800 +Subject: [PATCH] r8152: spilt rtl_set_eee_plus and r8153b_green_en + +commit 40fa7568ac230446d888b7ad402cff9e20fe3ad5 upstream. + +Add rtl_eee_plus_en() and rtl_green_en(). 
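The shape of the refactor is a policy/mechanism split: the new *_en() helpers
take an explicit on/off, while the old entry point keeps the existing
"EEE-plus only at 10 Mbps" policy as a thin wrapper, presumably so the new
chip paths added later in this series can drive the bits directly. A generic
sketch (hypothetical names, not the driver's):

    #include <stdbool.h>

    static bool feature_bit;            /* stands in for the EEEP_CR_EEEP_TX bit */

    /* mechanism: explicit on/off, callable from any code path */
    static void feature_en(bool enable)
    {
        feature_bit = enable;
    }

    /* policy: unchanged behaviour of the old entry point */
    static void set_feature_by_speed(unsigned int speed_mbps)
    {
        feature_en(speed_mbps == 10);
    }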
+ +Signed-off-by: Hayes Wang <hayeswang@realtek.com> +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +--- + drivers/net/usb/r8152.c | 43 ++++++++++++++++++++++++++--------------- + 1 file changed, 27 insertions(+), 16 deletions(-) + +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -2633,21 +2633,24 @@ static inline u8 rtl8152_get_speed(struc + return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS); + } + +-static void rtl_set_eee_plus(struct r8152 *tp) ++static void rtl_eee_plus_en(struct r8152 *tp, bool enable) + { + u32 ocp_data; +- u8 speed; + +- speed = rtl8152_get_speed(tp); +- if (speed & _10bps) { +- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR); ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR); ++ if (enable) + ocp_data |= EEEP_CR_EEEP_TX; +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data); +- } else { +- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR); ++ else + ocp_data &= ~EEEP_CR_EEEP_TX; +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data); +- } ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data); ++} ++ ++static void rtl_set_eee_plus(struct r8152 *tp) ++{ ++ if (rtl8152_get_speed(tp) & _10bps) ++ rtl_eee_plus_en(tp, true); ++ else ++ rtl_eee_plus_en(tp, false); + } + + static void rxdy_gated_en(struct r8152 *tp, bool enable) +@@ -3128,10 +3131,22 @@ static void r8153b_ups_flags(struct r815 + ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags); + } + +-static void r8153b_green_en(struct r8152 *tp, bool enable) ++static void rtl_green_en(struct r8152 *tp, bool enable) + { + u16 data; + ++ data = sram_read(tp, SRAM_GREEN_CFG); ++ if (enable) ++ data |= GREEN_ETH_EN; ++ else ++ data &= ~GREEN_ETH_EN; ++ sram_write(tp, SRAM_GREEN_CFG, data); ++ ++ tp->ups_info.green = enable; ++} ++ ++static void r8153b_green_en(struct r8152 *tp, bool enable) ++{ + if (enable) { + sram_write(tp, 0x8045, 0); /* 10M abiq&ldvbias */ + sram_write(tp, 0x804d, 0x1222); /* 100M short abiq&ldvbias */ +@@ -3142,11 +3157,7 @@ static void r8153b_green_en(struct r8152 + sram_write(tp, 0x805d, 0x2444); /* 1000M short abiq&ldvbias */ + } + +- data = sram_read(tp, SRAM_GREEN_CFG); +- data |= GREEN_ETH_EN; +- sram_write(tp, SRAM_GREEN_CFG, data); +- +- tp->ups_info.green = enable; ++ rtl_green_en(tp, true); + } + + static u16 r8153_phy_status(struct r8152 *tp, u16 desired) diff --git a/target/linux/generic/backport-5.15/790-v5.13-r8152-set-inter-fram-gap-time-depending-on-speed.patch b/target/linux/generic/backport-5.15/790-v5.13-r8152-set-inter-fram-gap-time-depending-on-speed.patch new file mode 100644 index 0000000000..f2593e8f00 --- /dev/null +++ b/target/linux/generic/backport-5.15/790-v5.13-r8152-set-inter-fram-gap-time-depending-on-speed.patch @@ -0,0 +1,75 @@ +From f1bbbb260a8016373adf239c716d2da90e6ced0b Mon Sep 17 00:00:00 2001 +From: Hayes Wang <hayeswang@realtek.com> +Date: Fri, 16 Apr 2021 16:04:32 +0800 +Subject: [PATCH] r8152: set inter fram gap time depending on speed + +commit 5133bcc7481528e36fff0a3b056601efb704fb32 upstream. + +Set the maximum inter frame gap time (144ns) for speed 10M/half and +100M/half. It improves the performance for those speeds. And, there +is no effect for the other speeds. + +For 10M/half and 100M/half, the fast inter frame gap time let the +device couldn't use the feature of the aggregation effectively, +because the transfer would be completed fastly. Therefore, use the +maximum value to improve the effect of the aggregation. 
However, you +may not feel the improvement for fast CPUs, because they compensate +for the effect of the aggregation. + +Signed-off-by: Hayes Wang <hayeswang@realtek.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/usb/r8152.c | 28 ++++++++++++++++++++++++++++ + 1 file changed, 28 insertions(+) + +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -250,6 +250,9 @@ + + /* PLA_TCR1 */ + #define VERSION_MASK 0x7cf0 ++#define IFG_MASK (BIT(3) | BIT(9) | BIT(8)) ++#define IFG_144NS BIT(9) ++#define IFG_96NS (BIT(9) | BIT(8)) + + /* PLA_MTPS */ + #define MTPS_JUMBO (12 * 1024 / 64) +@@ -2748,6 +2751,29 @@ static int rtl_stop_rx(struct r8152 *tp) + return 0; + } + ++static void rtl_set_ifg(struct r8152 *tp, u16 speed) ++{ ++ u32 ocp_data; ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR1); ++ ocp_data &= ~IFG_MASK; ++ if ((speed & (_10bps | _100bps)) && !(speed & FULL_DUP)) { ++ ocp_data |= IFG_144NS; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); ++ ocp_data &= ~TX10MIDLE_EN; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); ++ } else { ++ ocp_data |= IFG_96NS; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); ++ ocp_data |= TX10MIDLE_EN; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); ++ } ++} ++ + static inline void r8153b_rx_agg_chg_indicate(struct r8152 *tp) + { + ocp_write_byte(tp, MCU_TYPE_USB, USB_UPT_RXDMA_OWN, +@@ -2851,6 +2877,8 @@ static int rtl8153_enable(struct r8152 * + r8153_set_rx_early_timeout(tp); + r8153_set_rx_early_size(tp); + ++ rtl_set_ifg(tp, rtl8152_get_speed(tp)); ++ + if (tp->version == RTL_VER_09) { + u32 ocp_data; + diff --git a/target/linux/generic/backport-5.15/791-v5.13-r8152-adjust-rtl8152_check_firmware-function.patch b/target/linux/generic/backport-5.15/791-v5.13-r8152-adjust-rtl8152_check_firmware-function.patch new file mode 100644 index 0000000000..cddd39ea94 --- /dev/null +++ b/target/linux/generic/backport-5.15/791-v5.13-r8152-adjust-rtl8152_check_firmware-function.patch @@ -0,0 +1,152 @@ +From f10c9edf47d3fa240d965e151a48c670f5035b73 Mon Sep 17 00:00:00 2001 +From: Hayes Wang <hayeswang@realtek.com> +Date: Fri, 16 Apr 2021 16:04:33 +0800 +Subject: [PATCH] r8152: adjust rtl8152_check_firmware function + +commit a8a7be178e81a3d4b6972cbeb0ccd091ca2f9f89 upstream. + +Use bits operations to record and check the firmware. + +Signed-off-by: Hayes Wang <hayeswang@realtek.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/usb/r8152.c | 51 +++++++++++++++++++++++------------------ + 1 file changed, 29 insertions(+), 22 deletions(-) + +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -875,6 +875,14 @@ struct fw_header { + struct fw_block blocks[]; + } __packed; + ++enum rtl8152_fw_flags { ++ FW_FLAGS_USB = 0, ++ FW_FLAGS_PLA, ++ FW_FLAGS_START, ++ FW_FLAGS_STOP, ++ FW_FLAGS_NC, ++}; ++ + /** + * struct fw_mac - a firmware block used by RTL_FW_PLA and RTL_FW_USB. 
+ * The layout of the firmware block is: +@@ -3801,10 +3809,7 @@ static long rtl8152_check_firmware(struc + { + const struct firmware *fw = rtl_fw->fw; + struct fw_header *fw_hdr = (struct fw_header *)fw->data; +- struct fw_mac *pla = NULL, *usb = NULL; +- struct fw_phy_patch_key *start = NULL; +- struct fw_phy_nc *phy_nc = NULL; +- struct fw_block *stop = NULL; ++ unsigned long fw_flags = 0; + long ret = -EFAULT; + int i; + +@@ -3833,50 +3838,52 @@ static long rtl8152_check_firmware(struc + goto fail; + goto fw_end; + case RTL_FW_PLA: +- if (pla) { ++ if (test_bit(FW_FLAGS_PLA, &fw_flags)) { + dev_err(&tp->intf->dev, + "multiple PLA firmware encountered"); + goto fail; + } + +- pla = (struct fw_mac *)block; +- if (!rtl8152_is_fw_mac_ok(tp, pla)) { ++ if (!rtl8152_is_fw_mac_ok(tp, (struct fw_mac *)block)) { + dev_err(&tp->intf->dev, + "check PLA firmware failed\n"); + goto fail; + } ++ __set_bit(FW_FLAGS_PLA, &fw_flags); + break; + case RTL_FW_USB: +- if (usb) { ++ if (test_bit(FW_FLAGS_USB, &fw_flags)) { + dev_err(&tp->intf->dev, + "multiple USB firmware encountered"); + goto fail; + } + +- usb = (struct fw_mac *)block; +- if (!rtl8152_is_fw_mac_ok(tp, usb)) { ++ if (!rtl8152_is_fw_mac_ok(tp, (struct fw_mac *)block)) { + dev_err(&tp->intf->dev, + "check USB firmware failed\n"); + goto fail; + } ++ __set_bit(FW_FLAGS_USB, &fw_flags); + break; + case RTL_FW_PHY_START: +- if (start || phy_nc || stop) { ++ if (test_bit(FW_FLAGS_START, &fw_flags) || ++ test_bit(FW_FLAGS_NC, &fw_flags) || ++ test_bit(FW_FLAGS_STOP, &fw_flags)) { + dev_err(&tp->intf->dev, + "check PHY_START fail\n"); + goto fail; + } + +- if (__le32_to_cpu(block->length) != sizeof(*start)) { ++ if (__le32_to_cpu(block->length) != sizeof(struct fw_phy_patch_key)) { + dev_err(&tp->intf->dev, + "Invalid length for PHY_START\n"); + goto fail; + } +- +- start = (struct fw_phy_patch_key *)block; ++ __set_bit(FW_FLAGS_START, &fw_flags); + break; + case RTL_FW_PHY_STOP: +- if (stop || !start) { ++ if (test_bit(FW_FLAGS_STOP, &fw_flags) || ++ !test_bit(FW_FLAGS_START, &fw_flags)) { + dev_err(&tp->intf->dev, + "Check PHY_STOP fail\n"); + goto fail; +@@ -3887,28 +3894,28 @@ static long rtl8152_check_firmware(struc + "Invalid length for PHY_STOP\n"); + goto fail; + } +- +- stop = block; ++ __set_bit(FW_FLAGS_STOP, &fw_flags); + break; + case RTL_FW_PHY_NC: +- if (!start || stop) { ++ if (!test_bit(FW_FLAGS_START, &fw_flags) || ++ test_bit(FW_FLAGS_STOP, &fw_flags)) { + dev_err(&tp->intf->dev, + "check PHY_NC fail\n"); + goto fail; + } + +- if (phy_nc) { ++ if (test_bit(FW_FLAGS_NC, &fw_flags)) { + dev_err(&tp->intf->dev, + "multiple PHY NC encountered\n"); + goto fail; + } + +- phy_nc = (struct fw_phy_nc *)block; +- if (!rtl8152_is_fw_phy_nc_ok(tp, phy_nc)) { ++ if (!rtl8152_is_fw_phy_nc_ok(tp, (struct fw_phy_nc *)block)) { + dev_err(&tp->intf->dev, + "check PHY NC firmware failed\n"); + goto fail; + } ++ __set_bit(FW_FLAGS_NC, &fw_flags); + + break; + default: +@@ -3922,7 +3929,7 @@ static long rtl8152_check_firmware(struc + } + + fw_end: +- if ((phy_nc || start) && !stop) { ++ if (test_bit(FW_FLAGS_START, &fw_flags) && !test_bit(FW_FLAGS_STOP, &fw_flags)) { + dev_err(&tp->intf->dev, "without PHY_STOP\n"); + goto fail; + } diff --git a/target/linux/generic/backport-5.15/792-v5.13-r8152-add-help-function-to-change-mtu.patch b/target/linux/generic/backport-5.15/792-v5.13-r8152-add-help-function-to-change-mtu.patch new file mode 100644 index 0000000000..3559e3b95a --- /dev/null +++ 
b/target/linux/generic/backport-5.15/792-v5.13-r8152-add-help-function-to-change-mtu.patch @@ -0,0 +1,157 @@ +From f010a7d51cbb42bdb956f0a28b8868b15d7a3816 Mon Sep 17 00:00:00 2001 +From: Hayes Wang <hayeswang@realtek.com> +Date: Fri, 16 Apr 2021 16:04:34 +0800 +Subject: [PATCH] r8152: add help function to change mtu + +commit 67ce1a806f164e59a074fea8809725d3411eaa20 upstream. + +The different chips may have different requests when changing mtu. +Therefore, add a new help function of rtl_ops to change mtu. Besides, +reset the tx/rx after changing mtu. + +Additionally, add mtu_to_size() and size_to_mtu() macros to simplify +the code. + +Signed-off-by: Hayes Wang <hayeswang@realtek.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/usb/r8152.c | 53 ++++++++++++++++++++++++----------------- + 1 file changed, 31 insertions(+), 22 deletions(-) + +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -658,15 +658,13 @@ enum rtl_register_content { + + #define INTR_LINK 0x0004 + +-#define RTL8153_MAX_PACKET 9216 /* 9K */ +-#define RTL8153_MAX_MTU (RTL8153_MAX_PACKET - VLAN_ETH_HLEN - \ +- ETH_FCS_LEN) + #define RTL8152_RMS (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) + #define RTL8153_RMS RTL8153_MAX_PACKET + #define RTL8152_TX_TIMEOUT (5 * HZ) + #define RTL8152_NAPI_WEIGHT 64 +-#define rx_reserved_size(x) ((x) + VLAN_ETH_HLEN + ETH_FCS_LEN + \ +- sizeof(struct rx_desc) + RX_ALIGN) ++#define mtu_to_size(m) ((m) + VLAN_ETH_HLEN + ETH_FCS_LEN) ++#define size_to_mtu(s) ((s) - VLAN_ETH_HLEN - ETH_FCS_LEN) ++#define rx_reserved_size(x) (mtu_to_size(x) + sizeof(struct rx_desc) + RX_ALIGN) + + /* rtl8152 flags */ + enum rtl8152_flags { +@@ -796,6 +794,7 @@ struct r8152 { + bool (*in_nway)(struct r8152 *tp); + void (*hw_phy_cfg)(struct r8152 *tp); + void (*autosuspend_en)(struct r8152 *tp, bool enable); ++ void (*change_mtu)(struct r8152 *tp); + } rtl_ops; + + struct ups_info { +@@ -1022,8 +1021,7 @@ enum tx_csum_stat { + static const int multicast_filter_limit = 32; + static unsigned int agg_buf_sz = 16384; + +-#define RTL_LIMITED_TSO_SIZE (agg_buf_sz - sizeof(struct tx_desc) - \ +- VLAN_ETH_HLEN - ETH_FCS_LEN) ++#define RTL_LIMITED_TSO_SIZE (size_to_mtu(agg_buf_sz) - sizeof(struct tx_desc)) + + static + int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) +@@ -2633,10 +2631,7 @@ static void rtl8152_nic_reset(struct r81 + + static void set_tx_qlen(struct r8152 *tp) + { +- struct net_device *netdev = tp->netdev; +- +- tp->tx_qlen = agg_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN + +- sizeof(struct tx_desc)); ++ tp->tx_qlen = agg_buf_sz / (mtu_to_size(tp->netdev->mtu) + sizeof(struct tx_desc)); + } + + static inline u8 rtl8152_get_speed(struct r8152 *tp) +@@ -4725,6 +4720,12 @@ static void r8153b_hw_phy_cfg(struct r81 + set_bit(PHY_RESET, &tp->flags); + } + ++static void rtl8153_change_mtu(struct r8152 *tp) ++{ ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu)); ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); ++} ++ + static void r8153_first_init(struct r8152 *tp) + { + u32 ocp_data; +@@ -4757,9 +4758,7 @@ static void r8153_first_init(struct r815 + + rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); + +- ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data); +- ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); ++ rtl8153_change_mtu(tp); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0); + ocp_data |= TCR0_AUTO_FIFO; +@@ 
-4794,8 +4793,7 @@ static void r8153_enter_oob(struct r8152 + + wait_oob_link_list_ready(tp); + +- ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data); ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu)); + + switch (tp->version) { + case RTL_VER_03: +@@ -6518,12 +6516,21 @@ static int rtl8152_change_mtu(struct net + dev->mtu = new_mtu; + + if (netif_running(dev)) { +- u32 rms = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; +- +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rms); ++ if (tp->rtl_ops.change_mtu) ++ tp->rtl_ops.change_mtu(tp); + +- if (netif_carrier_ok(dev)) +- r8153_set_rx_early_size(tp); ++ if (netif_carrier_ok(dev)) { ++ netif_stop_queue(dev); ++ napi_disable(&tp->napi); ++ tasklet_disable(&tp->tx_tl); ++ tp->rtl_ops.disable(tp); ++ tp->rtl_ops.enable(tp); ++ rtl_start_rx(tp); ++ tasklet_enable(&tp->tx_tl); ++ napi_enable(&tp->napi); ++ rtl8152_set_rx_mode(dev); ++ netif_wake_queue(dev); ++ } + } + + mutex_unlock(&tp->control); +@@ -6612,6 +6619,7 @@ static int rtl_ops_init(struct r8152 *tp + ops->in_nway = rtl8153_in_nway; + ops->hw_phy_cfg = r8153_hw_phy_cfg; + ops->autosuspend_en = rtl8153_runtime_enable; ++ ops->change_mtu = rtl8153_change_mtu; + if (tp->udev->speed < USB_SPEED_SUPER) + tp->rx_buf_sz = 16 * 1024; + else +@@ -6633,6 +6641,7 @@ static int rtl_ops_init(struct r8152 *tp + ops->in_nway = rtl8153_in_nway; + ops->hw_phy_cfg = r8153b_hw_phy_cfg; + ops->autosuspend_en = rtl8153b_runtime_enable; ++ ops->change_mtu = rtl8153_change_mtu; + tp->rx_buf_sz = 32 * 1024; + tp->eee_en = true; + tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; +@@ -6853,7 +6862,7 @@ static int rtl8152_probe(struct usb_inte + netdev->max_mtu = ETH_DATA_LEN; + break; + default: +- netdev->max_mtu = RTL8153_MAX_MTU; ++ netdev->max_mtu = size_to_mtu(9 * 1024); + break; + } + diff --git a/target/linux/generic/backport-5.15/793-v5.13-r8152-support-new-chips.patch b/target/linux/generic/backport-5.15/793-v5.13-r8152-support-new-chips.patch new file mode 100644 index 0000000000..06a141b451 --- /dev/null +++ b/target/linux/generic/backport-5.15/793-v5.13-r8152-support-new-chips.patch @@ -0,0 +1,2886 @@ +From e7439e7fd384f55f55837f7e4866e74d8dca3827 Mon Sep 17 00:00:00 2001 +From: Hayes Wang <hayeswang@realtek.com> +Date: Fri, 16 Apr 2021 16:04:35 +0800 +Subject: [PATCH] r8152: support new chips + +commit 195aae321c829dd1945900d75561e6aa79cce208 upstream. + +Support RTL8153C, RTL8153D, RTL8156A, and RTL8156B. The RTL8156A +and RTL8156B are the 2.5G ethernet. + +Signed-off-by: Hayes Wang <hayeswang@realtek.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/usb/r8152.c | 2634 +++++++++++++++++++++++++++++++++++---- + 1 file changed, 2359 insertions(+), 275 deletions(-) + +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -44,10 +44,14 @@ + + #define PLA_IDR 0xc000 + #define PLA_RCR 0xc010 ++#define PLA_RCR1 0xc012 + #define PLA_RMS 0xc016 + #define PLA_RXFIFO_CTRL0 0xc0a0 ++#define PLA_RXFIFO_FULL 0xc0a2 + #define PLA_RXFIFO_CTRL1 0xc0a4 ++#define PLA_RX_FIFO_FULL 0xc0a6 + #define PLA_RXFIFO_CTRL2 0xc0a8 ++#define PLA_RX_FIFO_EMPTY 0xc0aa + #define PLA_DMY_REG0 0xc0b0 + #define PLA_FMC 0xc0b4 + #define PLA_CFG_WOL 0xc0b6 +@@ -64,6 +68,8 @@ + #define PLA_MACDBG_PRE 0xd38c /* RTL_VER_04 only */ + #define PLA_MACDBG_POST 0xd38e /* RTL_VER_04 only */ + #define PLA_EXTRA_STATUS 0xd398 ++#define PLA_GPHY_CTRL 0xd3ae ++#define PLA_POL_GPIO_CTRL 0xdc6a + #define PLA_EFUSE_DATA 0xdd00 + #define PLA_EFUSE_CMD 0xdd02 + #define PLA_LEDSEL 0xdd90 +@@ -73,6 +79,8 @@ + #define PLA_LWAKE_CTRL_REG 0xe007 + #define PLA_GPHY_INTR_IMR 0xe022 + #define PLA_EEE_CR 0xe040 ++#define PLA_EEE_TXTWSYS 0xe04c ++#define PLA_EEE_TXTWSYS_2P5G 0xe058 + #define PLA_EEEP_CR 0xe080 + #define PLA_MAC_PWR_CTRL 0xe0c0 + #define PLA_MAC_PWR_CTRL2 0xe0ca +@@ -83,6 +91,7 @@ + #define PLA_TCR1 0xe612 + #define PLA_MTPS 0xe615 + #define PLA_TXFIFO_CTRL 0xe618 ++#define PLA_TXFIFO_FULL 0xe61a + #define PLA_RSTTALLY 0xe800 + #define PLA_CR 0xe813 + #define PLA_CRWECR 0xe81c +@@ -99,6 +108,7 @@ + #define PLA_SFF_STS_7 0xe8de + #define PLA_PHYSTATUS 0xe908 + #define PLA_CONFIG6 0xe90a /* CONFIG6 */ ++#define PLA_USB_CFG 0xe952 + #define PLA_BP_BA 0xfc26 + #define PLA_BP_0 0xfc28 + #define PLA_BP_1 0xfc2a +@@ -113,6 +123,7 @@ + #define USB_USB2PHY 0xb41e + #define USB_SSPHYLINK1 0xb426 + #define USB_SSPHYLINK2 0xb428 ++#define USB_L1_CTRL 0xb45e + #define USB_U2P3_CTRL 0xb460 + #define USB_CSR_DUMMY1 0xb464 + #define USB_CSR_DUMMY2 0xb466 +@@ -123,7 +134,12 @@ + #define USB_FW_FIX_EN0 0xcfca + #define USB_FW_FIX_EN1 0xcfcc + #define USB_LPM_CONFIG 0xcfd8 ++#define USB_ECM_OPTION 0xcfee + #define USB_CSTMR 0xcfef /* RTL8153A */ ++#define USB_MISC_2 0xcfff ++#define USB_ECM_OP 0xd26b ++#define USB_GPHY_CTRL 0xd284 ++#define USB_SPEED_OPTION 0xd32a + #define USB_FW_CTRL 0xd334 /* RTL8153B */ + #define USB_FC_TIMER 0xd340 + #define USB_USB_CTRL 0xd406 +@@ -137,16 +153,20 @@ + #define USB_RX_EXTRA_AGGR_TMR 0xd432 /* RTL8153B */ + #define USB_TX_DMA 0xd434 + #define USB_UPT_RXDMA_OWN 0xd437 ++#define USB_UPHY3_MDCMDIO 0xd480 + #define USB_TOLERANCE 0xd490 + #define USB_LPM_CTRL 0xd41a + #define USB_BMU_RESET 0xd4b0 ++#define USB_BMU_CONFIG 0xd4b4 + #define USB_U1U2_TIMER 0xd4da + #define USB_FW_TASK 0xd4e8 /* RTL8153B */ ++#define USB_RX_AGGR_NUM 0xd4ee + #define USB_UPS_CTRL 0xd800 + #define USB_POWER_CUT 0xd80a + #define USB_MISC_0 0xd81a + #define USB_MISC_1 0xd81f + #define USB_AFE_CTRL2 0xd824 ++#define USB_UPHY_XTAL 0xd826 + #define USB_UPS_CFG 0xd842 + #define USB_UPS_FLAGS 0xd848 + #define USB_WDT1_CTRL 0xe404 +@@ -189,6 +209,9 @@ + #define OCP_EEE_ABLE 0xa5c4 + #define OCP_EEE_ADV 0xa5d0 + #define OCP_EEE_LPABLE 0xa5d2 ++#define OCP_10GBT_CTRL 0xa5d4 ++#define OCP_10GBT_STAT 0xa5d6 ++#define OCP_EEE_ADV2 0xa6d4 + #define OCP_PHY_STATE 0xa708 /* nway state for 8153 */ + #define OCP_PHY_PATCH_STAT 0xb800 + #define OCP_PHY_PATCH_CMD 0xb820 +@@ -200,6 +223,7 @@ + /* SRAM Register */ + #define SRAM_GREEN_CFG 0x8011 + #define SRAM_LPF_CFG 0x8012 ++#define SRAM_GPHY_FW_VER 0x801e + #define SRAM_10M_AMP1 0x8080 + #define 
SRAM_10M_AMP2 0x8082 + #define SRAM_IMPEDANCE 0x8084 +@@ -211,11 +235,19 @@ + #define RCR_AM 0x00000004 + #define RCR_AB 0x00000008 + #define RCR_ACPT_ALL (RCR_AAP | RCR_APM | RCR_AM | RCR_AB) ++#define SLOT_EN BIT(11) ++ ++/* PLA_RCR1 */ ++#define OUTER_VLAN BIT(7) ++#define INNER_VLAN BIT(6) + + /* PLA_RXFIFO_CTRL0 */ + #define RXFIFO_THR1_NORMAL 0x00080002 + #define RXFIFO_THR1_OOB 0x01800003 + ++/* PLA_RXFIFO_FULL */ ++#define RXFIFO_FULL_MASK 0xfff ++ + /* PLA_RXFIFO_CTRL1 */ + #define RXFIFO_THR2_FULL 0x00000060 + #define RXFIFO_THR2_HIGH 0x00000038 +@@ -286,6 +318,7 @@ + #define MCU_BORW_EN 0x4000 + + /* PLA_CPCR */ ++#define FLOW_CTRL_EN BIT(0) + #define CPCR_RX_VLAN 0x0040 + + /* PLA_CFG_WOL */ +@@ -311,6 +344,10 @@ + /* PLA_CONFIG6 */ + #define LANWAKE_CLR_EN BIT(0) + ++/* PLA_USB_CFG */ ++#define EN_XG_LIP BIT(1) ++#define EN_G_LIP BIT(2) ++ + /* PLA_CONFIG5 */ + #define BWF_EN 0x0040 + #define MWF_EN 0x0020 +@@ -334,6 +371,7 @@ + /* PLA_MAC_PWR_CTRL2 */ + #define EEE_SPDWN_RATIO 0x8007 + #define MAC_CLK_SPDWN_EN BIT(15) ++#define EEE_SPDWN_RATIO_MASK 0xff + + /* PLA_MAC_PWR_CTRL3 */ + #define PLA_MCU_SPDWN_EN BIT(14) +@@ -346,6 +384,7 @@ + #define PWRSAVE_SPDWN_EN 0x1000 + #define RXDV_SPDWN_EN 0x0800 + #define TX10MIDLE_EN 0x0100 ++#define IDLE_SPDWN_EN BIT(6) + #define TP100_SPDWN_EN 0x0020 + #define TP500_SPDWN_EN 0x0010 + #define TP1000_SPDWN_EN 0x0008 +@@ -386,6 +425,13 @@ + #define LINK_CHANGE_FLAG BIT(8) + #define POLL_LINK_CHG BIT(0) + ++/* PLA_GPHY_CTRL */ ++#define GPHY_FLASH BIT(1) ++ ++/* PLA_POL_GPIO_CTRL */ ++#define DACK_DET_EN BIT(15) ++#define POL_GPHY_PATCH BIT(4) ++ + /* USB_USB2PHY */ + #define USB2PHY_SUSPEND 0x0001 + #define USB2PHY_L1 0x0002 +@@ -434,6 +480,9 @@ + #define BMU_RESET_EP_IN 0x01 + #define BMU_RESET_EP_OUT 0x02 + ++/* USB_BMU_CONFIG */ ++#define ACT_ODMA BIT(1) ++ + /* USB_UPT_RXDMA_OWN */ + #define OWN_UPDATE BIT(0) + #define OWN_CLEAR BIT(1) +@@ -441,27 +490,52 @@ + /* USB_FW_TASK */ + #define FC_PATCH_TASK BIT(1) + ++/* USB_RX_AGGR_NUM */ ++#define RX_AGGR_NUM_MASK 0x1ff ++ + /* USB_UPS_CTRL */ + #define POWER_CUT 0x0100 + + /* USB_PM_CTRL_STATUS */ + #define RESUME_INDICATE 0x0001 + ++/* USB_ECM_OPTION */ ++#define BYPASS_MAC_RESET BIT(5) ++ + /* USB_CSTMR */ + #define FORCE_SUPER BIT(0) + ++/* USB_MISC_2 */ ++#define UPS_FORCE_PWR_DOWN BIT(0) ++ ++/* USB_ECM_OP */ ++#define EN_ALL_SPEED BIT(0) ++ ++/* USB_GPHY_CTRL */ ++#define GPHY_PATCH_DONE BIT(2) ++#define BYPASS_FLASH BIT(5) ++#define BACKUP_RESTRORE BIT(6) ++ ++/* USB_SPEED_OPTION */ ++#define RG_PWRDN_EN BIT(8) ++#define ALL_SPEED_OFF BIT(9) ++ + /* USB_FW_CTRL */ + #define FLOW_CTRL_PATCH_OPT BIT(1) ++#define AUTO_SPEEDUP BIT(3) ++#define FLOW_CTRL_PATCH_2 BIT(8) + + /* USB_FC_TIMER */ + #define CTRL_TIMER_EN BIT(15) + + /* USB_USB_CTRL */ ++#define CDC_ECM_EN BIT(3) + #define RX_AGG_DISABLE 0x0010 + #define RX_ZERO_EN 0x0080 + + /* USB_U2P3_CTRL */ + #define U2P3_ENABLE 0x0001 ++#define RX_DETECT8 BIT(3) + + /* USB_POWER_CUT */ + #define PWR_EN 0x0001 +@@ -497,8 +571,12 @@ + #define SEN_VAL_NORMAL 0xa000 + #define SEL_RXIDLE 0x0100 + ++/* USB_UPHY_XTAL */ ++#define OOBS_POLLING BIT(8) ++ + /* USB_UPS_CFG */ + #define SAW_CNT_1MS_MASK 0x0fff ++#define MID_REVERSE BIT(5) /* RTL8156A */ + + /* USB_UPS_FLAGS */ + #define UPS_FLAGS_R_TUNE BIT(0) +@@ -506,6 +584,7 @@ + #define UPS_FLAGS_250M_CKDIV BIT(2) + #define UPS_FLAGS_EN_ALDPS BIT(3) + #define UPS_FLAGS_CTAP_SHORT_DIS BIT(4) ++#define UPS_FLAGS_SPEED_MASK (0xf << 16) + #define ups_flags_speed(x) ((x) << 16) + #define 
UPS_FLAGS_EN_EEE BIT(20) + #define UPS_FLAGS_EN_500M_EEE BIT(21) +@@ -526,6 +605,8 @@ enum spd_duplex { + FORCE_10M_FULL, + FORCE_100M_HALF, + FORCE_100M_FULL, ++ FORCE_1000M_FULL, ++ NWAY_2500M_FULL, + }; + + /* OCP_ALDPS_CONFIG */ +@@ -590,6 +671,9 @@ enum spd_duplex { + #define EN_10M_CLKDIV BIT(11) + #define EN_10M_BGOFF 0x0080 + ++/* OCP_10GBT_CTRL */ ++#define RTL_ADV2_5G_F_R BIT(5) /* Advertise 2.5GBASE-T fast-retrain */ ++ + /* OCP_PHY_STATE */ + #define TXDIS_STATE 0x01 + #define ABD_STATE 0x02 +@@ -609,7 +693,8 @@ enum spd_duplex { + #define EN_EMI_L 0x0040 + + /* OCP_SYSCLK_CFG */ +-#define clk_div_expo(x) (min(x, 5) << 8) ++#define sysclk_div_expo(x) (min(x, 5) << 8) ++#define clk_div_expo(x) (min(x, 5) << 4) + + /* SRAM_GREEN_CFG */ + #define GREEN_ETH_EN BIT(15) +@@ -640,6 +725,11 @@ enum spd_duplex { + #define BP4_SUPER_ONLY 0x1578 /* RTL_VER_04 only */ + + enum rtl_register_content { ++ _2500bps = BIT(10), ++ _1250bps = BIT(9), ++ _500bps = BIT(8), ++ _tx_flow = BIT(6), ++ _rx_flow = BIT(5), + _1000bps = 0x10, + _100bps = 0x08, + _10bps = 0x04, +@@ -647,6 +737,9 @@ enum rtl_register_content { + FULL_DUP = 0x01, + }; + ++#define is_speed_2500(_speed) (((_speed) & (_2500bps | LINK_STATUS)) == (_2500bps | LINK_STATUS)) ++#define is_flow_control(_speed) (((_speed) & (_tx_flow | _rx_flow)) == (_tx_flow | _rx_flow)) ++ + #define RTL8152_MAX_TX 4 + #define RTL8152_MAX_RX 10 + #define INTBUFSIZE 2 +@@ -661,7 +754,6 @@ enum rtl_register_content { + #define RTL8152_RMS (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) + #define RTL8153_RMS RTL8153_MAX_PACKET + #define RTL8152_TX_TIMEOUT (5 * HZ) +-#define RTL8152_NAPI_WEIGHT 64 + #define mtu_to_size(m) ((m) + VLAN_ETH_HLEN + ETH_FCS_LEN) + #define size_to_mtu(s) ((s) - VLAN_ETH_HLEN - ETH_FCS_LEN) + #define rx_reserved_size(x) (mtu_to_size(x) + sizeof(struct rx_desc) + RX_ALIGN) +@@ -798,6 +890,7 @@ struct r8152 { + } rtl_ops; + + struct ups_info { ++ u32 r_tune:1; + u32 _10m_ckdiv:1; + u32 _250m_ckdiv:1; + u32 aldps:1; +@@ -839,7 +932,9 @@ struct r8152 { + u32 rx_buf_sz; + u32 rx_copybreak; + u32 rx_pending; ++ u32 fc_pause_on, fc_pause_off; + ++ u32 support_2500full:1; + u16 ocp_base; + u16 speed; + u16 eee_adv; +@@ -999,6 +1094,15 @@ enum rtl_version { + RTL_VER_07, + RTL_VER_08, + RTL_VER_09, ++ ++ RTL_TEST_01, ++ RTL_VER_10, ++ RTL_VER_11, ++ RTL_VER_12, ++ RTL_VER_13, ++ RTL_VER_14, ++ RTL_VER_15, ++ + RTL_VER_MAX + }; + +@@ -1014,6 +1118,7 @@ enum tx_csum_stat { + #define RTL_ADVERTISED_100_FULL BIT(3) + #define RTL_ADVERTISED_1000_HALF BIT(4) + #define RTL_ADVERTISED_1000_FULL BIT(5) ++#define RTL_ADVERTISED_2500_FULL BIT(6) + + /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + * The RTL chips use a 64 element hash table based on the Ethernet CRC. 
+@@ -2607,7 +2712,7 @@ static netdev_tx_t rtl8152_start_xmit(st + + static void r8152b_reset_packet_filter(struct r8152 *tp) + { +- u32 ocp_data; ++ u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_FMC); + ocp_data &= ~FMC_FCR_MCU_EN; +@@ -2618,14 +2723,47 @@ static void r8152b_reset_packet_filter(s + + static void rtl8152_nic_reset(struct r8152 *tp) + { +- int i; ++ u32 ocp_data; ++ int i; + +- ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST); ++ switch (tp->version) { ++ case RTL_TEST_01: ++ case RTL_VER_10: ++ case RTL_VER_11: ++ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR); ++ ocp_data &= ~CR_TE; ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_RESET); ++ ocp_data &= ~BMU_RESET_EP_IN; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ++ ocp_data |= CDC_ECM_EN; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); ++ ++ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR); ++ ocp_data &= ~CR_RE; ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_RESET); ++ ocp_data |= BMU_RESET_EP_IN; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ++ ocp_data &= ~CDC_ECM_EN; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); ++ break; + +- for (i = 0; i < 1000; i++) { +- if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST)) +- break; +- usleep_range(100, 400); ++ default: ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST); ++ ++ for (i = 0; i < 1000; i++) { ++ if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST)) ++ break; ++ usleep_range(100, 400); ++ } ++ break; + } + } + +@@ -2634,9 +2772,9 @@ static void set_tx_qlen(struct r8152 *tp + tp->tx_qlen = agg_buf_sz / (mtu_to_size(tp->netdev->mtu) + sizeof(struct tx_desc)); + } + +-static inline u8 rtl8152_get_speed(struct r8152 *tp) ++static inline u16 rtl8152_get_speed(struct r8152 *tp) + { +- return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS); ++ return ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHYSTATUS); + } + + static void rtl_eee_plus_en(struct r8152 *tp, bool enable) +@@ -2796,6 +2934,7 @@ static int rtl_enable(struct r8152 *tp) + switch (tp->version) { + case RTL_VER_08: + case RTL_VER_09: ++ case RTL_VER_14: + r8153b_rx_agg_chg_indicate(tp); + break; + default: +@@ -2833,6 +2972,7 @@ static void r8153_set_rx_early_timeout(s + + case RTL_VER_08: + case RTL_VER_09: ++ case RTL_VER_14: + /* The RTL8153B uses USB_RX_EXTRA_AGGR_TMR for rx timeout + * primarily. For USB_RX_EARLY_TIMEOUT, we fix it to 128ns. 
+ */ +@@ -2842,6 +2982,18 @@ static void r8153_set_rx_early_timeout(s + ocp_data); + break; + ++ case RTL_VER_10: ++ case RTL_VER_11: ++ case RTL_VER_12: ++ case RTL_VER_13: ++ case RTL_VER_15: ++ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, ++ 640 / 8); ++ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR, ++ ocp_data); ++ r8153b_rx_agg_chg_indicate(tp); ++ break; ++ + default: + break; + } +@@ -2861,8 +3013,19 @@ static void r8153_set_rx_early_size(stru + break; + case RTL_VER_08: + case RTL_VER_09: ++ case RTL_VER_14: ++ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ++ ocp_data / 8); ++ break; ++ case RTL_TEST_01: ++ case RTL_VER_10: ++ case RTL_VER_11: ++ case RTL_VER_12: ++ case RTL_VER_13: ++ case RTL_VER_15: + ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, + ocp_data / 8); ++ r8153b_rx_agg_chg_indicate(tp); + break; + default: + WARN_ON_ONCE(1); +@@ -2872,6 +3035,8 @@ static void r8153_set_rx_early_size(stru + + static int rtl8153_enable(struct r8152 *tp) + { ++ u32 ocp_data; ++ + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return -ENODEV; + +@@ -2882,15 +3047,18 @@ static int rtl8153_enable(struct r8152 * + + rtl_set_ifg(tp, rtl8152_get_speed(tp)); + +- if (tp->version == RTL_VER_09) { +- u32 ocp_data; +- ++ switch (tp->version) { ++ case RTL_VER_09: ++ case RTL_VER_14: + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); + ocp_data &= ~FC_PATCH_TASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); + usleep_range(1000, 2000); + ocp_data |= FC_PATCH_TASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); ++ break; ++ default: ++ break; + } + + return rtl_enable(tp); +@@ -2955,12 +3123,40 @@ static void rtl_rx_vlan_en(struct r8152 + { + u32 ocp_data; + +- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); +- if (enable) +- ocp_data |= CPCR_RX_VLAN; +- else +- ocp_data &= ~CPCR_RX_VLAN; +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); ++ switch (tp->version) { ++ case RTL_VER_01: ++ case RTL_VER_02: ++ case RTL_VER_03: ++ case RTL_VER_04: ++ case RTL_VER_05: ++ case RTL_VER_06: ++ case RTL_VER_07: ++ case RTL_VER_08: ++ case RTL_VER_09: ++ case RTL_VER_14: ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); ++ if (enable) ++ ocp_data |= CPCR_RX_VLAN; ++ else ++ ocp_data &= ~CPCR_RX_VLAN; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); ++ break; ++ ++ case RTL_TEST_01: ++ case RTL_VER_10: ++ case RTL_VER_11: ++ case RTL_VER_12: ++ case RTL_VER_13: ++ case RTL_VER_15: ++ default: ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RCR1); ++ if (enable) ++ ocp_data |= OUTER_VLAN | INNER_VLAN; ++ else ++ ocp_data &= ~(OUTER_VLAN | INNER_VLAN); ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RCR1, ocp_data); ++ break; ++ } + } + + static int rtl8152_set_features(struct net_device *dev, +@@ -3053,6 +3249,40 @@ static void __rtl_set_wol(struct r8152 * + device_set_wakeup_enable(&tp->udev->dev, false); + } + ++static void r8153_mac_clk_speed_down(struct r8152 *tp, bool enable) ++{ ++ u32 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); ++ ++ /* MAC clock speed down */ ++ if (enable) ++ ocp_data |= MAC_CLK_SPDWN_EN; ++ else ++ ocp_data &= ~MAC_CLK_SPDWN_EN; ++ ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); ++} ++ ++static void r8156_mac_clk_spd(struct r8152 *tp, bool enable) ++{ ++ u32 ocp_data; ++ ++ /* MAC clock speed down */ ++ if (enable) { ++ /* aldps_spdwn_ratio, tp10_spdwn_ratio */ ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ++ 0x0403); ++ ++ ocp_data = ocp_read_word(tp, 
MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); ++ ocp_data &= ~EEE_SPDWN_RATIO_MASK; ++ ocp_data |= MAC_CLK_SPDWN_EN | 0x03; /* eee_spdwn_ratio */ ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); ++ } else { ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); ++ ocp_data &= ~MAC_CLK_SPDWN_EN; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); ++ } ++} ++ + static void r8153_u1u2en(struct r8152 *tp, bool enable) + { + u8 u1u2[8]; +@@ -3112,6 +3342,9 @@ static void r8153b_ups_flags(struct r815 + if (tp->ups_info.eee_cmod_lv) + ups_flags |= UPS_FLAGS_EEE_CMOD_LV_EN; + ++ if (tp->ups_info.r_tune) ++ ups_flags |= UPS_FLAGS_R_TUNE; ++ + if (tp->ups_info._10m_ckdiv) + ups_flags |= UPS_FLAGS_EN_10M_CKDIV; + +@@ -3162,6 +3395,88 @@ static void r8153b_ups_flags(struct r815 + ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags); + } + ++static void r8156_ups_flags(struct r8152 *tp) ++{ ++ u32 ups_flags = 0; ++ ++ if (tp->ups_info.green) ++ ups_flags |= UPS_FLAGS_EN_GREEN; ++ ++ if (tp->ups_info.aldps) ++ ups_flags |= UPS_FLAGS_EN_ALDPS; ++ ++ if (tp->ups_info.eee) ++ ups_flags |= UPS_FLAGS_EN_EEE; ++ ++ if (tp->ups_info.flow_control) ++ ups_flags |= UPS_FLAGS_EN_FLOW_CTR; ++ ++ if (tp->ups_info.eee_ckdiv) ++ ups_flags |= UPS_FLAGS_EN_EEE_CKDIV; ++ ++ if (tp->ups_info._10m_ckdiv) ++ ups_flags |= UPS_FLAGS_EN_10M_CKDIV; ++ ++ if (tp->ups_info.eee_plloff_100) ++ ups_flags |= UPS_FLAGS_EEE_PLLOFF_100; ++ ++ if (tp->ups_info.eee_plloff_giga) ++ ups_flags |= UPS_FLAGS_EEE_PLLOFF_GIGA; ++ ++ if (tp->ups_info._250m_ckdiv) ++ ups_flags |= UPS_FLAGS_250M_CKDIV; ++ ++ switch (tp->ups_info.speed_duplex) { ++ case FORCE_10M_HALF: ++ ups_flags |= ups_flags_speed(0); ++ break; ++ case FORCE_10M_FULL: ++ ups_flags |= ups_flags_speed(1); ++ break; ++ case FORCE_100M_HALF: ++ ups_flags |= ups_flags_speed(2); ++ break; ++ case FORCE_100M_FULL: ++ ups_flags |= ups_flags_speed(3); ++ break; ++ case NWAY_10M_HALF: ++ ups_flags |= ups_flags_speed(4); ++ break; ++ case NWAY_10M_FULL: ++ ups_flags |= ups_flags_speed(5); ++ break; ++ case NWAY_100M_HALF: ++ ups_flags |= ups_flags_speed(6); ++ break; ++ case NWAY_100M_FULL: ++ ups_flags |= ups_flags_speed(7); ++ break; ++ case NWAY_1000M_FULL: ++ ups_flags |= ups_flags_speed(8); ++ break; ++ case NWAY_2500M_FULL: ++ ups_flags |= ups_flags_speed(9); ++ break; ++ default: ++ break; ++ } ++ ++ switch (tp->ups_info.lite_mode) { ++ case 1: ++ ups_flags |= 0 << 5; ++ break; ++ case 2: ++ ups_flags |= 2 << 5; ++ break; ++ case 0: ++ default: ++ ups_flags |= 1 << 5; ++ break; ++ } ++ ++ ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags); ++} ++ + static void rtl_green_en(struct r8152 *tp, bool enable) + { + u16 data; +@@ -3225,16 +3540,16 @@ static void r8153b_ups_en(struct r8152 * + ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; + ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); + +- ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, 0xcfff); +- ocp_data |= BIT(0); +- ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data); ++ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ++ ocp_data |= UPS_FORCE_PWR_DOWN; ++ ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); + } else { + ocp_data &= ~(UPS_EN | USP_PREWAKE); + ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); + +- ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, 0xcfff); +- ocp_data &= ~BIT(0); +- ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data); ++ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ++ ocp_data &= ~UPS_FORCE_PWR_DOWN; ++ ocp_write_byte(tp, 
MCU_TYPE_USB, USB_MISC_2, ocp_data); + + if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { + int i; +@@ -3254,6 +3569,95 @@ static void r8153b_ups_en(struct r8152 * + } + } + ++static void r8153c_ups_en(struct r8152 *tp, bool enable) ++{ ++ u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); ++ ++ if (enable) { ++ r8153b_ups_flags(tp); ++ ++ ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; ++ ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ++ ++ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ++ ocp_data |= UPS_FORCE_PWR_DOWN; ++ ocp_data &= ~BIT(7); ++ ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); ++ } else { ++ ocp_data &= ~(UPS_EN | USP_PREWAKE); ++ ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ++ ++ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ++ ocp_data &= ~UPS_FORCE_PWR_DOWN; ++ ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); ++ ++ if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { ++ int i; ++ ++ for (i = 0; i < 500; i++) { ++ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & ++ AUTOLOAD_DONE) ++ break; ++ msleep(20); ++ } ++ ++ tp->rtl_ops.hw_phy_cfg(tp); ++ ++ rtl8152_set_speed(tp, tp->autoneg, tp->speed, ++ tp->duplex, tp->advertising); ++ } ++ ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); ++ ocp_data |= BIT(8); ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); ++ ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); ++ } ++} ++ ++static void r8156_ups_en(struct r8152 *tp, bool enable) ++{ ++ u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); ++ ++ if (enable) { ++ r8156_ups_flags(tp); ++ ++ ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; ++ ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ++ ++ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ++ ocp_data |= UPS_FORCE_PWR_DOWN; ++ ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); ++ ++ switch (tp->version) { ++ case RTL_VER_13: ++ case RTL_VER_15: ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPHY_XTAL); ++ ocp_data &= ~OOBS_POLLING; ++ ocp_write_byte(tp, MCU_TYPE_USB, USB_UPHY_XTAL, ocp_data); ++ break; ++ default: ++ break; ++ } ++ } else { ++ ocp_data &= ~(UPS_EN | USP_PREWAKE); ++ ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ++ ++ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ++ ocp_data &= ~UPS_FORCE_PWR_DOWN; ++ ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); ++ ++ if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { ++ tp->rtl_ops.hw_phy_cfg(tp); ++ ++ rtl8152_set_speed(tp, tp->autoneg, tp->speed, ++ tp->duplex, tp->advertising); ++ } ++ } ++} ++ + static void r8153_power_cut_en(struct r8152 *tp, bool enable) + { + u32 ocp_data; +@@ -3383,6 +3787,38 @@ static void rtl8153b_runtime_enable(stru + } + } + ++static void rtl8153c_runtime_enable(struct r8152 *tp, bool enable) ++{ ++ if (enable) { ++ r8153_queue_wake(tp, true); ++ r8153b_u1u2en(tp, false); ++ r8153_u2p3en(tp, false); ++ rtl_runtime_suspend_enable(tp, true); ++ r8153c_ups_en(tp, true); ++ } else { ++ r8153c_ups_en(tp, false); ++ r8153_queue_wake(tp, false); ++ rtl_runtime_suspend_enable(tp, false); ++ r8153b_u1u2en(tp, true); ++ } ++} ++ ++static void rtl8156_runtime_enable(struct r8152 *tp, bool enable) ++{ ++ if (enable) { ++ r8153_queue_wake(tp, true); ++ r8153b_u1u2en(tp, false); ++ r8153_u2p3en(tp, false); ++ rtl_runtime_suspend_enable(tp, true); ++ } else { ++ 
r8153_queue_wake(tp, false); ++ rtl_runtime_suspend_enable(tp, false); ++ r8153_u2p3en(tp, true); ++ if (tp->udev->speed >= USB_SPEED_SUPER) ++ r8153b_u1u2en(tp, true); ++ } ++} ++ + static void r8153_teredo_off(struct r8152 *tp) + { + u32 ocp_data; +@@ -3403,14 +3839,19 @@ static void r8153_teredo_off(struct r815 + + case RTL_VER_08: + case RTL_VER_09: ++ case RTL_TEST_01: ++ case RTL_VER_10: ++ case RTL_VER_11: ++ case RTL_VER_12: ++ case RTL_VER_13: ++ case RTL_VER_14: ++ case RTL_VER_15: ++ default: + /* The bit 0 ~ 7 are relative with teredo settings. They are + * W1C (write 1 to clear), so set all 1 to disable it. + */ + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, 0xff); + break; +- +- default: +- break; + } + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE); +@@ -3445,6 +3886,12 @@ static void rtl_clear_bp(struct r8152 *t + break; + case RTL_VER_08: + case RTL_VER_09: ++ case RTL_VER_10: ++ case RTL_VER_11: ++ case RTL_VER_12: ++ case RTL_VER_13: ++ case RTL_VER_14: ++ case RTL_VER_15: + default: + if (type == MCU_TYPE_USB) { + ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0); +@@ -3654,6 +4101,11 @@ static bool rtl8152_is_fw_mac_ok(struct + case RTL_VER_06: + case RTL_VER_08: + case RTL_VER_09: ++ case RTL_VER_11: ++ case RTL_VER_12: ++ case RTL_VER_13: ++ case RTL_VER_14: ++ case RTL_VER_15: + fw_reg = 0xf800; + bp_ba_addr = PLA_BP_BA; + bp_en_addr = PLA_BP_EN; +@@ -3677,6 +4129,11 @@ static bool rtl8152_is_fw_mac_ok(struct + break; + case RTL_VER_08: + case RTL_VER_09: ++ case RTL_VER_11: ++ case RTL_VER_12: ++ case RTL_VER_13: ++ case RTL_VER_14: ++ case RTL_VER_15: + fw_reg = 0xe600; + bp_ba_addr = USB_BP_BA; + bp_en_addr = USB_BP2_EN; +@@ -4216,6 +4673,22 @@ static void r8153_eee_en(struct r8152 *t + tp->ups_info.eee = enable; + } + ++static void r8156_eee_en(struct r8152 *tp, bool enable) ++{ ++ u16 config; ++ ++ r8153_eee_en(tp, enable); ++ ++ config = ocp_reg_read(tp, OCP_EEE_ADV2); ++ ++ if (enable) ++ config |= MDIO_EEE_2_5GT; ++ else ++ config &= ~MDIO_EEE_2_5GT; ++ ++ ocp_reg_write(tp, OCP_EEE_ADV2, config); ++} ++ + static void rtl_eee_enable(struct r8152 *tp, bool enable) + { + switch (tp->version) { +@@ -4237,6 +4710,7 @@ static void rtl_eee_enable(struct r8152 + case RTL_VER_06: + case RTL_VER_08: + case RTL_VER_09: ++ case RTL_VER_14: + if (enable) { + r8153_eee_en(tp, true); + ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); +@@ -4245,6 +4719,19 @@ static void rtl_eee_enable(struct r8152 + ocp_reg_write(tp, OCP_EEE_ADV, 0); + } + break; ++ case RTL_VER_10: ++ case RTL_VER_11: ++ case RTL_VER_12: ++ case RTL_VER_13: ++ case RTL_VER_15: ++ if (enable) { ++ r8156_eee_en(tp, true); ++ ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); ++ } else { ++ r8156_eee_en(tp, false); ++ ocp_reg_write(tp, OCP_EEE_ADV, 0); ++ } ++ break; + default: + break; + } +@@ -4291,6 +4778,20 @@ static void wait_oob_link_list_ready(str + } + } + ++static void r8156b_wait_loading_flash(struct r8152 *tp) ++{ ++ if ((ocp_read_word(tp, MCU_TYPE_PLA, PLA_GPHY_CTRL) & GPHY_FLASH) && ++ !(ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & BYPASS_FLASH)) { ++ int i; ++ ++ for (i = 0; i < 100; i++) { ++ if (ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & GPHY_PATCH_DONE) ++ break; ++ usleep_range(1000, 2000); ++ } ++ } ++} ++ + static void r8152b_exit_oob(struct r8152 *tp) + { + u32 ocp_data; +@@ -4341,7 +4842,7 @@ static void r8152b_exit_oob(struct r8152 + } + + /* TX share fifo free credit full threshold */ +- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL); ++ 
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2); + + ocp_write_byte(tp, MCU_TYPE_USB, USB_TX_AGG, TX_AGG_MAX_THRESHOLD); + ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_HIGH); +@@ -4518,6 +5019,21 @@ static int r8153b_post_firmware_1(struct + return 0; + } + ++static int r8153c_post_firmware_1(struct r8152 *tp) ++{ ++ u32 ocp_data; ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL); ++ ocp_data |= FLOW_CTRL_PATCH_2; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ++ ocp_data |= FC_PATCH_TASK; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); ++ ++ return 0; ++} ++ + static void r8153_aldps_en(struct r8152 *tp, bool enable) + { + u16 data; +@@ -4720,6 +5236,13 @@ static void r8153b_hw_phy_cfg(struct r81 + set_bit(PHY_RESET, &tp->flags); + } + ++static void r8153c_hw_phy_cfg(struct r8152 *tp) ++{ ++ r8153b_hw_phy_cfg(tp); ++ ++ tp->ups_info.r_tune = true; ++} ++ + static void rtl8153_change_mtu(struct r8152 *tp) + { + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu)); +@@ -4807,6 +5330,7 @@ static void r8153_enter_oob(struct r8152 + + case RTL_VER_08: + case RTL_VER_09: ++ case RTL_VER_14: + /* Clear teredo wake event. bit[15:8] is the teredo wakeup + * type. Set it to zero. bits[7:0] are the W1C bits about + * the events. Set them to all 1 to clear them. +@@ -4843,6 +5367,96 @@ static void rtl8153_disable(struct r8152 + r8153_aldps_en(tp, true); + } + ++static int rtl8156_enable(struct r8152 *tp) ++{ ++ u32 ocp_data; ++ u16 speed; ++ ++ if (test_bit(RTL8152_UNPLUG, &tp->flags)) ++ return -ENODEV; ++ ++ set_tx_qlen(tp); ++ rtl_set_eee_plus(tp); ++ r8153_set_rx_early_timeout(tp); ++ r8153_set_rx_early_size(tp); ++ ++ speed = rtl8152_get_speed(tp); ++ rtl_set_ifg(tp, speed); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); ++ if (speed & _2500bps) ++ ocp_data &= ~IDLE_SPDWN_EN; ++ else ++ ocp_data |= IDLE_SPDWN_EN; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); ++ ++ if (speed & _1000bps) ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS, 0x11); ++ else if (speed & _500bps) ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS, 0x3d); ++ ++ if (tp->udev->speed == USB_SPEED_HIGH) { ++ /* USB 0xb45e[3:0] l1_nyet_hird */ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_L1_CTRL); ++ ocp_data &= ~0xf; ++ if (is_flow_control(speed)) ++ ocp_data |= 0xf; ++ else ++ ocp_data |= 0x1; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data); ++ } ++ ++ return rtl_enable(tp); ++} ++ ++static int rtl8156b_enable(struct r8152 *tp) ++{ ++ u32 ocp_data; ++ u16 speed; ++ ++ if (test_bit(RTL8152_UNPLUG, &tp->flags)) ++ return -ENODEV; ++ ++ set_tx_qlen(tp); ++ rtl_set_eee_plus(tp); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_RX_AGGR_NUM); ++ ocp_data &= ~RX_AGGR_NUM_MASK; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_AGGR_NUM, ocp_data); ++ ++ r8153_set_rx_early_timeout(tp); ++ r8153_set_rx_early_size(tp); ++ ++ speed = rtl8152_get_speed(tp); ++ rtl_set_ifg(tp, speed); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); ++ if (speed & _2500bps) ++ ocp_data &= ~IDLE_SPDWN_EN; ++ else ++ ocp_data |= IDLE_SPDWN_EN; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); ++ ++ if (tp->udev->speed == USB_SPEED_HIGH) { ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_L1_CTRL); ++ ocp_data &= ~0xf; ++ if (is_flow_control(speed)) ++ ocp_data |= 0xf; ++ else ++ ocp_data |= 0x1; ++ 
ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data); ++ } ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ++ ocp_data &= ~FC_PATCH_TASK; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); ++ usleep_range(1000, 2000); ++ ocp_data |= FC_PATCH_TASK; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); ++ ++ return rtl_enable(tp); ++} ++ + static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, + u32 advertising) + { +@@ -4891,58 +5505,73 @@ static int rtl8152_set_speed(struct r815 + + tp->mii.force_media = 1; + } else { +- u16 anar, tmp1; ++ u16 orig, new1; + u32 support; + + support = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL | + RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL; + +- if (tp->mii.supports_gmii) ++ if (tp->mii.supports_gmii) { + support |= RTL_ADVERTISED_1000_FULL; + ++ if (tp->support_2500full) ++ support |= RTL_ADVERTISED_2500_FULL; ++ } ++ + if (!(advertising & support)) + return -EINVAL; + +- anar = r8152_mdio_read(tp, MII_ADVERTISE); +- tmp1 = anar & ~(ADVERTISE_10HALF | ADVERTISE_10FULL | ++ orig = r8152_mdio_read(tp, MII_ADVERTISE); ++ new1 = orig & ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL); + if (advertising & RTL_ADVERTISED_10_HALF) { +- tmp1 |= ADVERTISE_10HALF; ++ new1 |= ADVERTISE_10HALF; + tp->ups_info.speed_duplex = NWAY_10M_HALF; + } + if (advertising & RTL_ADVERTISED_10_FULL) { +- tmp1 |= ADVERTISE_10FULL; ++ new1 |= ADVERTISE_10FULL; + tp->ups_info.speed_duplex = NWAY_10M_FULL; + } + + if (advertising & RTL_ADVERTISED_100_HALF) { +- tmp1 |= ADVERTISE_100HALF; ++ new1 |= ADVERTISE_100HALF; + tp->ups_info.speed_duplex = NWAY_100M_HALF; + } + if (advertising & RTL_ADVERTISED_100_FULL) { +- tmp1 |= ADVERTISE_100FULL; ++ new1 |= ADVERTISE_100FULL; + tp->ups_info.speed_duplex = NWAY_100M_FULL; + } + +- if (anar != tmp1) { +- r8152_mdio_write(tp, MII_ADVERTISE, tmp1); +- tp->mii.advertising = tmp1; ++ if (orig != new1) { ++ r8152_mdio_write(tp, MII_ADVERTISE, new1); ++ tp->mii.advertising = new1; + } + + if (tp->mii.supports_gmii) { +- u16 gbcr; +- +- gbcr = r8152_mdio_read(tp, MII_CTRL1000); +- tmp1 = gbcr & ~(ADVERTISE_1000FULL | ++ orig = r8152_mdio_read(tp, MII_CTRL1000); ++ new1 = orig & ~(ADVERTISE_1000FULL | + ADVERTISE_1000HALF); + + if (advertising & RTL_ADVERTISED_1000_FULL) { +- tmp1 |= ADVERTISE_1000FULL; ++ new1 |= ADVERTISE_1000FULL; + tp->ups_info.speed_duplex = NWAY_1000M_FULL; + } + +- if (gbcr != tmp1) +- r8152_mdio_write(tp, MII_CTRL1000, tmp1); ++ if (orig != new1) ++ r8152_mdio_write(tp, MII_CTRL1000, new1); ++ } ++ ++ if (tp->support_2500full) { ++ orig = ocp_reg_read(tp, OCP_10GBT_CTRL); ++ new1 = orig & ~MDIO_AN_10GBT_CTRL_ADV2_5G; ++ ++ if (advertising & RTL_ADVERTISED_2500_FULL) { ++ new1 |= MDIO_AN_10GBT_CTRL_ADV2_5G; ++ tp->ups_info.speed_duplex = NWAY_2500M_FULL; ++ } ++ ++ if (orig != new1) ++ ocp_reg_write(tp, OCP_10GBT_CTRL, new1); + } + + bmcr = BMCR_ANENABLE | BMCR_ANRESTART; +@@ -5098,6 +5727,253 @@ static void rtl8153b_down(struct r8152 * + r8153_aldps_en(tp, true); + } + ++static void rtl8153c_change_mtu(struct r8152 *tp) ++{ ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu)); ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, 10 * 1024 / 64); ++ ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, 512 / 64); ++ ++ /* Adjust the tx fifo free credit full threshold, otherwise ++ * the fifo would be too small to send a jumbo frame packet. 
++ */ ++ if (tp->netdev->mtu < 8000) ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, 2048 / 8); ++ else ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, 900 / 8); ++} ++ ++static void rtl8153c_up(struct r8152 *tp) ++{ ++ u32 ocp_data; ++ ++ if (test_bit(RTL8152_UNPLUG, &tp->flags)) ++ return; ++ ++ r8153b_u1u2en(tp, false); ++ r8153_u2p3en(tp, false); ++ r8153_aldps_en(tp, false); ++ ++ rxdy_gated_en(tp, true); ++ r8153_teredo_off(tp); ++ ++ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ++ ocp_data &= ~RCR_ACPT_ALL; ++ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); ++ ++ rtl8152_nic_reset(tp); ++ rtl_reset_bmu(tp); ++ ++ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ++ ocp_data &= ~NOW_IS_OOB; ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ++ ocp_data &= ~MCU_BORW_EN; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); ++ ++ wait_oob_link_list_ready(tp); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ++ ocp_data |= RE_INIT_LL; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); ++ ++ wait_oob_link_list_ready(tp); ++ ++ rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); ++ ++ rtl8153c_change_mtu(tp); ++ ++ rtl8152_nic_reset(tp); ++ ++ /* rx share fifo credit full threshold */ ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, 0x02); ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 0x08); ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_NORMAL); ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_NORMAL); ++ ++ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B); ++ ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); ++ ocp_data |= BIT(8); ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); ++ ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ++ ocp_data &= ~PLA_MCU_SPDWN_EN; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); ++ ++ r8153_aldps_en(tp, true); ++ r8153b_u1u2en(tp, true); ++} ++ ++static inline u32 fc_pause_on_auto(struct r8152 *tp) ++{ ++ return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 6 * 1024); ++} ++ ++static inline u32 fc_pause_off_auto(struct r8152 *tp) ++{ ++ return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 14 * 1024); ++} ++ ++static void r8156_fc_parameter(struct r8152 *tp) ++{ ++ u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp); ++ u32 pause_off = tp->fc_pause_off ? 
tp->fc_pause_off : fc_pause_off_auto(tp); ++ ++ switch (tp->version) { ++ case RTL_VER_10: ++ case RTL_VER_11: ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 8); ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 8); ++ break; ++ case RTL_VER_12: ++ case RTL_VER_13: ++ case RTL_VER_15: ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16); ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16); ++ break; ++ default: ++ break; ++ } ++} ++ ++static void rtl8156_change_mtu(struct r8152 *tp) ++{ ++ u32 rx_max_size = mtu_to_size(tp->netdev->mtu); ++ ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rx_max_size); ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); ++ r8156_fc_parameter(tp); ++ ++ /* TX share fifo free credit full threshold */ ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, 512 / 64); ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, ++ ALIGN(rx_max_size + sizeof(struct tx_desc), 1024) / 16); ++} ++ ++static void rtl8156_up(struct r8152 *tp) ++{ ++ u32 ocp_data; ++ ++ if (test_bit(RTL8152_UNPLUG, &tp->flags)) ++ return; ++ ++ r8153b_u1u2en(tp, false); ++ r8153_u2p3en(tp, false); ++ r8153_aldps_en(tp, false); ++ ++ rxdy_gated_en(tp, true); ++ r8153_teredo_off(tp); ++ ++ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ++ ocp_data &= ~RCR_ACPT_ALL; ++ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); ++ ++ rtl8152_nic_reset(tp); ++ rtl_reset_bmu(tp); ++ ++ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ++ ocp_data &= ~NOW_IS_OOB; ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ++ ocp_data &= ~MCU_BORW_EN; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); ++ ++ rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); ++ ++ rtl8156_change_mtu(tp); ++ ++ switch (tp->version) { ++ case RTL_TEST_01: ++ case RTL_VER_10: ++ case RTL_VER_11: ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_CONFIG); ++ ocp_data |= ACT_ODMA; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data); ++ break; ++ default: ++ break; ++ } ++ ++ /* share FIFO settings */ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL); ++ ocp_data &= ~RXFIFO_FULL_MASK; ++ ocp_data |= 0x08; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, ocp_data); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ++ ocp_data &= ~PLA_MCU_SPDWN_EN; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION); ++ ocp_data &= ~(RG_PWRDN_EN | ALL_SPEED_OFF); ++ ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, ocp_data); ++ ++ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, 0x00600400); ++ ++ if (tp->saved_wolopts != __rtl_get_wol(tp)) { ++ netif_warn(tp, ifup, tp->netdev, "wol setting is changed\n"); ++ __rtl_set_wol(tp, tp->saved_wolopts); ++ } ++ ++ r8153_aldps_en(tp, true); ++ r8153_u2p3en(tp, true); ++ ++ if (tp->udev->speed >= USB_SPEED_SUPER) ++ r8153b_u1u2en(tp, true); ++} ++ ++static void rtl8156_down(struct r8152 *tp) ++{ ++ u32 ocp_data; ++ ++ if (test_bit(RTL8152_UNPLUG, &tp->flags)) { ++ rtl_drop_queued_tx(tp); ++ return; ++ } ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ++ ocp_data |= PLA_MCU_SPDWN_EN; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); ++ ++ r8153b_u1u2en(tp, false); ++ r8153_u2p3en(tp, false); ++ r8153b_power_cut_en(tp, false); ++ r8153_aldps_en(tp, false); ++ ++ 
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ++ ocp_data &= ~NOW_IS_OOB; ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ++ ++ rtl_disable(tp); ++ rtl_reset_bmu(tp); ++ ++ /* Clear teredo wake event. bit[15:8] is the teredo wakeup ++ * type. Set it to zero. bits[7:0] are the W1C bits about ++ * the events. Set them to all 1 to clear them. ++ */ ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_WAKE_BASE, 0x00ff); ++ ++ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ++ ocp_data |= NOW_IS_OOB; ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ++ ++ rtl_rx_vlan_en(tp, true); ++ rxdy_gated_en(tp, false); ++ ++ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ++ ocp_data |= RCR_APM | RCR_AM | RCR_AB; ++ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); ++ ++ r8153_aldps_en(tp, true); ++} ++ + static bool rtl8152_in_nway(struct r8152 *tp) + { + u16 nway_state; +@@ -5128,7 +6004,7 @@ static void set_carrier(struct r8152 *tp + { + struct net_device *netdev = tp->netdev; + struct napi_struct *napi = &tp->napi; +- u8 speed; ++ u16 speed; + + speed = rtl8152_get_speed(tp); + +@@ -5141,7 +6017,7 @@ static void set_carrier(struct r8152 *tp + rtl_start_rx(tp); + clear_bit(RTL8152_SET_RX_MODE, &tp->flags); + _rtl8152_set_rx_mode(netdev); +- napi_enable(&tp->napi); ++ napi_enable(napi); + netif_wake_queue(netdev); + netif_info(tp, link, netdev, "carrier on\n"); + } else if (netif_queue_stopped(netdev) && +@@ -5521,14 +6397,9 @@ static void r8153_init(struct r8152 *tp) + + ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001); + +- /* MAC clock speed down */ +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0); +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0); +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0); +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0); +- + r8153_power_cut_en(tp, false); + rtl_runtime_suspend_enable(tp, false); ++ r8153_mac_clk_speed_down(tp, false); + r8153_u1u2en(tp, true); + usb_enable_lpm(tp->udev); + +@@ -5621,9 +6492,7 @@ static void r8153b_init(struct r8152 *tp + usb_enable_lpm(tp->udev); + + /* MAC clock speed down */ +- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); +- ocp_data |= MAC_CLK_SPDWN_EN; +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); ++ r8153_mac_clk_speed_down(tp, true); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~PLA_MCU_SPDWN_EN; +@@ -5652,6 +6521,1069 @@ static void r8153b_init(struct r8152 *tp + r8152_led_configuration(tp); + } + ++static void r8153c_init(struct r8152 *tp) ++{ ++ u32 ocp_data; ++ u16 data; ++ int i; ++ ++ if (test_bit(RTL8152_UNPLUG, &tp->flags)) ++ return; ++ ++ r8153b_u1u2en(tp, false); ++ ++ /* Disable spi_en */ ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5); ++ ocp_data &= ~BIT(3); ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data); ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xcbf0); ++ ocp_data |= BIT(1); ++ ocp_write_word(tp, MCU_TYPE_USB, 0xcbf0, ocp_data); ++ ++ for (i = 0; i < 500; i++) { ++ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & ++ AUTOLOAD_DONE) ++ break; ++ ++ msleep(20); ++ if (test_bit(RTL8152_UNPLUG, &tp->flags)) ++ return; ++ } ++ ++ data = r8153_phy_status(tp, 0); ++ ++ data = r8152_mdio_read(tp, MII_BMCR); ++ if (data & BMCR_PDOWN) { ++ data &= ~BMCR_PDOWN; ++ r8152_mdio_write(tp, MII_BMCR, data); ++ } ++ ++ data = r8153_phy_status(tp, PHY_STAT_LAN_ON); ++ ++ 
r8153_u2p3en(tp, false); ++ ++ /* MSC timer = 0xfff * 8ms = 32760 ms */ ++ ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff); ++ ++ r8153b_power_cut_en(tp, false); ++ r8153c_ups_en(tp, false); ++ r8153_queue_wake(tp, false); ++ rtl_runtime_suspend_enable(tp, false); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); ++ if (rtl8152_get_speed(tp) & LINK_STATUS) ++ ocp_data |= CUR_LINK_OK; ++ else ++ ocp_data &= ~CUR_LINK_OK; ++ ++ ocp_data |= POLL_LINK_CHG; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); ++ ++ r8153b_u1u2en(tp, true); ++ ++ usb_enable_lpm(tp->udev); ++ ++ /* MAC clock speed down */ ++ r8153_mac_clk_speed_down(tp, true); ++ ++ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ++ ocp_data &= ~BIT(7); ++ ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); ++ ++ set_bit(GREEN_ETHERNET, &tp->flags); ++ ++ /* rx aggregation */ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ++ ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ++ ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); ++ ++ rtl_tally_reset(tp); ++ ++ tp->coalesce = 15000; /* 15 us */ ++} ++ ++static void r8156_hw_phy_cfg(struct r8152 *tp) ++{ ++ u32 ocp_data; ++ u16 data; ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); ++ if (ocp_data & PCUT_STATUS) { ++ ocp_data &= ~PCUT_STATUS; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); ++ } ++ ++ data = r8153_phy_status(tp, 0); ++ switch (data) { ++ case PHY_STAT_EXT_INIT: ++ rtl8152_apply_firmware(tp, true); ++ ++ data = ocp_reg_read(tp, 0xa468); ++ data &= ~(BIT(3) | BIT(1)); ++ ocp_reg_write(tp, 0xa468, data); ++ break; ++ case PHY_STAT_LAN_ON: ++ case PHY_STAT_PWRDN: ++ default: ++ rtl8152_apply_firmware(tp, false); ++ break; ++ } ++ ++ /* disable ALDPS before updating the PHY parameters */ ++ r8153_aldps_en(tp, false); ++ ++ /* disable EEE before updating the PHY parameters */ ++ rtl_eee_enable(tp, false); ++ ++ data = r8153_phy_status(tp, PHY_STAT_LAN_ON); ++ WARN_ON_ONCE(data != PHY_STAT_LAN_ON); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); ++ ocp_data |= PFM_PWM_SWITCH; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); ++ ++ switch (tp->version) { ++ case RTL_VER_10: ++ data = ocp_reg_read(tp, 0xad40); ++ data &= ~0x3ff; ++ data |= BIT(7) | BIT(2); ++ ocp_reg_write(tp, 0xad40, data); ++ ++ data = ocp_reg_read(tp, 0xad4e); ++ data |= BIT(4); ++ ocp_reg_write(tp, 0xad4e, data); ++ data = ocp_reg_read(tp, 0xad16); ++ data &= ~0x3ff; ++ data |= 0x6; ++ ocp_reg_write(tp, 0xad16, data); ++ data = ocp_reg_read(tp, 0xad32); ++ data &= ~0x3f; ++ data |= 6; ++ ocp_reg_write(tp, 0xad32, data); ++ data = ocp_reg_read(tp, 0xac08); ++ data &= ~(BIT(12) | BIT(8)); ++ ocp_reg_write(tp, 0xac08, data); ++ data = ocp_reg_read(tp, 0xac8a); ++ data |= BIT(12) | BIT(13) | BIT(14); ++ data &= ~BIT(15); ++ ocp_reg_write(tp, 0xac8a, data); ++ data = ocp_reg_read(tp, 0xad18); ++ data |= BIT(10); ++ ocp_reg_write(tp, 0xad18, data); ++ data = ocp_reg_read(tp, 0xad1a); ++ data |= 0x3ff; ++ ocp_reg_write(tp, 0xad1a, data); ++ data = ocp_reg_read(tp, 0xad1c); ++ data |= 0x3ff; ++ ocp_reg_write(tp, 0xad1c, data); ++ ++ data = sram_read(tp, 0x80ea); ++ data &= ~0xff00; ++ data |= 0xc400; ++ sram_write(tp, 0x80ea, data); ++ data = sram_read(tp, 0x80eb); ++ data &= ~0x0700; ++ data |= 0x0300; ++ sram_write(tp, 0x80eb, data); ++ data = sram_read(tp, 0x80f8); ++ data &= ~0xff00; ++ data |= 0x1c00; ++ sram_write(tp, 0x80f8, data); ++ data = sram_read(tp, 0x80f1); ++ data &= ~0xff00; ++ data |= 
0x3000; ++ sram_write(tp, 0x80f1, data); ++ ++ data = sram_read(tp, 0x80fe); ++ data &= ~0xff00; ++ data |= 0xa500; ++ sram_write(tp, 0x80fe, data); ++ data = sram_read(tp, 0x8102); ++ data &= ~0xff00; ++ data |= 0x5000; ++ sram_write(tp, 0x8102, data); ++ data = sram_read(tp, 0x8015); ++ data &= ~0xff00; ++ data |= 0x3300; ++ sram_write(tp, 0x8015, data); ++ data = sram_read(tp, 0x8100); ++ data &= ~0xff00; ++ data |= 0x7000; ++ sram_write(tp, 0x8100, data); ++ data = sram_read(tp, 0x8014); ++ data &= ~0xff00; ++ data |= 0xf000; ++ sram_write(tp, 0x8014, data); ++ data = sram_read(tp, 0x8016); ++ data &= ~0xff00; ++ data |= 0x6500; ++ sram_write(tp, 0x8016, data); ++ data = sram_read(tp, 0x80dc); ++ data &= ~0xff00; ++ data |= 0xed00; ++ sram_write(tp, 0x80dc, data); ++ data = sram_read(tp, 0x80df); ++ data |= BIT(8); ++ sram_write(tp, 0x80df, data); ++ data = sram_read(tp, 0x80e1); ++ data &= ~BIT(8); ++ sram_write(tp, 0x80e1, data); ++ ++ data = ocp_reg_read(tp, 0xbf06); ++ data &= ~0x003f; ++ data |= 0x0038; ++ ocp_reg_write(tp, 0xbf06, data); ++ ++ sram_write(tp, 0x819f, 0xddb6); ++ ++ ocp_reg_write(tp, 0xbc34, 0x5555); ++ data = ocp_reg_read(tp, 0xbf0a); ++ data &= ~0x0e00; ++ data |= 0x0a00; ++ ocp_reg_write(tp, 0xbf0a, data); ++ ++ data = ocp_reg_read(tp, 0xbd2c); ++ data &= ~BIT(13); ++ ocp_reg_write(tp, 0xbd2c, data); ++ break; ++ case RTL_VER_11: ++ data = ocp_reg_read(tp, 0xad16); ++ data |= 0x3ff; ++ ocp_reg_write(tp, 0xad16, data); ++ data = ocp_reg_read(tp, 0xad32); ++ data &= ~0x3f; ++ data |= 6; ++ ocp_reg_write(tp, 0xad32, data); ++ data = ocp_reg_read(tp, 0xac08); ++ data &= ~(BIT(12) | BIT(8)); ++ ocp_reg_write(tp, 0xac08, data); ++ data = ocp_reg_read(tp, 0xacc0); ++ data &= ~0x3; ++ data |= BIT(1); ++ ocp_reg_write(tp, 0xacc0, data); ++ data = ocp_reg_read(tp, 0xad40); ++ data &= ~0xe7; ++ data |= BIT(6) | BIT(2); ++ ocp_reg_write(tp, 0xad40, data); ++ data = ocp_reg_read(tp, 0xac14); ++ data &= ~BIT(7); ++ ocp_reg_write(tp, 0xac14, data); ++ data = ocp_reg_read(tp, 0xac80); ++ data &= ~(BIT(8) | BIT(9)); ++ ocp_reg_write(tp, 0xac80, data); ++ data = ocp_reg_read(tp, 0xac5e); ++ data &= ~0x7; ++ data |= BIT(1); ++ ocp_reg_write(tp, 0xac5e, data); ++ ocp_reg_write(tp, 0xad4c, 0x00a8); ++ ocp_reg_write(tp, 0xac5c, 0x01ff); ++ data = ocp_reg_read(tp, 0xac8a); ++ data &= ~0xf0; ++ data |= BIT(4) | BIT(5); ++ ocp_reg_write(tp, 0xac8a, data); ++ ocp_reg_write(tp, 0xb87c, 0x8157); ++ data = ocp_reg_read(tp, 0xb87e); ++ data &= ~0xff00; ++ data |= 0x0500; ++ ocp_reg_write(tp, 0xb87e, data); ++ ocp_reg_write(tp, 0xb87c, 0x8159); ++ data = ocp_reg_read(tp, 0xb87e); ++ data &= ~0xff00; ++ data |= 0x0700; ++ ocp_reg_write(tp, 0xb87e, data); ++ ++ /* AAGC */ ++ ocp_reg_write(tp, 0xb87c, 0x80a2); ++ ocp_reg_write(tp, 0xb87e, 0x0153); ++ ocp_reg_write(tp, 0xb87c, 0x809c); ++ ocp_reg_write(tp, 0xb87e, 0x0153); ++ ++ /* EEE parameter */ ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS_2P5G, 0x0056); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_USB_CFG); ++ ocp_data |= EN_XG_LIP | EN_G_LIP; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_USB_CFG, ocp_data); ++ ++ sram_write(tp, 0x8257, 0x020f); /* XG PLL */ ++ sram_write(tp, 0x80ea, 0x7843); /* GIGA Master */ ++ ++ if (rtl_phy_patch_request(tp, true, true)) ++ return; ++ ++ /* Advance EEE */ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); ++ ocp_data |= EEE_SPDWN_EN; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); ++ ++ data = ocp_reg_read(tp, OCP_DOWN_SPEED); ++ data &= ~(EN_EEE_100 | 
EN_EEE_1000); ++ data |= EN_10M_CLKDIV; ++ ocp_reg_write(tp, OCP_DOWN_SPEED, data); ++ tp->ups_info._10m_ckdiv = true; ++ tp->ups_info.eee_plloff_100 = false; ++ tp->ups_info.eee_plloff_giga = false; ++ ++ data = ocp_reg_read(tp, OCP_POWER_CFG); ++ data &= ~EEE_CLKDIV_EN; ++ ocp_reg_write(tp, OCP_POWER_CFG, data); ++ tp->ups_info.eee_ckdiv = false; ++ ++ ocp_reg_write(tp, OCP_SYSCLK_CFG, 0); ++ ocp_reg_write(tp, OCP_SYSCLK_CFG, sysclk_div_expo(5)); ++ tp->ups_info._250m_ckdiv = false; ++ ++ rtl_phy_patch_request(tp, false, true); ++ ++ /* enable ADC Ibias Cal */ ++ data = ocp_reg_read(tp, 0xd068); ++ data |= BIT(13); ++ ocp_reg_write(tp, 0xd068, data); ++ ++ /* enable Thermal Sensor */ ++ data = sram_read(tp, 0x81a2); ++ data &= ~BIT(8); ++ sram_write(tp, 0x81a2, data); ++ data = ocp_reg_read(tp, 0xb54c); ++ data &= ~0xff00; ++ data |= 0xdb00; ++ ocp_reg_write(tp, 0xb54c, data); ++ ++ /* Nway 2.5G Lite */ ++ data = ocp_reg_read(tp, 0xa454); ++ data &= ~BIT(0); ++ ocp_reg_write(tp, 0xa454, data); ++ ++ /* CS DSP solution */ ++ data = ocp_reg_read(tp, OCP_10GBT_CTRL); ++ data |= RTL_ADV2_5G_F_R; ++ ocp_reg_write(tp, OCP_10GBT_CTRL, data); ++ data = ocp_reg_read(tp, 0xad4e); ++ data &= ~BIT(4); ++ ocp_reg_write(tp, 0xad4e, data); ++ data = ocp_reg_read(tp, 0xa86a); ++ data &= ~BIT(0); ++ ocp_reg_write(tp, 0xa86a, data); ++ ++ /* MDI SWAP */ ++ if ((ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CFG) & MID_REVERSE) && ++ (ocp_reg_read(tp, 0xd068) & BIT(1))) { ++ u16 swap_a, swap_b; ++ ++ data = ocp_reg_read(tp, 0xd068); ++ data &= ~0x1f; ++ data |= 0x1; /* p0 */ ++ ocp_reg_write(tp, 0xd068, data); ++ swap_a = ocp_reg_read(tp, 0xd06a); ++ data &= ~0x18; ++ data |= 0x18; /* p3 */ ++ ocp_reg_write(tp, 0xd068, data); ++ swap_b = ocp_reg_read(tp, 0xd06a); ++ data &= ~0x18; /* p0 */ ++ ocp_reg_write(tp, 0xd068, data); ++ ocp_reg_write(tp, 0xd06a, ++ (swap_a & ~0x7ff) | (swap_b & 0x7ff)); ++ data |= 0x18; /* p3 */ ++ ocp_reg_write(tp, 0xd068, data); ++ ocp_reg_write(tp, 0xd06a, ++ (swap_b & ~0x7ff) | (swap_a & 0x7ff)); ++ data &= ~0x18; ++ data |= 0x08; /* p1 */ ++ ocp_reg_write(tp, 0xd068, data); ++ swap_a = ocp_reg_read(tp, 0xd06a); ++ data &= ~0x18; ++ data |= 0x10; /* p2 */ ++ ocp_reg_write(tp, 0xd068, data); ++ swap_b = ocp_reg_read(tp, 0xd06a); ++ data &= ~0x18; ++ data |= 0x08; /* p1 */ ++ ocp_reg_write(tp, 0xd068, data); ++ ocp_reg_write(tp, 0xd06a, ++ (swap_a & ~0x7ff) | (swap_b & 0x7ff)); ++ data &= ~0x18; ++ data |= 0x10; /* p2 */ ++ ocp_reg_write(tp, 0xd068, data); ++ ocp_reg_write(tp, 0xd06a, ++ (swap_b & ~0x7ff) | (swap_a & 0x7ff)); ++ swap_a = ocp_reg_read(tp, 0xbd5a); ++ swap_b = ocp_reg_read(tp, 0xbd5c); ++ ocp_reg_write(tp, 0xbd5a, (swap_a & ~0x1f1f) | ++ ((swap_b & 0x1f) << 8) | ++ ((swap_b >> 8) & 0x1f)); ++ ocp_reg_write(tp, 0xbd5c, (swap_b & ~0x1f1f) | ++ ((swap_a & 0x1f) << 8) | ++ ((swap_a >> 8) & 0x1f)); ++ swap_a = ocp_reg_read(tp, 0xbc18); ++ swap_b = ocp_reg_read(tp, 0xbc1a); ++ ocp_reg_write(tp, 0xbc18, (swap_a & ~0x1f1f) | ++ ((swap_b & 0x1f) << 8) | ++ ((swap_b >> 8) & 0x1f)); ++ ocp_reg_write(tp, 0xbc1a, (swap_b & ~0x1f1f) | ++ ((swap_a & 0x1f) << 8) | ++ ((swap_a >> 8) & 0x1f)); ++ } ++ break; ++ default: ++ break; ++ } ++ ++ rtl_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags)); ++ ++ data = ocp_reg_read(tp, 0xa428); ++ data &= ~BIT(9); ++ ocp_reg_write(tp, 0xa428, data); ++ data = ocp_reg_read(tp, 0xa5ea); ++ data &= ~BIT(0); ++ ocp_reg_write(tp, 0xa5ea, data); ++ tp->ups_info.lite_mode = 0; ++ ++ if (tp->eee_en) ++ rtl_eee_enable(tp, true); ++ ++ r8153_aldps_en(tp, 
true); ++ r8152b_enable_fc(tp); ++ r8153_u2p3en(tp, true); ++ ++ set_bit(PHY_RESET, &tp->flags); ++} ++ ++static void r8156b_hw_phy_cfg(struct r8152 *tp) ++{ ++ u32 ocp_data; ++ u16 data; ++ ++ switch (tp->version) { ++ case RTL_VER_12: ++ ocp_reg_write(tp, 0xbf86, 0x9000); ++ data = ocp_reg_read(tp, 0xc402); ++ data |= BIT(10); ++ ocp_reg_write(tp, 0xc402, data); ++ data &= ~BIT(10); ++ ocp_reg_write(tp, 0xc402, data); ++ ocp_reg_write(tp, 0xbd86, 0x1010); ++ ocp_reg_write(tp, 0xbd88, 0x1010); ++ data = ocp_reg_read(tp, 0xbd4e); ++ data &= ~(BIT(10) | BIT(11)); ++ data |= BIT(11); ++ ocp_reg_write(tp, 0xbd4e, data); ++ data = ocp_reg_read(tp, 0xbf46); ++ data &= ~0xf00; ++ data |= 0x700; ++ ocp_reg_write(tp, 0xbf46, data); ++ break; ++ case RTL_VER_13: ++ case RTL_VER_15: ++ r8156b_wait_loading_flash(tp); ++ break; ++ default: ++ break; ++ } ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); ++ if (ocp_data & PCUT_STATUS) { ++ ocp_data &= ~PCUT_STATUS; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); ++ } ++ ++ data = r8153_phy_status(tp, 0); ++ switch (data) { ++ case PHY_STAT_EXT_INIT: ++ rtl8152_apply_firmware(tp, true); ++ ++ data = ocp_reg_read(tp, 0xa466); ++ data &= ~BIT(0); ++ ocp_reg_write(tp, 0xa466, data); ++ ++ data = ocp_reg_read(tp, 0xa468); ++ data &= ~(BIT(3) | BIT(1)); ++ ocp_reg_write(tp, 0xa468, data); ++ break; ++ case PHY_STAT_LAN_ON: ++ case PHY_STAT_PWRDN: ++ default: ++ rtl8152_apply_firmware(tp, false); ++ break; ++ } ++ ++ data = r8152_mdio_read(tp, MII_BMCR); ++ if (data & BMCR_PDOWN) { ++ data &= ~BMCR_PDOWN; ++ r8152_mdio_write(tp, MII_BMCR, data); ++ } ++ ++ /* disable ALDPS before updating the PHY parameters */ ++ r8153_aldps_en(tp, false); ++ ++ /* disable EEE before updating the PHY parameters */ ++ rtl_eee_enable(tp, false); ++ ++ data = r8153_phy_status(tp, PHY_STAT_LAN_ON); ++ WARN_ON_ONCE(data != PHY_STAT_LAN_ON); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); ++ ocp_data |= PFM_PWM_SWITCH; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); ++ ++ switch (tp->version) { ++ case RTL_VER_12: ++ data = ocp_reg_read(tp, 0xbc08); ++ data |= BIT(3) | BIT(2); ++ ocp_reg_write(tp, 0xbc08, data); ++ ++ data = sram_read(tp, 0x8fff); ++ data &= ~0xff00; ++ data |= 0x0400; ++ sram_write(tp, 0x8fff, data); ++ ++ data = ocp_reg_read(tp, 0xacda); ++ data |= 0xff00; ++ ocp_reg_write(tp, 0xacda, data); ++ data = ocp_reg_read(tp, 0xacde); ++ data |= 0xf000; ++ ocp_reg_write(tp, 0xacde, data); ++ ocp_reg_write(tp, 0xac8c, 0x0ffc); ++ ocp_reg_write(tp, 0xac46, 0xb7b4); ++ ocp_reg_write(tp, 0xac50, 0x0fbc); ++ ocp_reg_write(tp, 0xac3c, 0x9240); ++ ocp_reg_write(tp, 0xac4e, 0x0db4); ++ ocp_reg_write(tp, 0xacc6, 0x0707); ++ ocp_reg_write(tp, 0xacc8, 0xa0d3); ++ ocp_reg_write(tp, 0xad08, 0x0007); ++ ++ ocp_reg_write(tp, 0xb87c, 0x8560); ++ ocp_reg_write(tp, 0xb87e, 0x19cc); ++ ocp_reg_write(tp, 0xb87c, 0x8562); ++ ocp_reg_write(tp, 0xb87e, 0x19cc); ++ ocp_reg_write(tp, 0xb87c, 0x8564); ++ ocp_reg_write(tp, 0xb87e, 0x19cc); ++ ocp_reg_write(tp, 0xb87c, 0x8566); ++ ocp_reg_write(tp, 0xb87e, 0x147d); ++ ocp_reg_write(tp, 0xb87c, 0x8568); ++ ocp_reg_write(tp, 0xb87e, 0x147d); ++ ocp_reg_write(tp, 0xb87c, 0x856a); ++ ocp_reg_write(tp, 0xb87e, 0x147d); ++ ocp_reg_write(tp, 0xb87c, 0x8ffe); ++ ocp_reg_write(tp, 0xb87e, 0x0907); ++ ocp_reg_write(tp, 0xb87c, 0x80d6); ++ ocp_reg_write(tp, 0xb87e, 0x2801); ++ ocp_reg_write(tp, 0xb87c, 0x80f2); ++ ocp_reg_write(tp, 0xb87e, 0x2801); ++ ocp_reg_write(tp, 0xb87c, 0x80f4); ++ ocp_reg_write(tp, 
0xb87e, 0x6077); ++ ocp_reg_write(tp, 0xb506, 0x01e7); ++ ++ ocp_reg_write(tp, 0xb87c, 0x8013); ++ ocp_reg_write(tp, 0xb87e, 0x0700); ++ ocp_reg_write(tp, 0xb87c, 0x8fb9); ++ ocp_reg_write(tp, 0xb87e, 0x2801); ++ ocp_reg_write(tp, 0xb87c, 0x8fba); ++ ocp_reg_write(tp, 0xb87e, 0x0100); ++ ocp_reg_write(tp, 0xb87c, 0x8fbc); ++ ocp_reg_write(tp, 0xb87e, 0x1900); ++ ocp_reg_write(tp, 0xb87c, 0x8fbe); ++ ocp_reg_write(tp, 0xb87e, 0xe100); ++ ocp_reg_write(tp, 0xb87c, 0x8fc0); ++ ocp_reg_write(tp, 0xb87e, 0x0800); ++ ocp_reg_write(tp, 0xb87c, 0x8fc2); ++ ocp_reg_write(tp, 0xb87e, 0xe500); ++ ocp_reg_write(tp, 0xb87c, 0x8fc4); ++ ocp_reg_write(tp, 0xb87e, 0x0f00); ++ ocp_reg_write(tp, 0xb87c, 0x8fc6); ++ ocp_reg_write(tp, 0xb87e, 0xf100); ++ ocp_reg_write(tp, 0xb87c, 0x8fc8); ++ ocp_reg_write(tp, 0xb87e, 0x0400); ++ ocp_reg_write(tp, 0xb87c, 0x8fca); ++ ocp_reg_write(tp, 0xb87e, 0xf300); ++ ocp_reg_write(tp, 0xb87c, 0x8fcc); ++ ocp_reg_write(tp, 0xb87e, 0xfd00); ++ ocp_reg_write(tp, 0xb87c, 0x8fce); ++ ocp_reg_write(tp, 0xb87e, 0xff00); ++ ocp_reg_write(tp, 0xb87c, 0x8fd0); ++ ocp_reg_write(tp, 0xb87e, 0xfb00); ++ ocp_reg_write(tp, 0xb87c, 0x8fd2); ++ ocp_reg_write(tp, 0xb87e, 0x0100); ++ ocp_reg_write(tp, 0xb87c, 0x8fd4); ++ ocp_reg_write(tp, 0xb87e, 0xf400); ++ ocp_reg_write(tp, 0xb87c, 0x8fd6); ++ ocp_reg_write(tp, 0xb87e, 0xff00); ++ ocp_reg_write(tp, 0xb87c, 0x8fd8); ++ ocp_reg_write(tp, 0xb87e, 0xf600); ++ ++ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_USB_CFG); ++ ocp_data |= EN_XG_LIP | EN_G_LIP; ++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_USB_CFG, ocp_data); ++ ocp_reg_write(tp, 0xb87c, 0x813d); ++ ocp_reg_write(tp, 0xb87e, 0x390e); ++ ocp_reg_write(tp, 0xb87c, 0x814f); ++ ocp_reg_write(tp, 0xb87e, 0x790e); ++ ocp_reg_write(tp, 0xb87c, 0x80b0); ++ ocp_reg_write(tp, 0xb87e, 0x0f31); ++ data = ocp_reg_read(tp, 0xbf4c); ++ data |= BIT(1); ++ ocp_reg_write(tp, 0xbf4c, data); ++ data = ocp_reg_read(tp, 0xbcca); ++ data |= BIT(9) | BIT(8); ++ ocp_reg_write(tp, 0xbcca, data); ++ ocp_reg_write(tp, 0xb87c, 0x8141); ++ ocp_reg_write(tp, 0xb87e, 0x320e); ++ ocp_reg_write(tp, 0xb87c, 0x8153); ++ ocp_reg_write(tp, 0xb87e, 0x720e); ++ ocp_reg_write(tp, 0xb87c, 0x8529); ++ ocp_reg_write(tp, 0xb87e, 0x050e); ++ data = ocp_reg_read(tp, OCP_EEE_CFG); ++ data &= ~CTAP_SHORT_EN; ++ ocp_reg_write(tp, OCP_EEE_CFG, data); ++ ++ sram_write(tp, 0x816c, 0xc4a0); ++ sram_write(tp, 0x8170, 0xc4a0); ++ sram_write(tp, 0x8174, 0x04a0); ++ sram_write(tp, 0x8178, 0x04a0); ++ sram_write(tp, 0x817c, 0x0719); ++ sram_write(tp, 0x8ff4, 0x0400); ++ sram_write(tp, 0x8ff1, 0x0404); ++ ++ ocp_reg_write(tp, 0xbf4a, 0x001b); ++ ocp_reg_write(tp, 0xb87c, 0x8033); ++ ocp_reg_write(tp, 0xb87e, 0x7c13); ++ ocp_reg_write(tp, 0xb87c, 0x8037); ++ ocp_reg_write(tp, 0xb87e, 0x7c13); ++ ocp_reg_write(tp, 0xb87c, 0x803b); ++ ocp_reg_write(tp, 0xb87e, 0xfc32); ++ ocp_reg_write(tp, 0xb87c, 0x803f); ++ ocp_reg_write(tp, 0xb87e, 0x7c13); ++ ocp_reg_write(tp, 0xb87c, 0x8043); ++ ocp_reg_write(tp, 0xb87e, 0x7c13); ++ ocp_reg_write(tp, 0xb87c, 0x8047); ++ ocp_reg_write(tp, 0xb87e, 0x7c13); ++ ++ ocp_reg_write(tp, 0xb87c, 0x8145); ++ ocp_reg_write(tp, 0xb87e, 0x370e); ++ ocp_reg_write(tp, 0xb87c, 0x8157); ++ ocp_reg_write(tp, 0xb87e, 0x770e); ++ ocp_reg_write(tp, 0xb87c, 0x8169); ++ ocp_reg_write(tp, 0xb87e, 0x0d0a); ++ ocp_reg_write(tp, 0xb87c, 0x817b); ++ ocp_reg_write(tp, 0xb87e, 0x1d0a); ++ ++ data = sram_read(tp, 0x8217); ++ data &= ~0xff00; ++ data |= 0x5000; ++ sram_write(tp, 0x8217, data); ++ data = sram_read(tp, 0x821a); ++ data &= ~0xff00; 
++ data |= 0x5000; ++ sram_write(tp, 0x821a, data); ++ sram_write(tp, 0x80da, 0x0403); ++ data = sram_read(tp, 0x80dc); ++ data &= ~0xff00; ++ data |= 0x1000; ++ sram_write(tp, 0x80dc, data); ++ sram_write(tp, 0x80b3, 0x0384); ++ sram_write(tp, 0x80b7, 0x2007); ++ data = sram_read(tp, 0x80ba); ++ data &= ~0xff00; ++ data |= 0x6c00; ++ sram_write(tp, 0x80ba, data); ++ sram_write(tp, 0x80b5, 0xf009); ++ data = sram_read(tp, 0x80bd); ++ data &= ~0xff00; ++ data |= 0x9f00; ++ sram_write(tp, 0x80bd, data); ++ sram_write(tp, 0x80c7, 0xf083); ++ sram_write(tp, 0x80dd, 0x03f0); ++ data = sram_read(tp, 0x80df); ++ data &= ~0xff00; ++ data |= 0x1000; ++ sram_write(tp, 0x80df, data); ++ sram_write(tp, 0x80cb, 0x2007); ++ data = sram_read(tp, 0x80ce); ++ data &= ~0xff00; ++ data |= 0x6c00; ++ sram_write(tp, 0x80ce, data); ++ sram_write(tp, 0x80c9, 0x8009); ++ data = sram_read(tp, 0x80d1); ++ data &= ~0xff00; ++ data |= 0x8000; ++ sram_write(tp, 0x80d1, data); ++ sram_write(tp, 0x80a3, 0x200a); ++ sram_write(tp, 0x80a5, 0xf0ad); ++ sram_write(tp, 0x809f, 0x6073); ++ sram_write(tp, 0x80a1, 0x000b); ++ data = sram_read(tp, 0x80a9); ++ data &= ~0xff00; ++ data |= 0xc000; ++ sram_write(tp, 0x80a9, data); ++ ++ if (rtl_phy_patch_request(tp, true, true)) ++ return; ++ ++ data = ocp_reg_read(tp, 0xb896); ++ data &= ~BIT(0); ++ ocp_reg_write(tp, 0xb896, data); ++ data = ocp_reg_read(tp, 0xb892); ++ data &= ~0xff00; ++ ocp_reg_write(tp, 0xb892, data); ++ ocp_reg_write(tp, 0xb88e, 0xc23e); ++ ocp_reg_write(tp, 0xb890, 0x0000); ++ ocp_reg_write(tp, 0xb88e, 0xc240); ++ ocp_reg_write(tp, 0xb890, 0x0103); ++ ocp_reg_write(tp, 0xb88e, 0xc242); ++ ocp_reg_write(tp, 0xb890, 0x0507); ++ ocp_reg_write(tp, 0xb88e, 0xc244); ++ ocp_reg_write(tp, 0xb890, 0x090b); ++ ocp_reg_write(tp, 0xb88e, 0xc246); ++ ocp_reg_write(tp, 0xb890, 0x0c0e); ++ ocp_reg_write(tp, 0xb88e, 0xc248); ++ ocp_reg_write(tp, 0xb890, 0x1012); ++ ocp_reg_write(tp, 0xb88e, 0xc24a); ++ ocp_reg_write(tp, 0xb890, 0x1416); ++ data = ocp_reg_read(tp, 0xb896); ++ data |= BIT(0); ++ ocp_reg_write(tp, 0xb896, data); ++ ++ rtl_phy_patch_request(tp, false, true); ++ ++ data = ocp_reg_read(tp, 0xa86a); ++ data |= BIT(0); ++ ocp_reg_write(tp, 0xa86a, data); ++ data = ocp_reg_read(tp, 0xa6f0); ++ data |= BIT(0); ++ ocp_reg_write(tp, 0xa6f0, data); ++ ++ ocp_reg_write(tp, 0xbfa0, 0xd70d); ++ ocp_reg_write(tp, 0xbfa2, 0x4100); ++ ocp_reg_write(tp, 0xbfa4, 0xe868); ++ ocp_reg_write(tp, 0xbfa6, 0xdc59); ++ ocp_reg_write(tp, 0xb54c, 0x3c18); ++ data = ocp_reg_read(tp, 0xbfa4); ++ data &= ~BIT(5); ++ ocp_reg_write(tp, 0xbfa4, data); ++ data = sram_read(tp, 0x817d); ++ data |= BIT(12); ++ sram_write(tp, 0x817d, data); ++ break; ++ case RTL_VER_13: ++ /* 2.5G INRX */ ++ data = ocp_reg_read(tp, 0xac46); ++ data &= ~0x00f0; ++ data |= 0x0090; ++ ocp_reg_write(tp, 0xac46, data); ++ data = ocp_reg_read(tp, 0xad30); ++ data &= ~0x0003; ++ data |= 0x0001; ++ ocp_reg_write(tp, 0xad30, data); ++ fallthrough; ++ case RTL_VER_15: ++ /* EEE parameter */ ++ ocp_reg_write(tp, 0xb87c, 0x80f5); ++ ocp_reg_write(tp, 0xb87e, 0x760e); ++ ocp_reg_write(tp, 0xb87c, 0x8107); ++ ocp_reg_write(tp, 0xb87e, 0x360e); ++ ocp_reg_write(tp, 0xb87c, 0x8551); ++ data = ocp_reg_read(tp, 0xb87e); ++ data &= ~0xff00; ++ data |= 0x0800; ++ ocp_reg_write(tp, 0xb87e, data); ++ ++ /* ADC_PGA parameter */ ++ data = ocp_reg_read(tp, 0xbf00); ++ data &= ~0xe000; ++ data |= 0xa000; ++ ocp_reg_write(tp, 0xbf00, data); ++ data = ocp_reg_read(tp, 0xbf46); ++ data &= ~0x0f00; ++ data |= 0x0300; ++ ocp_reg_write(tp, 0xbf46, 
data); ++ ++ /* Green Table-PGA, 1G full viterbi */ ++ sram_write(tp, 0x8044, 0x2417); ++ sram_write(tp, 0x804a, 0x2417); ++ sram_write(tp, 0x8050, 0x2417); ++ sram_write(tp, 0x8056, 0x2417); ++ sram_write(tp, 0x805c, 0x2417); ++ sram_write(tp, 0x8062, 0x2417); ++ sram_write(tp, 0x8068, 0x2417); ++ sram_write(tp, 0x806e, 0x2417); ++ sram_write(tp, 0x8074, 0x2417); ++ sram_write(tp, 0x807a, 0x2417); ++ ++ /* XG PLL */ ++ data = ocp_reg_read(tp, 0xbf84); ++ data &= ~0xe000; ++ data |= 0xa000; ++ ocp_reg_write(tp, 0xbf84, data); ++ break; ++ default: ++ break; ++ } ++ ++ if (rtl_phy_patch_request(tp, true, true)) ++ return; ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); ++ ocp_data |= EEE_SPDWN_EN; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); ++ ++ data = ocp_reg_read(tp, OCP_DOWN_SPEED); ++ data &= ~(EN_EEE_100 | EN_EEE_1000); ++ data |= EN_10M_CLKDIV; ++ ocp_reg_write(tp, OCP_DOWN_SPEED, data); ++ tp->ups_info._10m_ckdiv = true; ++ tp->ups_info.eee_plloff_100 = false; ++ tp->ups_info.eee_plloff_giga = false; ++ ++ data = ocp_reg_read(tp, OCP_POWER_CFG); ++ data &= ~EEE_CLKDIV_EN; ++ ocp_reg_write(tp, OCP_POWER_CFG, data); ++ tp->ups_info.eee_ckdiv = false; ++ ++ rtl_phy_patch_request(tp, false, true); ++ ++ rtl_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags)); ++ ++ data = ocp_reg_read(tp, 0xa428); ++ data &= ~BIT(9); ++ ocp_reg_write(tp, 0xa428, data); ++ data = ocp_reg_read(tp, 0xa5ea); ++ data &= ~BIT(0); ++ ocp_reg_write(tp, 0xa5ea, data); ++ tp->ups_info.lite_mode = 0; ++ ++ if (tp->eee_en) ++ rtl_eee_enable(tp, true); ++ ++ r8153_aldps_en(tp, true); ++ r8152b_enable_fc(tp); ++ r8153_u2p3en(tp, true); ++ ++ set_bit(PHY_RESET, &tp->flags); ++} ++ ++static void r8156_init(struct r8152 *tp) ++{ ++ u32 ocp_data; ++ u16 data; ++ int i; ++ ++ if (test_bit(RTL8152_UNPLUG, &tp->flags)) ++ return; ++ ++ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP); ++ ocp_data &= ~EN_ALL_SPEED; ++ ocp_write_byte(tp, MCU_TYPE_USB, USB_ECM_OP, ocp_data); ++ ++ ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, 0); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_ECM_OPTION); ++ ocp_data |= BYPASS_MAC_RESET; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_ECM_OPTION, ocp_data); ++ ++ r8153b_u1u2en(tp, false); ++ ++ for (i = 0; i < 500; i++) { ++ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & ++ AUTOLOAD_DONE) ++ break; ++ ++ msleep(20); ++ if (test_bit(RTL8152_UNPLUG, &tp->flags)) ++ return; ++ } ++ ++ data = r8153_phy_status(tp, 0); ++ if (data == PHY_STAT_EXT_INIT) { ++ data = ocp_reg_read(tp, 0xa468); ++ data &= ~(BIT(3) | BIT(1)); ++ ocp_reg_write(tp, 0xa468, data); ++ } ++ ++ data = r8152_mdio_read(tp, MII_BMCR); ++ if (data & BMCR_PDOWN) { ++ data &= ~BMCR_PDOWN; ++ r8152_mdio_write(tp, MII_BMCR, data); ++ } ++ ++ data = r8153_phy_status(tp, PHY_STAT_LAN_ON); ++ WARN_ON_ONCE(data != PHY_STAT_LAN_ON); ++ ++ r8153_u2p3en(tp, false); ++ ++ /* MSC timer = 0xfff * 8ms = 32760 ms */ ++ ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff); ++ ++ /* U1/U2/L1 idle timer. 
500 us */ ++ ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500); ++ ++ r8153b_power_cut_en(tp, false); ++ r8156_ups_en(tp, false); ++ r8153_queue_wake(tp, false); ++ rtl_runtime_suspend_enable(tp, false); ++ ++ if (tp->udev->speed >= USB_SPEED_SUPER) ++ r8153b_u1u2en(tp, true); ++ ++ usb_enable_lpm(tp->udev); ++ ++ r8156_mac_clk_spd(tp, true); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ++ ocp_data &= ~PLA_MCU_SPDWN_EN; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); ++ if (rtl8152_get_speed(tp) & LINK_STATUS) ++ ocp_data |= CUR_LINK_OK; ++ else ++ ocp_data &= ~CUR_LINK_OK; ++ ocp_data |= POLL_LINK_CHG; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); ++ ++ set_bit(GREEN_ETHERNET, &tp->flags); ++ ++ /* rx aggregation */ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ++ ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ++ ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); ++ ++ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG); ++ ocp_data |= ACT_ODMA; ++ ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data); ++ ++ rtl_tally_reset(tp); ++ ++ tp->coalesce = 15000; /* 15 us */ ++} ++ ++static void r8156b_init(struct r8152 *tp) ++{ ++ u32 ocp_data; ++ u16 data; ++ int i; ++ ++ if (test_bit(RTL8152_UNPLUG, &tp->flags)) ++ return; ++ ++ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP); ++ ocp_data &= ~EN_ALL_SPEED; ++ ocp_write_byte(tp, MCU_TYPE_USB, USB_ECM_OP, ocp_data); ++ ++ ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, 0); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_ECM_OPTION); ++ ocp_data |= BYPASS_MAC_RESET; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_ECM_OPTION, ocp_data); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL); ++ ocp_data |= RX_DETECT8; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data); ++ ++ r8153b_u1u2en(tp, false); ++ ++ switch (tp->version) { ++ case RTL_VER_13: ++ case RTL_VER_15: ++ r8156b_wait_loading_flash(tp); ++ break; ++ default: ++ break; ++ } ++ ++ for (i = 0; i < 500; i++) { ++ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & ++ AUTOLOAD_DONE) ++ break; ++ ++ msleep(20); ++ if (test_bit(RTL8152_UNPLUG, &tp->flags)) ++ return; ++ } ++ ++ data = r8153_phy_status(tp, 0); ++ if (data == PHY_STAT_EXT_INIT) { ++ data = ocp_reg_read(tp, 0xa468); ++ data &= ~(BIT(3) | BIT(1)); ++ ocp_reg_write(tp, 0xa468, data); ++ ++ data = ocp_reg_read(tp, 0xa466); ++ data &= ~BIT(0); ++ ocp_reg_write(tp, 0xa466, data); ++ } ++ ++ data = r8152_mdio_read(tp, MII_BMCR); ++ if (data & BMCR_PDOWN) { ++ data &= ~BMCR_PDOWN; ++ r8152_mdio_write(tp, MII_BMCR, data); ++ } ++ ++ data = r8153_phy_status(tp, PHY_STAT_LAN_ON); ++ ++ r8153_u2p3en(tp, false); ++ ++ /* MSC timer = 0xfff * 8ms = 32760 ms */ ++ ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff); ++ ++ /* U1/U2/L1 idle timer. 
500 us */ ++ ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500); ++ ++ r8153b_power_cut_en(tp, false); ++ r8156_ups_en(tp, false); ++ r8153_queue_wake(tp, false); ++ rtl_runtime_suspend_enable(tp, false); ++ ++ if (tp->udev->speed >= USB_SPEED_SUPER) ++ r8153b_u1u2en(tp, true); ++ ++ usb_enable_lpm(tp->udev); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RCR); ++ ocp_data &= ~SLOT_EN; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); ++ ocp_data |= FLOW_CTRL_EN; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); ++ ++ /* enable fc timer and set timer to 600 ms. */ ++ ocp_write_word(tp, MCU_TYPE_USB, USB_FC_TIMER, ++ CTRL_TIMER_EN | (600 / 8)); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL); ++ if (!(ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL) & DACK_DET_EN)) ++ ocp_data |= FLOW_CTRL_PATCH_2; ++ ocp_data &= ~AUTO_SPEEDUP; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ++ ocp_data |= FC_PATCH_TASK; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); ++ ++ r8156_mac_clk_spd(tp, true); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ++ ocp_data &= ~PLA_MCU_SPDWN_EN; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); ++ if (rtl8152_get_speed(tp) & LINK_STATUS) ++ ocp_data |= CUR_LINK_OK; ++ else ++ ocp_data &= ~CUR_LINK_OK; ++ ocp_data |= POLL_LINK_CHG; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); ++ ++ set_bit(GREEN_ETHERNET, &tp->flags); ++ ++ /* rx aggregation */ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ++ ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ++ ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); ++ ++ rtl_tally_reset(tp); ++ ++ tp->coalesce = 15000; /* 15 us */ ++} ++ + static int rtl8152_pre_reset(struct usb_interface *intf) + { + struct r8152 *tp = usb_get_intfdata(intf); +@@ -6015,6 +7947,22 @@ int rtl8152_get_link_ksettings(struct ne + + mii_ethtool_get_link_ksettings(&tp->mii, cmd); + ++ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, ++ cmd->link_modes.supported, tp->support_2500full); ++ ++ if (tp->support_2500full) { ++ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, ++ cmd->link_modes.advertising, ++ ocp_reg_read(tp, OCP_10GBT_CTRL) & MDIO_AN_10GBT_CTRL_ADV2_5G); ++ ++ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, ++ cmd->link_modes.lp_advertising, ++ ocp_reg_read(tp, OCP_10GBT_STAT) & MDIO_AN_10GBT_STAT_LP2_5G); ++ ++ if (is_speed_2500(rtl8152_get_speed(tp))) ++ cmd->base.speed = SPEED_2500; ++ } ++ + mutex_unlock(&tp->control); + + usb_autopm_put_interface(tp->intf); +@@ -6058,6 +8006,10 @@ static int rtl8152_set_link_ksettings(st + cmd->link_modes.advertising)) + advertising |= RTL_ADVERTISED_1000_FULL; + ++ if (test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, ++ cmd->link_modes.advertising)) ++ advertising |= RTL_ADVERTISED_2500_FULL; ++ + mutex_lock(&tp->control); + + ret = rtl8152_set_speed(tp, cmd->base.autoneg, cmd->base.speed, +@@ -6647,6 +8599,67 @@ static int rtl_ops_init(struct r8152 *tp + tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; + break; + ++ case RTL_VER_11: ++ tp->eee_en = true; ++ tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; ++ fallthrough; ++ case RTL_VER_10: ++ ops->init = r8156_init; ++ ops->enable = rtl8156_enable; ++ ops->disable = rtl8153_disable; ++ ops->up = rtl8156_up; ++ ops->down = 
rtl8156_down; ++ ops->unload = rtl8153_unload; ++ ops->eee_get = r8153_get_eee; ++ ops->eee_set = r8152_set_eee; ++ ops->in_nway = rtl8153_in_nway; ++ ops->hw_phy_cfg = r8156_hw_phy_cfg; ++ ops->autosuspend_en = rtl8156_runtime_enable; ++ ops->change_mtu = rtl8156_change_mtu; ++ tp->rx_buf_sz = 48 * 1024; ++ tp->support_2500full = 1; ++ break; ++ ++ case RTL_VER_12: ++ case RTL_VER_13: ++ tp->support_2500full = 1; ++ fallthrough; ++ case RTL_VER_15: ++ tp->eee_en = true; ++ tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; ++ ops->init = r8156b_init; ++ ops->enable = rtl8156b_enable; ++ ops->disable = rtl8153_disable; ++ ops->up = rtl8156_up; ++ ops->down = rtl8156_down; ++ ops->unload = rtl8153_unload; ++ ops->eee_get = r8153_get_eee; ++ ops->eee_set = r8152_set_eee; ++ ops->in_nway = rtl8153_in_nway; ++ ops->hw_phy_cfg = r8156b_hw_phy_cfg; ++ ops->autosuspend_en = rtl8156_runtime_enable; ++ ops->change_mtu = rtl8156_change_mtu; ++ tp->rx_buf_sz = 48 * 1024; ++ break; ++ ++ case RTL_VER_14: ++ ops->init = r8153c_init; ++ ops->enable = rtl8153_enable; ++ ops->disable = rtl8153_disable; ++ ops->up = rtl8153c_up; ++ ops->down = rtl8153b_down; ++ ops->unload = rtl8153_unload; ++ ops->eee_get = r8153_get_eee; ++ ops->eee_set = r8152_set_eee; ++ ops->in_nway = rtl8153_in_nway; ++ ops->hw_phy_cfg = r8153c_hw_phy_cfg; ++ ops->autosuspend_en = rtl8153c_runtime_enable; ++ ops->change_mtu = rtl8153c_change_mtu; ++ tp->rx_buf_sz = 32 * 1024; ++ tp->eee_en = true; ++ tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; ++ break; ++ + default: + ret = -ENODEV; + dev_err(&tp->intf->dev, "Unknown Device\n"); +@@ -6660,11 +8673,13 @@ static int rtl_ops_init(struct r8152 *tp + #define FIRMWARE_8153A_3 "rtl_nic/rtl8153a-3.fw" + #define FIRMWARE_8153A_4 "rtl_nic/rtl8153a-4.fw" + #define FIRMWARE_8153B_2 "rtl_nic/rtl8153b-2.fw" ++#define FIRMWARE_8153C_1 "rtl_nic/rtl8153c-1.fw" + + MODULE_FIRMWARE(FIRMWARE_8153A_2); + MODULE_FIRMWARE(FIRMWARE_8153A_3); + MODULE_FIRMWARE(FIRMWARE_8153A_4); + MODULE_FIRMWARE(FIRMWARE_8153B_2); ++MODULE_FIRMWARE(FIRMWARE_8153C_1); + + static int rtl_fw_init(struct r8152 *tp) + { +@@ -6690,6 +8705,11 @@ static int rtl_fw_init(struct r8152 *tp) + rtl_fw->pre_fw = r8153b_pre_firmware_1; + rtl_fw->post_fw = r8153b_post_firmware_1; + break; ++ case RTL_VER_14: ++ rtl_fw->fw_name = FIRMWARE_8153C_1; ++ rtl_fw->pre_fw = r8153b_pre_firmware_1; ++ rtl_fw->post_fw = r8153c_post_firmware_1; ++ break; + default: + break; + } +@@ -6745,6 +8765,27 @@ u8 rtl8152_get_version(struct usb_interf + case 0x6010: + version = RTL_VER_09; + break; ++ case 0x7010: ++ version = RTL_TEST_01; ++ break; ++ case 0x7020: ++ version = RTL_VER_10; ++ break; ++ case 0x7030: ++ version = RTL_VER_11; ++ break; ++ case 0x7400: ++ version = RTL_VER_12; ++ break; ++ case 0x7410: ++ version = RTL_VER_13; ++ break; ++ case 0x6400: ++ version = RTL_VER_14; ++ break; ++ case 0x7420: ++ version = RTL_VER_15; ++ break; + default: + version = RTL_VER_UNKNOWN; + dev_info(&intf->dev, "Unknown version 0x%04x\n", ocp_data); +@@ -6857,12 +8898,29 @@ static int rtl8152_probe(struct usb_inte + /* MTU range: 68 - 1500 or 9194 */ + netdev->min_mtu = ETH_MIN_MTU; + switch (tp->version) { ++ case RTL_VER_03: ++ case RTL_VER_04: ++ case RTL_VER_05: ++ case RTL_VER_06: ++ case RTL_VER_08: ++ case RTL_VER_09: ++ case RTL_VER_14: ++ netdev->max_mtu = size_to_mtu(9 * 1024); ++ break; ++ case RTL_VER_10: ++ case RTL_VER_11: ++ netdev->max_mtu = size_to_mtu(15 * 1024); ++ break; ++ case RTL_VER_12: ++ case RTL_VER_13: ++ case RTL_VER_15: ++ 
netdev->max_mtu = size_to_mtu(16 * 1024); ++ break; + case RTL_VER_01: + case RTL_VER_02: +- netdev->max_mtu = ETH_DATA_LEN; +- break; ++ case RTL_VER_07: + default: +- netdev->max_mtu = size_to_mtu(9 * 1024); ++ netdev->max_mtu = ETH_DATA_LEN; + break; + } + +@@ -6878,7 +8936,13 @@ static int rtl8152_probe(struct usb_inte + tp->advertising = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL | + RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL; + if (tp->mii.supports_gmii) { +- tp->speed = SPEED_1000; ++ if (tp->support_2500full && ++ tp->udev->speed >= USB_SPEED_SUPER) { ++ tp->speed = SPEED_2500; ++ tp->advertising |= RTL_ADVERTISED_2500_FULL; ++ } else { ++ tp->speed = SPEED_1000; ++ } + tp->advertising |= RTL_ADVERTISED_1000_FULL; + } + tp->duplex = DUPLEX_FULL; +@@ -6902,7 +8966,11 @@ static int rtl8152_probe(struct usb_inte + set_ethernet_addr(tp); + + usb_set_intfdata(intf, tp); +- netif_napi_add(netdev, &tp->napi, r8152_poll, RTL8152_NAPI_WEIGHT); ++ ++ if (tp->support_2500full) ++ netif_napi_add(netdev, &tp->napi, r8152_poll, 256); ++ else ++ netif_napi_add(netdev, &tp->napi, r8152_poll, 64); + + ret = register_netdev(netdev); + if (ret != 0) { +@@ -6938,7 +9006,8 @@ static void rtl8152_disconnect(struct us + unregister_netdev(tp->netdev); + tasklet_kill(&tp->tx_tl); + cancel_delayed_work_sync(&tp->hw_phy_work); +- tp->rtl_ops.unload(tp); ++ if (tp->rtl_ops.unload) ++ tp->rtl_ops.unload(tp); + rtl8152_release_firmware(tp); + free_netdev(tp->netdev); + } +@@ -6958,13 +9027,28 @@ static void rtl8152_disconnect(struct us + .idProduct = (prod), \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ ++ .bInterfaceProtocol = USB_CDC_PROTO_NONE \ ++}, \ ++{ \ ++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | \ ++ USB_DEVICE_ID_MATCH_DEVICE, \ ++ .idVendor = (vend), \ ++ .idProduct = (prod), \ ++ .bInterfaceClass = USB_CLASS_COMM, \ ++ .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + + /* table of devices that work with this driver */ + static const struct usb_device_id rtl8152_table[] = { ++ /* Realtek */ + {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8050)}, ++ {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8053)}, + {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, + {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, ++ {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8155)}, ++ {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8156)}, ++ ++ /* Microsoft */ + {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)}, + {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)}, + {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)}, diff --git a/target/linux/generic/backport-5.15/794-v5.13-r8152-support-PHY-firmware-for-RTL8156-series.patch b/target/linux/generic/backport-5.15/794-v5.13-r8152-support-PHY-firmware-for-RTL8156-series.patch new file mode 100644 index 0000000000..943c821b82 --- /dev/null +++ b/target/linux/generic/backport-5.15/794-v5.13-r8152-support-PHY-firmware-for-RTL8156-series.patch @@ -0,0 +1,691 @@ +From ca09589a72a0aa17389754fb75a5cd1a5d46818f Mon Sep 17 00:00:00 2001 +From: Hayes Wang <hayeswang@realtek.com> +Date: Fri, 16 Apr 2021 16:04:36 +0800 +Subject: [PATCH] r8152: support PHY firmware for RTL8156 series + +commit 4a51b0e8a0143b0e83d51d9c58c6416c3818a9f2 upstream. + +Support new firmware type and method for RTL8156 series. + +Signed-off-by: Hayes Wang <hayeswang@realtek.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/usb/r8152.c | 563 +++++++++++++++++++++++++++++++++++++++- + 1 file changed, 561 insertions(+), 2 deletions(-) + +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -975,8 +975,60 @@ enum rtl8152_fw_flags { + FW_FLAGS_START, + FW_FLAGS_STOP, + FW_FLAGS_NC, ++ FW_FLAGS_NC1, ++ FW_FLAGS_NC2, ++ FW_FLAGS_UC2, ++ FW_FLAGS_UC, ++ FW_FLAGS_SPEED_UP, ++ FW_FLAGS_VER, + }; + ++enum rtl8152_fw_fixup_cmd { ++ FW_FIXUP_AND = 0, ++ FW_FIXUP_OR, ++ FW_FIXUP_NOT, ++ FW_FIXUP_XOR, ++}; ++ ++struct fw_phy_set { ++ __le16 addr; ++ __le16 data; ++} __packed; ++ ++struct fw_phy_speed_up { ++ struct fw_block blk_hdr; ++ __le16 fw_offset; ++ __le16 version; ++ __le16 fw_reg; ++ __le16 reserved; ++ char info[]; ++} __packed; ++ ++struct fw_phy_ver { ++ struct fw_block blk_hdr; ++ struct fw_phy_set ver; ++ __le32 reserved; ++} __packed; ++ ++struct fw_phy_fixup { ++ struct fw_block blk_hdr; ++ struct fw_phy_set setting; ++ __le16 bit_cmd; ++ __le16 reserved; ++} __packed; ++ ++struct fw_phy_union { ++ struct fw_block blk_hdr; ++ __le16 fw_offset; ++ __le16 fw_reg; ++ struct fw_phy_set pre_set[2]; ++ struct fw_phy_set bp[8]; ++ struct fw_phy_set bp_en; ++ u8 pre_num; ++ u8 bp_num; ++ char info[]; ++} __packed; ++ + /** + * struct fw_mac - a firmware block used by RTL_FW_PLA and RTL_FW_USB. + * The layout of the firmware block is: +@@ -1081,6 +1133,15 @@ enum rtl_fw_type { + RTL_FW_PHY_START, + RTL_FW_PHY_STOP, + RTL_FW_PHY_NC, ++ RTL_FW_PHY_FIXUP, ++ RTL_FW_PHY_UNION_NC, ++ RTL_FW_PHY_UNION_NC1, ++ RTL_FW_PHY_UNION_NC2, ++ RTL_FW_PHY_UNION_UC2, ++ RTL_FW_PHY_UNION_UC, ++ RTL_FW_PHY_UNION_MISC, ++ RTL_FW_PHY_SPEED_UP, ++ RTL_FW_PHY_VER, + }; + + enum rtl_version { +@@ -4000,6 +4061,162 @@ static int rtl_post_ram_code(struct r815 + return 0; + } + ++static bool rtl8152_is_fw_phy_speed_up_ok(struct r8152 *tp, struct fw_phy_speed_up *phy) ++{ ++ u16 fw_offset; ++ u32 length; ++ bool rc = false; ++ ++ switch (tp->version) { ++ case RTL_VER_01: ++ case RTL_VER_02: ++ case RTL_VER_03: ++ case RTL_VER_04: ++ case RTL_VER_05: ++ case RTL_VER_06: ++ case RTL_VER_07: ++ case RTL_VER_08: ++ case RTL_VER_09: ++ case RTL_VER_10: ++ case RTL_VER_11: ++ case RTL_VER_12: ++ case RTL_VER_14: ++ goto out; ++ case RTL_VER_13: ++ case RTL_VER_15: ++ default: ++ break; ++ } ++ ++ fw_offset = __le16_to_cpu(phy->fw_offset); ++ length = __le32_to_cpu(phy->blk_hdr.length); ++ if (fw_offset < sizeof(*phy) || length <= fw_offset) { ++ dev_err(&tp->intf->dev, "invalid fw_offset\n"); ++ goto out; ++ } ++ ++ length -= fw_offset; ++ if (length & 3) { ++ dev_err(&tp->intf->dev, "invalid block length\n"); ++ goto out; ++ } ++ ++ if (__le16_to_cpu(phy->fw_reg) != 0x9A00) { ++ dev_err(&tp->intf->dev, "invalid register to load firmware\n"); ++ goto out; ++ } ++ ++ rc = true; ++out: ++ return rc; ++} ++ ++static bool rtl8152_is_fw_phy_ver_ok(struct r8152 *tp, struct fw_phy_ver *ver) ++{ ++ bool rc = false; ++ ++ switch (tp->version) { ++ case RTL_VER_10: ++ case RTL_VER_11: ++ case RTL_VER_12: ++ case RTL_VER_13: ++ case RTL_VER_15: ++ break; ++ default: ++ goto out; ++ } ++ ++ if (__le32_to_cpu(ver->blk_hdr.length) != sizeof(*ver)) { ++ dev_err(&tp->intf->dev, "invalid block length\n"); ++ goto out; ++ } ++ ++ if (__le16_to_cpu(ver->ver.addr) != SRAM_GPHY_FW_VER) { ++ dev_err(&tp->intf->dev, "invalid phy ver addr\n"); ++ goto out; ++ } ++ ++ rc = true; ++out: ++ return rc; ++} ++ ++static bool rtl8152_is_fw_phy_fixup_ok(struct r8152 *tp, struct fw_phy_fixup *fix) ++{ ++ bool rc = false; 
++ ++ switch (tp->version) { ++ case RTL_VER_10: ++ case RTL_VER_11: ++ case RTL_VER_12: ++ case RTL_VER_13: ++ case RTL_VER_15: ++ break; ++ default: ++ goto out; ++ } ++ ++ if (__le32_to_cpu(fix->blk_hdr.length) != sizeof(*fix)) { ++ dev_err(&tp->intf->dev, "invalid block length\n"); ++ goto out; ++ } ++ ++ if (__le16_to_cpu(fix->setting.addr) != OCP_PHY_PATCH_CMD || ++ __le16_to_cpu(fix->setting.data) != BIT(7)) { ++ dev_err(&tp->intf->dev, "invalid phy fixup\n"); ++ goto out; ++ } ++ ++ rc = true; ++out: ++ return rc; ++} ++ ++static bool rtl8152_is_fw_phy_union_ok(struct r8152 *tp, struct fw_phy_union *phy) ++{ ++ u16 fw_offset; ++ u32 length; ++ bool rc = false; ++ ++ switch (tp->version) { ++ case RTL_VER_10: ++ case RTL_VER_11: ++ case RTL_VER_12: ++ case RTL_VER_13: ++ case RTL_VER_15: ++ break; ++ default: ++ goto out; ++ } ++ ++ fw_offset = __le16_to_cpu(phy->fw_offset); ++ length = __le32_to_cpu(phy->blk_hdr.length); ++ if (fw_offset < sizeof(*phy) || length <= fw_offset) { ++ dev_err(&tp->intf->dev, "invalid fw_offset\n"); ++ goto out; ++ } ++ ++ length -= fw_offset; ++ if (length & 1) { ++ dev_err(&tp->intf->dev, "invalid block length\n"); ++ goto out; ++ } ++ ++ if (phy->pre_num > 2) { ++ dev_err(&tp->intf->dev, "invalid pre_num %d\n", phy->pre_num); ++ goto out; ++ } ++ ++ if (phy->bp_num > 8) { ++ dev_err(&tp->intf->dev, "invalid bp_num %d\n", phy->bp_num); ++ goto out; ++ } ++ ++ rc = true; ++out: ++ return rc; ++} ++ + static bool rtl8152_is_fw_phy_nc_ok(struct r8152 *tp, struct fw_phy_nc *phy) + { + u32 length; +@@ -4320,6 +4537,10 @@ static long rtl8152_check_firmware(struc + case RTL_FW_PHY_START: + if (test_bit(FW_FLAGS_START, &fw_flags) || + test_bit(FW_FLAGS_NC, &fw_flags) || ++ test_bit(FW_FLAGS_NC1, &fw_flags) || ++ test_bit(FW_FLAGS_NC2, &fw_flags) || ++ test_bit(FW_FLAGS_UC2, &fw_flags) || ++ test_bit(FW_FLAGS_UC, &fw_flags) || + test_bit(FW_FLAGS_STOP, &fw_flags)) { + dev_err(&tp->intf->dev, + "check PHY_START fail\n"); +@@ -4368,7 +4589,153 @@ static long rtl8152_check_firmware(struc + goto fail; + } + __set_bit(FW_FLAGS_NC, &fw_flags); ++ break; ++ case RTL_FW_PHY_UNION_NC: ++ if (!test_bit(FW_FLAGS_START, &fw_flags) || ++ test_bit(FW_FLAGS_NC1, &fw_flags) || ++ test_bit(FW_FLAGS_NC2, &fw_flags) || ++ test_bit(FW_FLAGS_UC2, &fw_flags) || ++ test_bit(FW_FLAGS_UC, &fw_flags) || ++ test_bit(FW_FLAGS_STOP, &fw_flags)) { ++ dev_err(&tp->intf->dev, "PHY_UNION_NC out of order\n"); ++ goto fail; ++ } ++ ++ if (test_bit(FW_FLAGS_NC, &fw_flags)) { ++ dev_err(&tp->intf->dev, "multiple PHY_UNION_NC encountered\n"); ++ goto fail; ++ } + ++ if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { ++ dev_err(&tp->intf->dev, "check PHY_UNION_NC failed\n"); ++ goto fail; ++ } ++ __set_bit(FW_FLAGS_NC, &fw_flags); ++ break; ++ case RTL_FW_PHY_UNION_NC1: ++ if (!test_bit(FW_FLAGS_START, &fw_flags) || ++ test_bit(FW_FLAGS_NC2, &fw_flags) || ++ test_bit(FW_FLAGS_UC2, &fw_flags) || ++ test_bit(FW_FLAGS_UC, &fw_flags) || ++ test_bit(FW_FLAGS_STOP, &fw_flags)) { ++ dev_err(&tp->intf->dev, "PHY_UNION_NC1 out of order\n"); ++ goto fail; ++ } ++ ++ if (test_bit(FW_FLAGS_NC1, &fw_flags)) { ++ dev_err(&tp->intf->dev, "multiple PHY NC1 encountered\n"); ++ goto fail; ++ } ++ ++ if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { ++ dev_err(&tp->intf->dev, "check PHY_UNION_NC1 failed\n"); ++ goto fail; ++ } ++ __set_bit(FW_FLAGS_NC1, &fw_flags); ++ break; ++ case RTL_FW_PHY_UNION_NC2: ++ if (!test_bit(FW_FLAGS_START, &fw_flags) || ++ test_bit(FW_FLAGS_UC2, 
&fw_flags) || ++ test_bit(FW_FLAGS_UC, &fw_flags) || ++ test_bit(FW_FLAGS_STOP, &fw_flags)) { ++ dev_err(&tp->intf->dev, "PHY_UNION_NC2 out of order\n"); ++ goto fail; ++ } ++ ++ if (test_bit(FW_FLAGS_NC2, &fw_flags)) { ++ dev_err(&tp->intf->dev, "multiple PHY NC2 encountered\n"); ++ goto fail; ++ } ++ ++ if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { ++ dev_err(&tp->intf->dev, "check PHY_UNION_NC2 failed\n"); ++ goto fail; ++ } ++ __set_bit(FW_FLAGS_NC2, &fw_flags); ++ break; ++ case RTL_FW_PHY_UNION_UC2: ++ if (!test_bit(FW_FLAGS_START, &fw_flags) || ++ test_bit(FW_FLAGS_UC, &fw_flags) || ++ test_bit(FW_FLAGS_STOP, &fw_flags)) { ++ dev_err(&tp->intf->dev, "PHY_UNION_UC2 out of order\n"); ++ goto fail; ++ } ++ ++ if (test_bit(FW_FLAGS_UC2, &fw_flags)) { ++ dev_err(&tp->intf->dev, "multiple PHY UC2 encountered\n"); ++ goto fail; ++ } ++ ++ if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { ++ dev_err(&tp->intf->dev, "check PHY_UNION_UC2 failed\n"); ++ goto fail; ++ } ++ __set_bit(FW_FLAGS_UC2, &fw_flags); ++ break; ++ case RTL_FW_PHY_UNION_UC: ++ if (!test_bit(FW_FLAGS_START, &fw_flags) || ++ test_bit(FW_FLAGS_STOP, &fw_flags)) { ++ dev_err(&tp->intf->dev, "PHY_UNION_UC out of order\n"); ++ goto fail; ++ } ++ ++ if (test_bit(FW_FLAGS_UC, &fw_flags)) { ++ dev_err(&tp->intf->dev, "multiple PHY UC encountered\n"); ++ goto fail; ++ } ++ ++ if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { ++ dev_err(&tp->intf->dev, "check PHY_UNION_UC failed\n"); ++ goto fail; ++ } ++ __set_bit(FW_FLAGS_UC, &fw_flags); ++ break; ++ case RTL_FW_PHY_UNION_MISC: ++ if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { ++ dev_err(&tp->intf->dev, "check RTL_FW_PHY_UNION_MISC failed\n"); ++ goto fail; ++ } ++ break; ++ case RTL_FW_PHY_FIXUP: ++ if (!rtl8152_is_fw_phy_fixup_ok(tp, (struct fw_phy_fixup *)block)) { ++ dev_err(&tp->intf->dev, "check PHY fixup failed\n"); ++ goto fail; ++ } ++ break; ++ case RTL_FW_PHY_SPEED_UP: ++ if (test_bit(FW_FLAGS_SPEED_UP, &fw_flags)) { ++ dev_err(&tp->intf->dev, "multiple PHY firmware encountered"); ++ goto fail; ++ } ++ ++ if (!rtl8152_is_fw_phy_speed_up_ok(tp, (struct fw_phy_speed_up *)block)) { ++ dev_err(&tp->intf->dev, "check PHY speed up failed\n"); ++ goto fail; ++ } ++ __set_bit(FW_FLAGS_SPEED_UP, &fw_flags); ++ break; ++ case RTL_FW_PHY_VER: ++ if (test_bit(FW_FLAGS_START, &fw_flags) || ++ test_bit(FW_FLAGS_NC, &fw_flags) || ++ test_bit(FW_FLAGS_NC1, &fw_flags) || ++ test_bit(FW_FLAGS_NC2, &fw_flags) || ++ test_bit(FW_FLAGS_UC2, &fw_flags) || ++ test_bit(FW_FLAGS_UC, &fw_flags) || ++ test_bit(FW_FLAGS_STOP, &fw_flags)) { ++ dev_err(&tp->intf->dev, "Invalid order to set PHY version\n"); ++ goto fail; ++ } ++ ++ if (test_bit(FW_FLAGS_VER, &fw_flags)) { ++ dev_err(&tp->intf->dev, "multiple PHY version encountered"); ++ goto fail; ++ } ++ ++ if (!rtl8152_is_fw_phy_ver_ok(tp, (struct fw_phy_ver *)block)) { ++ dev_err(&tp->intf->dev, "check PHY version failed\n"); ++ goto fail; ++ } ++ __set_bit(FW_FLAGS_VER, &fw_flags); + break; + default: + dev_warn(&tp->intf->dev, "Unknown type %u is found\n", +@@ -4391,6 +4758,143 @@ fail: + return ret; + } + ++static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy, bool wait) ++{ ++ u32 len; ++ u8 *data; ++ ++ if (sram_read(tp, SRAM_GPHY_FW_VER) >= __le16_to_cpu(phy->version)) { ++ dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n"); ++ return; ++ } ++ ++ len = __le32_to_cpu(phy->blk_hdr.length); ++ len -= 
__le16_to_cpu(phy->fw_offset); ++ data = (u8 *)phy + __le16_to_cpu(phy->fw_offset); ++ ++ if (rtl_phy_patch_request(tp, true, wait)) ++ return; ++ ++ while (len) { ++ u32 ocp_data, size; ++ int i; ++ ++ if (len < 2048) ++ size = len; ++ else ++ size = 2048; ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL); ++ ocp_data |= GPHY_PATCH_DONE | BACKUP_RESTRORE; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL, ocp_data); ++ ++ generic_ocp_write(tp, __le16_to_cpu(phy->fw_reg), 0xff, size, data, MCU_TYPE_USB); ++ ++ data += size; ++ len -= size; ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL); ++ ocp_data |= POL_GPHY_PATCH; ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL, ocp_data); ++ ++ for (i = 0; i < 1000; i++) { ++ if (!(ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL) & POL_GPHY_PATCH)) ++ break; ++ } ++ ++ if (i == 1000) { ++ dev_err(&tp->intf->dev, "ram code speedup mode timeout\n"); ++ return; ++ } ++ } ++ ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base); ++ rtl_phy_patch_request(tp, false, wait); ++ ++ if (sram_read(tp, SRAM_GPHY_FW_VER) == __le16_to_cpu(phy->version)) ++ dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info); ++ else ++ dev_err(&tp->intf->dev, "ram code speedup mode fail\n"); ++} ++ ++static int rtl8152_fw_phy_ver(struct r8152 *tp, struct fw_phy_ver *phy_ver) ++{ ++ u16 ver_addr, ver; ++ ++ ver_addr = __le16_to_cpu(phy_ver->ver.addr); ++ ver = __le16_to_cpu(phy_ver->ver.data); ++ ++ if (sram_read(tp, ver_addr) >= ver) { ++ dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n"); ++ return 0; ++ } ++ ++ sram_write(tp, ver_addr, ver); ++ ++ dev_dbg(&tp->intf->dev, "PHY firmware version %x\n", ver); ++ ++ return ver; ++} ++ ++static void rtl8152_fw_phy_fixup(struct r8152 *tp, struct fw_phy_fixup *fix) ++{ ++ u16 addr, data; ++ ++ addr = __le16_to_cpu(fix->setting.addr); ++ data = ocp_reg_read(tp, addr); ++ ++ switch (__le16_to_cpu(fix->bit_cmd)) { ++ case FW_FIXUP_AND: ++ data &= __le16_to_cpu(fix->setting.data); ++ break; ++ case FW_FIXUP_OR: ++ data |= __le16_to_cpu(fix->setting.data); ++ break; ++ case FW_FIXUP_NOT: ++ data &= ~__le16_to_cpu(fix->setting.data); ++ break; ++ case FW_FIXUP_XOR: ++ data ^= __le16_to_cpu(fix->setting.data); ++ break; ++ default: ++ return; ++ } ++ ++ ocp_reg_write(tp, addr, data); ++ ++ dev_dbg(&tp->intf->dev, "applied ocp %x %x\n", addr, data); ++} ++ ++static void rtl8152_fw_phy_union_apply(struct r8152 *tp, struct fw_phy_union *phy) ++{ ++ __le16 *data; ++ u32 length; ++ int i, num; ++ ++ num = phy->pre_num; ++ for (i = 0; i < num; i++) ++ sram_write(tp, __le16_to_cpu(phy->pre_set[i].addr), ++ __le16_to_cpu(phy->pre_set[i].data)); ++ ++ length = __le32_to_cpu(phy->blk_hdr.length); ++ length -= __le16_to_cpu(phy->fw_offset); ++ num = length / 2; ++ data = (__le16 *)((u8 *)phy + __le16_to_cpu(phy->fw_offset)); ++ ++ ocp_reg_write(tp, OCP_SRAM_ADDR, __le16_to_cpu(phy->fw_reg)); ++ for (i = 0; i < num; i++) ++ ocp_reg_write(tp, OCP_SRAM_DATA, __le16_to_cpu(data[i])); ++ ++ num = phy->bp_num; ++ for (i = 0; i < num; i++) ++ sram_write(tp, __le16_to_cpu(phy->bp[i].addr), __le16_to_cpu(phy->bp[i].data)); ++ ++ if (phy->bp_num && phy->bp_en.addr) ++ sram_write(tp, __le16_to_cpu(phy->bp_en.addr), __le16_to_cpu(phy->bp_en.data)); ++ ++ dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info); ++} ++ + static void rtl8152_fw_phy_nc_apply(struct r8152 *tp, struct fw_phy_nc *phy) + { + u16 mode_reg, bp_index; +@@ -4444,6 +4948,12 @@ static void 
rtl8152_fw_mac_apply(struct + return; + } + ++ fw_ver_reg = __le16_to_cpu(mac->fw_ver_reg); ++ if (fw_ver_reg && ocp_read_byte(tp, MCU_TYPE_USB, fw_ver_reg) >= mac->fw_ver_data) { ++ dev_dbg(&tp->intf->dev, "%s firmware has been the newest\n", type ? "PLA" : "USB"); ++ return; ++ } ++ + rtl_clear_bp(tp, type); + + /* Enable backup/restore of MACDBG. This is required after clearing PLA +@@ -4479,7 +4989,6 @@ static void rtl8152_fw_mac_apply(struct + ocp_write_word(tp, type, bp_en_addr, + __le16_to_cpu(mac->bp_en_value)); + +- fw_ver_reg = __le16_to_cpu(mac->fw_ver_reg); + if (fw_ver_reg) + ocp_write_byte(tp, MCU_TYPE_USB, fw_ver_reg, + mac->fw_ver_data); +@@ -4494,7 +5003,7 @@ static void rtl8152_apply_firmware(struc + struct fw_header *fw_hdr; + struct fw_phy_patch_key *key; + u16 key_addr = 0; +- int i; ++ int i, patch_phy = 1; + + if (IS_ERR_OR_NULL(rtl_fw->fw)) + return; +@@ -4516,17 +5025,40 @@ static void rtl8152_apply_firmware(struc + rtl8152_fw_mac_apply(tp, (struct fw_mac *)block); + break; + case RTL_FW_PHY_START: ++ if (!patch_phy) ++ break; + key = (struct fw_phy_patch_key *)block; + key_addr = __le16_to_cpu(key->key_reg); + rtl_pre_ram_code(tp, key_addr, __le16_to_cpu(key->key_data), !power_cut); + break; + case RTL_FW_PHY_STOP: ++ if (!patch_phy) ++ break; + WARN_ON(!key_addr); + rtl_post_ram_code(tp, key_addr, !power_cut); + break; + case RTL_FW_PHY_NC: + rtl8152_fw_phy_nc_apply(tp, (struct fw_phy_nc *)block); + break; ++ case RTL_FW_PHY_VER: ++ patch_phy = rtl8152_fw_phy_ver(tp, (struct fw_phy_ver *)block); ++ break; ++ case RTL_FW_PHY_UNION_NC: ++ case RTL_FW_PHY_UNION_NC1: ++ case RTL_FW_PHY_UNION_NC2: ++ case RTL_FW_PHY_UNION_UC2: ++ case RTL_FW_PHY_UNION_UC: ++ case RTL_FW_PHY_UNION_MISC: ++ if (patch_phy) ++ rtl8152_fw_phy_union_apply(tp, (struct fw_phy_union *)block); ++ break; ++ case RTL_FW_PHY_FIXUP: ++ if (patch_phy) ++ rtl8152_fw_phy_fixup(tp, (struct fw_phy_fixup *)block); ++ break; ++ case RTL_FW_PHY_SPEED_UP: ++ rtl_ram_code_speed_up(tp, (struct fw_phy_speed_up *)block, !power_cut); ++ break; + default: + break; + } +@@ -5034,6 +5566,21 @@ static int r8153c_post_firmware_1(struct + return 0; + } + ++static int r8156a_post_firmware_1(struct r8152 *tp) ++{ ++ u32 ocp_data; ++ ++ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1); ++ ocp_data |= FW_IP_RESET_EN; ++ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data); ++ ++ /* Modify U3PHY parameter for compatibility issue */ ++ ocp_write_dword(tp, MCU_TYPE_USB, USB_UPHY3_MDCMDIO, 0x4026840e); ++ ocp_write_dword(tp, MCU_TYPE_USB, USB_UPHY3_MDCMDIO, 0x4001acc9); ++ ++ return 0; ++} ++ + static void r8153_aldps_en(struct r8152 *tp, bool enable) + { + u16 data; +@@ -8674,12 +9221,16 @@ static int rtl_ops_init(struct r8152 *tp + #define FIRMWARE_8153A_4 "rtl_nic/rtl8153a-4.fw" + #define FIRMWARE_8153B_2 "rtl_nic/rtl8153b-2.fw" + #define FIRMWARE_8153C_1 "rtl_nic/rtl8153c-1.fw" ++#define FIRMWARE_8156A_2 "rtl_nic/rtl8156a-2.fw" ++#define FIRMWARE_8156B_2 "rtl_nic/rtl8156b-2.fw" + + MODULE_FIRMWARE(FIRMWARE_8153A_2); + MODULE_FIRMWARE(FIRMWARE_8153A_3); + MODULE_FIRMWARE(FIRMWARE_8153A_4); + MODULE_FIRMWARE(FIRMWARE_8153B_2); + MODULE_FIRMWARE(FIRMWARE_8153C_1); ++MODULE_FIRMWARE(FIRMWARE_8156A_2); ++MODULE_FIRMWARE(FIRMWARE_8156B_2); + + static int rtl_fw_init(struct r8152 *tp) + { +@@ -8705,6 +9256,14 @@ static int rtl_fw_init(struct r8152 *tp) + rtl_fw->pre_fw = r8153b_pre_firmware_1; + rtl_fw->post_fw = r8153b_post_firmware_1; + break; ++ case RTL_VER_11: ++ rtl_fw->fw_name = FIRMWARE_8156A_2; ++ 
rtl_fw->post_fw = r8156a_post_firmware_1; ++ break; ++ case RTL_VER_13: ++ case RTL_VER_15: ++ rtl_fw->fw_name = FIRMWARE_8156B_2; ++ break; + case RTL_VER_14: + rtl_fw->fw_name = FIRMWARE_8153C_1; + rtl_fw->pre_fw = r8153b_pre_firmware_1; diff --git a/target/linux/generic/backport-5.15/795-v5.13-r8152-search-the-configuration-of-vendor-mode.patch b/target/linux/generic/backport-5.15/795-v5.13-r8152-search-the-configuration-of-vendor-mode.patch new file mode 100644 index 0000000000..9ce0e778cf --- /dev/null +++ b/target/linux/generic/backport-5.15/795-v5.13-r8152-search-the-configuration-of-vendor-mode.patch @@ -0,0 +1,79 @@ +From 579f58dd2819910354753bc5489fc1588fe9cfe2 Mon Sep 17 00:00:00 2001 +From: Hayes Wang <hayeswang@realtek.com> +Date: Fri, 16 Apr 2021 16:04:37 +0800 +Subject: [PATCH] r8152: search the configuration of vendor mode + +commit c2198943e33b100ed21dfb636c8fa6baef841e9d upstream. + +The vendor mode is not always at config #1, so it is necessary to +set the correct configuration number. + +Signed-off-by: Hayes Wang <hayeswang@realtek.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/usb/r8152.c | 39 +++++++++++++++++++++++++++++++++++---- + 1 file changed, 35 insertions(+), 4 deletions(-) + +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -30,7 +30,7 @@ + #include <linux/usb/r8152.h> + + /* Information for net-next */ +-#define NETNEXT_VERSION "11" ++#define NETNEXT_VERSION "12" + + /* Information for net */ + #define NET_VERSION "11" +@@ -8131,6 +8131,39 @@ static void r8156b_init(struct r8152 *tp + tp->coalesce = 15000; /* 15 us */ + } + ++static bool rtl_vendor_mode(struct usb_interface *intf) ++{ ++ struct usb_host_interface *alt = intf->cur_altsetting; ++ struct usb_device *udev; ++ struct usb_host_config *c; ++ int i, num_configs; ++ ++ if (alt->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) ++ return true; ++ ++ /* The vendor mode is not always config #1, so to find it out. 
*/ ++ udev = interface_to_usbdev(intf); ++ c = udev->config; ++ num_configs = udev->descriptor.bNumConfigurations; ++ for (i = 0; i < num_configs; (i++, c++)) { ++ struct usb_interface_descriptor *desc = NULL; ++ ++ if (c->desc.bNumInterfaces > 0) ++ desc = &c->intf_cache[0]->altsetting->desc; ++ else ++ continue; ++ ++ if (desc->bInterfaceClass == USB_CLASS_VENDOR_SPEC) { ++ usb_driver_set_configuration(udev, c->desc.bConfigurationValue); ++ break; ++ } ++ } ++ ++ WARN_ON_ONCE(i == num_configs); ++ ++ return false; ++} ++ + static int rtl8152_pre_reset(struct usb_interface *intf) + { + struct r8152 *tp = usb_get_intfdata(intf); +@@ -9369,10 +9402,8 @@ static int rtl8152_probe(struct usb_inte + if (version == RTL_VER_UNKNOWN) + return -ENODEV; + +- if (udev->actconfig->desc.bConfigurationValue != 1) { +- usb_driver_set_configuration(udev, 1); ++ if (!rtl_vendor_mode(intf)) + return -ENODEV; +- } + + if (intf->cur_altsetting->desc.bNumEndpoints < 3) + return -ENODEV; diff --git a/target/linux/generic/backport-5.15/800-v5.13-0001-firmware-bcm47xx_nvram-rename-finding-function-and-i.patch b/target/linux/generic/backport-5.15/800-v5.13-0001-firmware-bcm47xx_nvram-rename-finding-function-and-i.patch new file mode 100644 index 0000000000..19938704b7 --- /dev/null +++ b/target/linux/generic/backport-5.15/800-v5.13-0001-firmware-bcm47xx_nvram-rename-finding-function-and-i.patch @@ -0,0 +1,80 @@ +From fb009cbdd0693bd633f11e99526617b3d392cfad Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Mon, 8 Mar 2021 10:03:16 +0100 +Subject: [PATCH] firmware: bcm47xx_nvram: rename finding function and its + variables +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +1. Use "bcm47xx_" function name prefix for consistency +2. It takes flash start as argument so s/iobase/flash_start/ +3. "off" was used for finding flash end so just call it "flash_size" + +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de> +--- + drivers/firmware/broadcom/bcm47xx_nvram.c | 24 ++++++++++++----------- + 1 file changed, 13 insertions(+), 11 deletions(-) + +--- a/drivers/firmware/broadcom/bcm47xx_nvram.c ++++ b/drivers/firmware/broadcom/bcm47xx_nvram.c +@@ -48,11 +48,13 @@ static u32 find_nvram_size(void __iomem + return 0; + } + +-/* Probe for NVRAM header */ +-static int nvram_find_and_copy(void __iomem *iobase, u32 lim) ++/** ++ * bcm47xx_nvram_find_and_copy - find NVRAM on flash mapping & copy it ++ */ ++static int bcm47xx_nvram_find_and_copy(void __iomem *flash_start, size_t res_size) + { + struct nvram_header __iomem *header; +- u32 off; ++ size_t flash_size; + u32 size; + + if (nvram_len) { +@@ -61,25 +63,25 @@ static int nvram_find_and_copy(void __io + } + + /* TODO: when nvram is on nand flash check for bad blocks first. 
*/ +- off = FLASH_MIN; +- while (off <= lim) { ++ flash_size = FLASH_MIN; ++ while (flash_size <= res_size) { + /* Windowed flash access */ +- size = find_nvram_size(iobase + off); ++ size = find_nvram_size(flash_start + flash_size); + if (size) { +- header = (struct nvram_header *)(iobase + off - size); ++ header = (struct nvram_header *)(flash_start + flash_size - size); + goto found; + } +- off <<= 1; ++ flash_size <<= 1; + } + + /* Try embedded NVRAM at 4 KB and 1 KB as last resorts */ +- header = (struct nvram_header *)(iobase + 4096); ++ header = (struct nvram_header *)(flash_start + 4096); + if (header->magic == NVRAM_MAGIC) { + size = NVRAM_SPACE; + goto found; + } + +- header = (struct nvram_header *)(iobase + 1024); ++ header = (struct nvram_header *)(flash_start + 1024); + if (header->magic == NVRAM_MAGIC) { + size = NVRAM_SPACE; + goto found; +@@ -124,7 +126,7 @@ int bcm47xx_nvram_init_from_mem(u32 base + if (!iobase) + return -ENOMEM; + +- err = nvram_find_and_copy(iobase, lim); ++ err = bcm47xx_nvram_find_and_copy(iobase, lim); + + iounmap(iobase); + diff --git a/target/linux/generic/backport-5.15/800-v5.13-0002-firmware-bcm47xx_nvram-add-helper-checking-for-NVRAM.patch b/target/linux/generic/backport-5.15/800-v5.13-0002-firmware-bcm47xx_nvram-add-helper-checking-for-NVRAM.patch new file mode 100644 index 0000000000..6ab072883d --- /dev/null +++ b/target/linux/generic/backport-5.15/800-v5.13-0002-firmware-bcm47xx_nvram-add-helper-checking-for-NVRAM.patch @@ -0,0 +1,90 @@ +From 0a24b51a3264a3f942a75025ea5ff6133c8989b0 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Mon, 8 Mar 2021 10:03:17 +0100 +Subject: [PATCH] firmware: bcm47xx_nvram: add helper checking for NVRAM +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This avoids duplicating code doing casting and checking for NVRAM magic. 
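To make the de-duplication concrete, the same check at a call site before and after the helper looks roughly like this (illustrative C sketch reusing the names from the patch, not additional driver code):

    /* before: every caller open-codes the cast and the magic comparison */
    if (((struct nvram_header *)(flash_start + offset))->magic == NVRAM_MAGIC)
            goto found;

    /* after: the cast and the comparison live in one helper */
    if (bcm47xx_nvram_is_valid(flash_start + offset))
            goto found;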
+ +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de> +--- + drivers/firmware/broadcom/bcm47xx_nvram.c | 30 ++++++++++++++--------- + 1 file changed, 18 insertions(+), 12 deletions(-) + +--- a/drivers/firmware/broadcom/bcm47xx_nvram.c ++++ b/drivers/firmware/broadcom/bcm47xx_nvram.c +@@ -34,14 +34,20 @@ static char nvram_buf[NVRAM_SPACE]; + static size_t nvram_len; + static const u32 nvram_sizes[] = {0x6000, 0x8000, 0xF000, 0x10000}; + ++/** ++ * bcm47xx_nvram_is_valid - check for a valid NVRAM at specified memory ++ */ ++static bool bcm47xx_nvram_is_valid(void __iomem *nvram) ++{ ++ return ((struct nvram_header *)nvram)->magic == NVRAM_MAGIC; ++} ++ + static u32 find_nvram_size(void __iomem *end) + { +- struct nvram_header __iomem *header; + int i; + + for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) { +- header = (struct nvram_header *)(end - nvram_sizes[i]); +- if (header->magic == NVRAM_MAGIC) ++ if (bcm47xx_nvram_is_valid(end - nvram_sizes[i])) + return nvram_sizes[i]; + } + +@@ -55,6 +61,7 @@ static int bcm47xx_nvram_find_and_copy(v + { + struct nvram_header __iomem *header; + size_t flash_size; ++ size_t offset; + u32 size; + + if (nvram_len) { +@@ -68,31 +75,30 @@ static int bcm47xx_nvram_find_and_copy(v + /* Windowed flash access */ + size = find_nvram_size(flash_start + flash_size); + if (size) { +- header = (struct nvram_header *)(flash_start + flash_size - size); ++ offset = flash_size - size; + goto found; + } + flash_size <<= 1; + } + + /* Try embedded NVRAM at 4 KB and 1 KB as last resorts */ +- header = (struct nvram_header *)(flash_start + 4096); +- if (header->magic == NVRAM_MAGIC) { +- size = NVRAM_SPACE; ++ ++ offset = 4096; ++ if (bcm47xx_nvram_is_valid(flash_start + offset)) + goto found; +- } + +- header = (struct nvram_header *)(flash_start + 1024); +- if (header->magic == NVRAM_MAGIC) { +- size = NVRAM_SPACE; ++ offset = 1024; ++ if (bcm47xx_nvram_is_valid(flash_start + offset)) + goto found; +- } + + pr_err("no nvram found\n"); + return -ENXIO; + + found: ++ header = (struct nvram_header *)(flash_start + offset); + __ioread32_copy(nvram_buf, header, sizeof(*header) / 4); + nvram_len = ((struct nvram_header *)(nvram_buf))->len; ++ size = res_size - offset; + if (nvram_len > size) { + pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n"); + nvram_len = size; diff --git a/target/linux/generic/backport-5.15/800-v5.13-0003-firmware-bcm47xx_nvram-extract-code-copying-NVRAM.patch b/target/linux/generic/backport-5.15/800-v5.13-0003-firmware-bcm47xx_nvram-extract-code-copying-NVRAM.patch new file mode 100644 index 0000000000..a1351f1197 --- /dev/null +++ b/target/linux/generic/backport-5.15/800-v5.13-0003-firmware-bcm47xx_nvram-extract-code-copying-NVRAM.patch @@ -0,0 +1,80 @@ +From 298923cf999cecd2ef06df126f85a3d68da8c4d8 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Mon, 8 Mar 2021 10:03:18 +0100 +Subject: [PATCH] firmware: bcm47xx_nvram: extract code copying NVRAM +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This simplifies function finding NVRAM. It doesn't directly deal with +NVRAM structure anymore and is a bit smaller. 
+ +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de> +--- + drivers/firmware/broadcom/bcm47xx_nvram.c | 43 +++++++++++++---------- + 1 file changed, 25 insertions(+), 18 deletions(-) + +--- a/drivers/firmware/broadcom/bcm47xx_nvram.c ++++ b/drivers/firmware/broadcom/bcm47xx_nvram.c +@@ -55,11 +55,34 @@ static u32 find_nvram_size(void __iomem + } + + /** ++ * bcm47xx_nvram_copy - copy NVRAM to internal buffer ++ */ ++static void bcm47xx_nvram_copy(void __iomem *nvram_start, size_t res_size) ++{ ++ struct nvram_header __iomem *header = nvram_start; ++ size_t copy_size; ++ ++ copy_size = header->len; ++ if (copy_size > res_size) { ++ pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n"); ++ copy_size = res_size; ++ } ++ if (copy_size >= NVRAM_SPACE) { ++ pr_err("nvram on flash (%zu bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n", ++ copy_size, NVRAM_SPACE - 1); ++ copy_size = NVRAM_SPACE - 1; ++ } ++ ++ __ioread32_copy(nvram_buf, nvram_start, DIV_ROUND_UP(copy_size, 4)); ++ nvram_buf[NVRAM_SPACE - 1] = '\0'; ++ nvram_len = copy_size; ++} ++ ++/** + * bcm47xx_nvram_find_and_copy - find NVRAM on flash mapping & copy it + */ + static int bcm47xx_nvram_find_and_copy(void __iomem *flash_start, size_t res_size) + { +- struct nvram_header __iomem *header; + size_t flash_size; + size_t offset; + u32 size; +@@ -95,23 +118,7 @@ static int bcm47xx_nvram_find_and_copy(v + return -ENXIO; + + found: +- header = (struct nvram_header *)(flash_start + offset); +- __ioread32_copy(nvram_buf, header, sizeof(*header) / 4); +- nvram_len = ((struct nvram_header *)(nvram_buf))->len; +- size = res_size - offset; +- if (nvram_len > size) { +- pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n"); +- nvram_len = size; +- } +- if (nvram_len >= NVRAM_SPACE) { +- pr_err("nvram on flash (%zu bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n", +- nvram_len, NVRAM_SPACE - 1); +- nvram_len = NVRAM_SPACE - 1; +- } +- /* proceed reading data after header */ +- __ioread32_copy(nvram_buf + sizeof(*header), header + 1, +- DIV_ROUND_UP(nvram_len, 4)); +- nvram_buf[NVRAM_SPACE - 1] = '\0'; ++ bcm47xx_nvram_copy(flash_start + offset, res_size - offset); + + return 0; + } diff --git a/target/linux/generic/backport-5.15/800-v5.13-0004-firmware-bcm47xx_nvram-look-for-NVRAM-with-for-inste.patch b/target/linux/generic/backport-5.15/800-v5.13-0004-firmware-bcm47xx_nvram-look-for-NVRAM-with-for-inste.patch new file mode 100644 index 0000000000..059a13220b --- /dev/null +++ b/target/linux/generic/backport-5.15/800-v5.13-0004-firmware-bcm47xx_nvram-look-for-NVRAM-with-for-inste.patch @@ -0,0 +1,37 @@ +From 98b68324f67236e8c9152976535dc1f27fb67ba8 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Mon, 8 Mar 2021 10:03:19 +0100 +Subject: [PATCH] firmware: bcm47xx_nvram: look for NVRAM with for instead of + while +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This loop requires variable initialization, stop condition and post +iteration increment. It's pretty much a for loop definition. 
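The equivalence the commit message appeals to can be spelled out as a generic C sketch (placeholder code, not taken from the driver; check_nvram_at() stands in for the NVRAM size probe):

    size_t flash_size;

    /* while form: initialization, condition and step are spread across lines */
    flash_size = FLASH_MIN;
    while (flash_size <= res_size) {
            check_nvram_at(flash_size);
            flash_size <<= 1;
    }

    /* equivalent for form: all three loop clauses sit in the loop header */
    for (flash_size = FLASH_MIN; flash_size <= res_size; flash_size <<= 1)
            check_nvram_at(flash_size);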
+ +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de> +--- + drivers/firmware/broadcom/bcm47xx_nvram.c | 4 +--- + 1 file changed, 1 insertion(+), 3 deletions(-) + +--- a/drivers/firmware/broadcom/bcm47xx_nvram.c ++++ b/drivers/firmware/broadcom/bcm47xx_nvram.c +@@ -93,15 +93,13 @@ static int bcm47xx_nvram_find_and_copy(v + } + + /* TODO: when nvram is on nand flash check for bad blocks first. */ +- flash_size = FLASH_MIN; +- while (flash_size <= res_size) { ++ for (flash_size = FLASH_MIN; flash_size <= res_size; flash_size <<= 1) { + /* Windowed flash access */ + size = find_nvram_size(flash_start + flash_size); + if (size) { + offset = flash_size - size; + goto found; + } +- flash_size <<= 1; + } + + /* Try embedded NVRAM at 4 KB and 1 KB as last resorts */ diff --git a/target/linux/generic/backport-5.15/800-v5.13-0005-firmware-bcm47xx_nvram-inline-code-checking-NVRAM-si.patch b/target/linux/generic/backport-5.15/800-v5.13-0005-firmware-bcm47xx_nvram-inline-code-checking-NVRAM-si.patch new file mode 100644 index 0000000000..21d250049e --- /dev/null +++ b/target/linux/generic/backport-5.15/800-v5.13-0005-firmware-bcm47xx_nvram-inline-code-checking-NVRAM-si.patch @@ -0,0 +1,70 @@ +From f52da4ccfec9192e17f5c16260dfdd6d3ea76f65 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl> +Date: Mon, 8 Mar 2021 10:03:20 +0100 +Subject: [PATCH] firmware: bcm47xx_nvram: inline code checking NVRAM size +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Separated function was not improving code quality much (or at all). +Moreover it expected possible flash end address as argument and it was +returning NVRAM size. + +The new code always operates on offsets which means less logic and less +calculations. + +Signed-off-by: Rafał Miłecki <rafal@milecki.pl> +Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de> +--- + drivers/firmware/broadcom/bcm47xx_nvram.c | 25 +++++++---------------- + 1 file changed, 7 insertions(+), 18 deletions(-) + +--- a/drivers/firmware/broadcom/bcm47xx_nvram.c ++++ b/drivers/firmware/broadcom/bcm47xx_nvram.c +@@ -42,18 +42,6 @@ static bool bcm47xx_nvram_is_valid(void + return ((struct nvram_header *)nvram)->magic == NVRAM_MAGIC; + } + +-static u32 find_nvram_size(void __iomem *end) +-{ +- int i; +- +- for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) { +- if (bcm47xx_nvram_is_valid(end - nvram_sizes[i])) +- return nvram_sizes[i]; +- } +- +- return 0; +-} +- + /** + * bcm47xx_nvram_copy - copy NVRAM to internal buffer + */ +@@ -85,7 +73,7 @@ static int bcm47xx_nvram_find_and_copy(v + { + size_t flash_size; + size_t offset; +- u32 size; ++ int i; + + if (nvram_len) { + pr_warn("nvram already initialized\n"); +@@ -93,12 +81,13 @@ static int bcm47xx_nvram_find_and_copy(v + } + + /* TODO: when nvram is on nand flash check for bad blocks first. 
*/ ++ ++ /* Try every possible flash size and check for NVRAM at its end */ + for (flash_size = FLASH_MIN; flash_size <= res_size; flash_size <<= 1) { +- /* Windowed flash access */ +- size = find_nvram_size(flash_start + flash_size); +- if (size) { +- offset = flash_size - size; +- goto found; ++ for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) { ++ offset = flash_size - nvram_sizes[i]; ++ if (bcm47xx_nvram_is_valid(flash_start + offset)) ++ goto found; + } + } + diff --git a/target/linux/generic/backport-5.15/810-v5.13-usb-ehci-add-spurious-flag-to-disable-overcurrent-ch.patch b/target/linux/generic/backport-5.15/810-v5.13-usb-ehci-add-spurious-flag-to-disable-overcurrent-ch.patch new file mode 100644 index 0000000000..6b75b08717 --- /dev/null +++ b/target/linux/generic/backport-5.15/810-v5.13-usb-ehci-add-spurious-flag-to-disable-overcurrent-ch.patch @@ -0,0 +1,88 @@ +From 2d5ba37461013253d2ff0a3641b727fd32ea97a9 Mon Sep 17 00:00:00 2001 +From: Florian Fainelli <florian@openwrt.org> +Date: Tue, 23 Feb 2021 18:44:53 +0100 +Subject: [PATCH 1/3] usb: ehci: add spurious flag to disable overcurrent + checking +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This patch adds an ignore_oc flag which can be set by EHCI controller +not supporting or wanting to disable overcurrent checking. The EHCI +platform data in include/linux/usb/ehci_pdriver.h is also augmented to +take advantage of this new flag. + +Signed-off-by: Florian Fainelli <florian@openwrt.org> +Signed-off-by: Álvaro Fernández Rojas <noltari@gmail.com> +Link: https://lore.kernel.org/r/20210223174455.1378-2-noltari@gmail.com +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/usb/host/ehci-hcd.c | 2 +- + drivers/usb/host/ehci-hub.c | 4 ++-- + drivers/usb/host/ehci-platform.c | 2 ++ + drivers/usb/host/ehci.h | 1 + + include/linux/usb/ehci_pdriver.h | 1 + + 5 files changed, 7 insertions(+), 3 deletions(-) + +--- a/drivers/usb/host/ehci-hcd.c ++++ b/drivers/usb/host/ehci-hcd.c +@@ -660,7 +660,7 @@ static int ehci_run (struct usb_hcd *hcd + "USB %x.%x started, EHCI %x.%02x%s\n", + ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), + temp >> 8, temp & 0xff, +- ignore_oc ? ", overcurrent ignored" : ""); ++ (ignore_oc || ehci->spurious_oc) ? ", overcurrent ignored" : ""); + + ehci_writel(ehci, INTR_MASK, + &ehci->regs->intr_enable); /* Turn On Interrupts */ +--- a/drivers/usb/host/ehci-hub.c ++++ b/drivers/usb/host/ehci-hub.c +@@ -643,7 +643,7 @@ ehci_hub_status_data (struct usb_hcd *hc + * always set, seem to clear PORT_OCC and PORT_CSC when writing to + * PORT_POWER; that's surprising, but maybe within-spec. 
+ */ +- if (!ignore_oc) ++ if (!ignore_oc && !ehci->spurious_oc) + mask = PORT_CSC | PORT_PEC | PORT_OCC; + else + mask = PORT_CSC | PORT_PEC; +@@ -1013,7 +1013,7 @@ int ehci_hub_control( + if (temp & PORT_PEC) + status |= USB_PORT_STAT_C_ENABLE << 16; + +- if ((temp & PORT_OCC) && !ignore_oc){ ++ if ((temp & PORT_OCC) && (!ignore_oc && !ehci->spurious_oc)){ + status |= USB_PORT_STAT_C_OVERCURRENT << 16; + + /* +--- a/drivers/usb/host/ehci-platform.c ++++ b/drivers/usb/host/ehci-platform.c +@@ -333,6 +333,8 @@ static int ehci_platform_probe(struct pl + hcd->has_tt = 1; + if (pdata->reset_on_resume) + priv->reset_on_resume = true; ++ if (pdata->spurious_oc) ++ ehci->spurious_oc = 1; + + #ifndef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO + if (ehci->big_endian_mmio) { +--- a/drivers/usb/host/ehci.h ++++ b/drivers/usb/host/ehci.h +@@ -219,6 +219,7 @@ struct ehci_hcd { /* one per controlle + unsigned need_oc_pp_cycle:1; /* MPC834X port power */ + unsigned imx28_write_fix:1; /* For Freescale i.MX28 */ + unsigned is_aspeed:1; ++ unsigned spurious_oc:1; + + /* required for usb32 quirk */ + #define OHCI_CTRL_HCFS (3 << 6) +--- a/include/linux/usb/ehci_pdriver.h ++++ b/include/linux/usb/ehci_pdriver.h +@@ -50,6 +50,7 @@ struct usb_ehci_pdata { + unsigned no_io_watchdog:1; + unsigned reset_on_resume:1; + unsigned dma_mask_64:1; ++ unsigned spurious_oc:1; + + /* Turn on all power and clocks */ + int (*power_on)(struct platform_device *pdev); diff --git a/target/linux/generic/backport-5.15/811-v5.13-usb-host-ehci-platform-add-spurious_oc-DT-support.patch b/target/linux/generic/backport-5.15/811-v5.13-usb-host-ehci-platform-add-spurious_oc-DT-support.patch new file mode 100644 index 0000000000..0094d47718 --- /dev/null +++ b/target/linux/generic/backport-5.15/811-v5.13-usb-host-ehci-platform-add-spurious_oc-DT-support.patch @@ -0,0 +1,31 @@ +From 4da57dbbffdfa7fe4e2b70b047fc5ff95ff25a3d Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?=C3=81lvaro=20Fern=C3=A1ndez=20Rojas?= <noltari@gmail.com> +Date: Tue, 23 Feb 2021 18:44:55 +0100 +Subject: [PATCH 3/3] usb: host: ehci-platform: add spurious_oc DT support +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Over-current reporting isn't supported on some platforms such as bcm63xx. +These devices will incorrectly report over-current if this flag isn't properly +activated. 
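This patch covers device-tree platforms with a new "spurious-oc" boolean property, while the preceding patch exposes the same knob through struct usb_ehci_pdata; as a rough consumer-side sketch (hypothetical board code, not part of either patch), a board-file user would opt in like this:

    #include <linux/usb/ehci_pdriver.h>

    /* hypothetical board file: tell ehci-platform to ignore over-current bits */
    static struct usb_ehci_pdata board_ehci_pdata = {
            .spurious_oc = 1,
    };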
+ +Signed-off-by: Álvaro Fernández Rojas <noltari@gmail.com> +Link: https://lore.kernel.org/r/20210223174455.1378-4-noltari@gmail.com +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/usb/host/ehci-platform.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/usb/host/ehci-platform.c ++++ b/drivers/usb/host/ehci-platform.c +@@ -286,6 +286,9 @@ static int ehci_platform_probe(struct pl + if (of_property_read_bool(dev->dev.of_node, "big-endian")) + ehci->big_endian_mmio = ehci->big_endian_desc = 1; + ++ if (of_property_read_bool(dev->dev.of_node, "spurious-oc")) ++ ehci->spurious_oc = 1; ++ + if (of_property_read_bool(dev->dev.of_node, + "needs-reset-on-resume")) + priv->reset_on_resume = true; diff --git a/target/linux/generic/backport-5.15/820-v5.13-make-pci_host_common_probe-declare-its-reliance-on-msi-domains.patch b/target/linux/generic/backport-5.15/820-v5.13-make-pci_host_common_probe-declare-its-reliance-on-msi-domains.patch new file mode 100644 index 0000000000..8ca2b78f74 --- /dev/null +++ b/target/linux/generic/backport-5.15/820-v5.13-make-pci_host_common_probe-declare-its-reliance-on-msi-domains.patch @@ -0,0 +1,34 @@ +From 9ec37efb87832b578d7972fc80b04d94f5d2bbe3 Mon Sep 17 00:00:00 2001 +From: Marc Zyngier <maz@kernel.org> +Date: Tue, 30 Mar 2021 16:11:42 +0100 +Subject: PCI/MSI: Make pci_host_common_probe() declare its reliance on MSI + domains + +The generic PCI host driver relies on MSI domains for MSIs to +be provided to its end-points. Make this dependency explicit. + +This cures the warnings occuring on arm/arm64 VMs when booted +with PCI virtio devices and no MSI controller (no GICv3 ITS, +for example). + +It is likely that other drivers will need to express the same +dependency. + +Link: https://lore.kernel.org/r/20210330151145.997953-12-maz@kernel.org +Signed-off-by: Marc Zyngier <maz@kernel.org> +Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> +Acked-by: Bjorn Helgaas <bhelgaas@google.com> +--- + drivers/pci/controller/pci-host-common.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/pci/controller/pci-host-common.c ++++ b/drivers/pci/controller/pci-host-common.c +@@ -77,6 +77,7 @@ int pci_host_common_probe(struct platfor + + bridge->sysdata = cfg; + bridge->ops = (struct pci_ops *)&ops->pci_ops; ++ bridge->msi_domain = true; + + platform_set_drvdata(pdev, bridge); + diff --git a/target/linux/generic/backport-5.15/821-v5.13-let-pci-host-bridges-declar-their-reliance-on-msi-domains.patch b/target/linux/generic/backport-5.15/821-v5.13-let-pci-host-bridges-declar-their-reliance-on-msi-domains.patch new file mode 100644 index 0000000000..854dc34c42 --- /dev/null +++ b/target/linux/generic/backport-5.15/821-v5.13-let-pci-host-bridges-declar-their-reliance-on-msi-domains.patch @@ -0,0 +1,44 @@ +From 94e89b145371b68fa0ea294855adebcd03e0522e Mon Sep 17 00:00:00 2001 +From: Marc Zyngier <maz@kernel.org> +Date: Tue, 30 Mar 2021 16:11:41 +0100 +Subject: PCI/MSI: Let PCI host bridges declare their reliance on MSI domains + +There is a whole class of host bridges that cannot know whether +MSIs will be provided or not, as they rely on other blocks +to provide the MSI functionnality, using MSI domains. This is +the case for example on systems that use the ARM GIC architecture. + +Introduce a new attribute ('msi_domain') indicating that implicit +dependency, and use this property to set the NO_MSI flag when +no MSI domain is found at probe time. 
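Any other host-bridge driver with the same implicit dependency opts in the way the neighbouring patches do for pci-host-common and the MediaTek controller; a minimal sketch (hypothetical driver, struct my_pcie and my_pcie_ops are assumed names):

    /* inside a hypothetical host-bridge driver's probe routine */
    struct pci_host_bridge *bridge;
    struct my_pcie *priv;

    bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*priv));
    if (!bridge)
            return -ENOMEM;

    priv = pci_host_bridge_priv(bridge);
    bridge->sysdata = priv;
    bridge->ops = &my_pcie_ops;
    bridge->msi_domain = true;      /* MSIs come from an external MSI domain */

    return pci_host_probe(bridge);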
+ +Link: https://lore.kernel.org/r/20210330151145.997953-11-maz@kernel.org +Signed-off-by: Marc Zyngier <maz@kernel.org> +Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> +Acked-by: Bjorn Helgaas <bhelgaas@google.com> +--- + drivers/pci/probe.c | 2 ++ + include/linux/pci.h | 1 + + 2 files changed, 3 insertions(+) + +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -925,6 +925,8 @@ static int pci_register_host_bridge(stru + device_enable_async_suspend(bus->bridge); + pci_set_bus_of_node(bus); + pci_set_bus_msi_domain(bus); ++ if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev)) ++ bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI; + + if (!parent) + set_dev_node(bus->bridge, pcibus_to_node(bus)); +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -547,6 +547,7 @@ struct pci_host_bridge { + unsigned int native_dpc:1; /* OS may use PCIe DPC */ + unsigned int preserve_config:1; /* Preserve FW resource setup */ + unsigned int size_windows:1; /* Enable root bus sizing */ ++ unsigned int msi_domain:1; /* Bridge wants MSI domain */ + + /* Resource alignment requirements */ + resource_size_t (*align_resource)(struct pci_dev *dev, diff --git a/target/linux/generic/backport-5.15/822-v5.13-advertise-lack-of-built-in-msi-handling.patch b/target/linux/generic/backport-5.15/822-v5.13-advertise-lack-of-built-in-msi-handling.patch new file mode 100644 index 0000000000..c11aedd814 --- /dev/null +++ b/target/linux/generic/backport-5.15/822-v5.13-advertise-lack-of-built-in-msi-handling.patch @@ -0,0 +1,59 @@ +From 645e9c38383d7fcde2784ee537fa18ec9bed54d9 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Tue, 30 Mar 2021 16:11:43 +0100 +Subject: PCI: mediatek: Advertise lack of built-in MSI handling + +Some Mediatek host bridges cannot handle MSIs, which is sad. +This also results in an ugly warning at device probe time, +as the core PCI code wasn't told that MSIs were not available. + +Advertise this fact to the rest of the core PCI code by +using the 'msi_domain' attribute, which still opens the possibility +for another block to provide the MSI functionnality. 
+ +[maz: commit message, switched over to msi_domain attribute] + +Link: https://lore.kernel.org/r/20210330151145.997953-13-maz@kernel.org +Reported-by: Frank Wunderlich <frank-w@public-files.de> +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Signed-off-by: Marc Zyngier <maz@kernel.org> +Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> +Acked-by: Bjorn Helgaas <bhelgaas@google.com> +--- + drivers/pci/controller/pcie-mediatek.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/drivers/pci/controller/pcie-mediatek.c ++++ b/drivers/pci/controller/pcie-mediatek.c +@@ -143,6 +143,7 @@ struct mtk_pcie_port; + * struct mtk_pcie_soc - differentiate between host generations + * @need_fix_class_id: whether this host's class ID needed to be fixed or not + * @need_fix_device_id: whether this host's device ID needed to be fixed or not ++ * @no_msi: Bridge has no MSI support, and relies on an external block + * @device_id: device ID which this host need to be fixed + * @ops: pointer to configuration access functions + * @startup: pointer to controller setting functions +@@ -151,6 +152,7 @@ struct mtk_pcie_port; + struct mtk_pcie_soc { + bool need_fix_class_id; + bool need_fix_device_id; ++ bool no_msi; + unsigned int device_id; + struct pci_ops *ops; + int (*startup)(struct mtk_pcie_port *port); +@@ -1087,6 +1089,7 @@ static int mtk_pcie_probe(struct platfor + + host->ops = pcie->soc->ops; + host->sysdata = pcie; ++ host->msi_domain = pcie->soc->no_msi; + + err = pci_host_probe(host); + if (err) +@@ -1176,6 +1179,7 @@ static const struct dev_pm_ops mtk_pcie_ + }; + + static const struct mtk_pcie_soc mtk_pcie_soc_v1 = { ++ .no_msi = true, + .ops = &mtk_pcie_ops, + .startup = mtk_pcie_startup_port, + }; diff --git a/target/linux/generic/backport-5.15/830-v5.14-leds-lp55xx-Initialize-enable-GPIO-direction-to-outp.patch b/target/linux/generic/backport-5.15/830-v5.14-leds-lp55xx-Initialize-enable-GPIO-direction-to-outp.patch new file mode 100644 index 0000000000..75b9947392 --- /dev/null +++ b/target/linux/generic/backport-5.15/830-v5.14-leds-lp55xx-Initialize-enable-GPIO-direction-to-outp.patch @@ -0,0 +1,28 @@ +From a5d3d1adc95f4ac5968b7a77ee95a3abbbb96f49 Mon Sep 17 00:00:00 2001 +From: Doug Zobel <dougdev334@gmail.com> +Date: Mon, 10 May 2021 15:40:00 -0500 +Subject: [PATCH] leds: lp55xx: Initialize enable GPIO direction to output + +The "Convert to use GPIO descriptors" commit changed the +initialization of the enable GPIO from GPIOF_DIR_OUT to +GPIOD_ASIS. This breaks systems where the GPIO does not +default to output. Changing the enable initialization +to GPIOD_OUT_LOW. 
+ +Signed-off-by: Doug Zobel <dougdev334@gmail.com> +Signed-off-by: Pavel Machek <pavel@ucw.cz> +--- + drivers/leds/leds-lp55xx-common.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/leds/leds-lp55xx-common.c ++++ b/drivers/leds/leds-lp55xx-common.c +@@ -694,7 +694,7 @@ struct lp55xx_platform_data *lp55xx_of_p + of_property_read_u8(np, "clock-mode", &pdata->clock_mode); + + pdata->enable_gpiod = devm_gpiod_get_optional(dev, "enable", +- GPIOD_ASIS); ++ GPIOD_OUT_LOW); + if (IS_ERR(pdata->enable_gpiod)) + return ERR_CAST(pdata->enable_gpiod); + diff --git a/target/linux/generic/backport-5.15/850-v5.17-0001-PCI-pci-bridge-emul-Add-description-for-class_revisi.patch b/target/linux/generic/backport-5.15/850-v5.17-0001-PCI-pci-bridge-emul-Add-description-for-class_revisi.patch new file mode 100644 index 0000000000..19a4be2a9d --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0001-PCI-pci-bridge-emul-Add-description-for-class_revisi.patch @@ -0,0 +1,44 @@ +From 9319230ac147067652b58fe849ffe0ceec098665 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Tue, 30 Nov 2021 18:29:03 +0100 +Subject: [PATCH] PCI: pci-bridge-emul: Add description for class_revision + field +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +The current assignment to the class_revision member + + class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16); + +can make the reader think that class is at high 16 bits of the member and +revision at low 16 bits. + +In reality, class is at high 24 bits, but the class for PCI Bridge Normal +Decode is PCI_CLASS_BRIDGE_PCI << 8. + +Change the assignment and add a comment to make this clearer. + +Link: https://lore.kernel.org/r/20211130172913.9727-2-kabel@kernel.org +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> +--- + drivers/pci/pci-bridge-emul.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/drivers/pci/pci-bridge-emul.c ++++ b/drivers/pci/pci-bridge-emul.c +@@ -284,7 +284,11 @@ int pci_bridge_emul_init(struct pci_brid + { + BUILD_BUG_ON(sizeof(bridge->conf) != PCI_BRIDGE_CONF_END); + +- bridge->conf.class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16); ++ /* ++ * class_revision: Class is high 24 bits and revision is low 8 bit of this member, ++ * while class for PCI Bridge Normal Decode has the 24-bit value: PCI_CLASS_BRIDGE_PCI << 8 ++ */ ++ bridge->conf.class_revision |= cpu_to_le32((PCI_CLASS_BRIDGE_PCI << 8) << 8); + bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE; + bridge->conf.cache_line_size = 0x10; + bridge->conf.status = cpu_to_le16(PCI_STATUS_CAP_LIST); diff --git a/target/linux/generic/backport-5.15/850-v5.17-0002-PCI-pci-bridge-emul-Add-definitions-for-missing-capa.patch b/target/linux/generic/backport-5.15/850-v5.17-0002-PCI-pci-bridge-emul-Add-definitions-for-missing-capa.patch new file mode 100644 index 0000000000..3dd82710e6 --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0002-PCI-pci-bridge-emul-Add-definitions-for-missing-capa.patch @@ -0,0 +1,73 @@ +From 8ea673a8b30b4a32516b8adabb15e2a68ff02ec8 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Tue, 30 Nov 2021 18:29:04 +0100 +Subject: [PATCH] PCI: pci-bridge-emul: Add definitions for missing + capabilities registers +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 
8bit + +pci-bridge-emul driver already allocates buffer for capabilities up to the +PCI_EXP_SLTSTA2 register, but does not define bit access behavior for these +registers. Add these missing definitions. + +Link: https://lore.kernel.org/r/20211130172913.9727-3-kabel@kernel.org +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> +--- + drivers/pci/pci-bridge-emul.c | 43 +++++++++++++++++++++++++++++++++++ + 1 file changed, 43 insertions(+) + +--- a/drivers/pci/pci-bridge-emul.c ++++ b/drivers/pci/pci-bridge-emul.c +@@ -270,6 +270,49 @@ struct pci_bridge_reg_behavior pcie_cap_ + .ro = GENMASK(15, 0) | PCI_EXP_RTSTA_PENDING, + .w1c = PCI_EXP_RTSTA_PME, + }, ++ ++ [PCI_EXP_DEVCAP2 / 4] = { ++ /* ++ * Device capabilities 2 register has reserved bits [30:27]. ++ * Also bits [26:24] are reserved for non-upstream ports. ++ */ ++ .ro = BIT(31) | GENMASK(23, 0), ++ }, ++ ++ [PCI_EXP_DEVCTL2 / 4] = { ++ /* ++ * Device control 2 register is RW. Bit 11 is reserved for ++ * non-upstream ports. ++ * ++ * Device status 2 register is reserved. ++ */ ++ .rw = GENMASK(15, 12) | GENMASK(10, 0), ++ }, ++ ++ [PCI_EXP_LNKCAP2 / 4] = { ++ /* Link capabilities 2 register has reserved bits [30:25] and 0. */ ++ .ro = BIT(31) | GENMASK(24, 1), ++ }, ++ ++ [PCI_EXP_LNKCTL2 / 4] = { ++ /* ++ * Link control 2 register is RW. ++ * ++ * Link status 2 register has bits 5, 15 W1C; ++ * bits 10, 11 reserved and others are RO. ++ */ ++ .rw = GENMASK(15, 0), ++ .w1c = (BIT(15) | BIT(5)) << 16, ++ .ro = (GENMASK(14, 12) | GENMASK(9, 6) | GENMASK(4, 0)) << 16, ++ }, ++ ++ [PCI_EXP_SLTCAP2 / 4] = { ++ /* Slot capabilities 2 register is reserved. */ ++ }, ++ ++ [PCI_EXP_SLTCTL2 / 4] = { ++ /* Both Slot control 2 and Slot status 2 registers are reserved. */ ++ }, + }; + + /* diff --git a/target/linux/generic/backport-5.15/850-v5.17-0003-PCI-aardvark-Add-support-for-DEVCAP2-DEVCTL2-LNKCAP2.patch b/target/linux/generic/backport-5.15/850-v5.17-0003-PCI-aardvark-Add-support-for-DEVCAP2-DEVCTL2-LNKCAP2.patch new file mode 100644 index 0000000000..ce38719009 --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0003-PCI-aardvark-Add-support-for-DEVCAP2-DEVCTL2-LNKCAP2.patch @@ -0,0 +1,61 @@ +From 1d3e170344dff2cef8827db6c09909b78cbc11d7 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Tue, 30 Nov 2021 18:29:05 +0100 +Subject: [PATCH] PCI: aardvark: Add support for DEVCAP2, DEVCTL2, LNKCAP2 and + LNKCTL2 registers on emulated bridge +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +PCI aardvark hardware supports access to DEVCAP2, DEVCTL2, LNKCAP2 and +LNKCTL2 configuration registers of PCIe core via PCIE_CORE_PCIEXP_CAP. +Export them via emulated software root bridge. 
+ +Link: https://lore.kernel.org/r/20211130172913.9727-4-kabel@kernel.org +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> +--- + drivers/pci/controller/pci-aardvark.c | 15 +++++++++++---- + 1 file changed, 11 insertions(+), 4 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -881,8 +881,13 @@ advk_pci_bridge_emul_pcie_conf_read(stru + + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: ++ case PCI_EXP_DEVCAP2: ++ case PCI_EXP_DEVCTL2: ++ case PCI_EXP_LNKCAP2: ++ case PCI_EXP_LNKCTL2: + *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg); + return PCI_BRIDGE_EMUL_HANDLED; ++ + default: + return PCI_BRIDGE_EMUL_NOT_HANDLED; + } +@@ -896,10 +901,6 @@ advk_pci_bridge_emul_pcie_conf_write(str + struct advk_pcie *pcie = bridge->data; + + switch (reg) { +- case PCI_EXP_DEVCTL: +- advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg); +- break; +- + case PCI_EXP_LNKCTL: + advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg); + if (new & PCI_EXP_LNKCTL_RL) +@@ -921,6 +922,12 @@ advk_pci_bridge_emul_pcie_conf_write(str + advk_writel(pcie, new, PCIE_ISR0_REG); + break; + ++ case PCI_EXP_DEVCTL: ++ case PCI_EXP_DEVCTL2: ++ case PCI_EXP_LNKCTL2: ++ advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg); ++ break; ++ + default: + break; + } diff --git a/target/linux/generic/backport-5.15/850-v5.17-0004-PCI-aardvark-Clear-all-MSIs-at-setup.patch b/target/linux/generic/backport-5.15/850-v5.17-0004-PCI-aardvark-Clear-all-MSIs-at-setup.patch new file mode 100644 index 0000000000..66c7e60202 --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0004-PCI-aardvark-Clear-all-MSIs-at-setup.patch @@ -0,0 +1,59 @@ +From 7d8dc1f7cd007a7ce94c5b4c20d63a8b8d6d7751 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Tue, 30 Nov 2021 18:29:06 +0100 +Subject: [PATCH] PCI: aardvark: Clear all MSIs at setup +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +We already clear all the other interrupts (ISR0, ISR1, HOST_CTRL_INT). + +Define a new macro PCIE_MSI_ALL_MASK and do the same clearing for MSIs, +to ensure that we don't start receiving spurious interrupts. 
+ +Use this new mask in advk_pcie_handle_msi(); + +Link: https://lore.kernel.org/r/20211130172913.9727-5-kabel@kernel.org +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> +--- + drivers/pci/controller/pci-aardvark.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -114,6 +114,7 @@ + #define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54) + #define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58) + #define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C) ++#define PCIE_MSI_ALL_MASK GENMASK(31, 0) + #define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C) + #define PCIE_MSI_DATA_MASK GENMASK(15, 0) + +@@ -577,6 +578,7 @@ static void advk_pcie_setup_hw(struct ad + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); + + /* Clear all interrupts */ ++ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG); + advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG); + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); + advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); +@@ -589,7 +591,7 @@ static void advk_pcie_setup_hw(struct ad + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); + + /* Unmask all MSIs */ +- advk_writel(pcie, 0, PCIE_MSI_MASK_REG); ++ advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG); + + /* Enable summary interrupt for GIC SPI source */ + reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK); +@@ -1399,7 +1401,7 @@ static void advk_pcie_handle_msi(struct + + msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG); + msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG); +- msi_status = msi_val & ~msi_mask; ++ msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK); + + for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) { + if (!(BIT(msi_idx) & msi_status)) diff --git a/target/linux/generic/backport-5.15/850-v5.17-0005-PCI-aardvark-Comment-actions-in-driver-remove-method.patch b/target/linux/generic/backport-5.15/850-v5.17-0005-PCI-aardvark-Comment-actions-in-driver-remove-method.patch new file mode 100644 index 0000000000..8870e47b40 --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0005-PCI-aardvark-Comment-actions-in-driver-remove-method.patch @@ -0,0 +1,34 @@ +From a4ca7948e1d47275f8f3e5023243440c40561916 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Tue, 30 Nov 2021 18:29:07 +0100 +Subject: [PATCH] PCI: aardvark: Comment actions in driver remove method +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Add two more comments into the advk_pcie_remove() method. 
+ +Link: https://lore.kernel.org/r/20211130172913.9727-6-kabel@kernel.org +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> +--- + drivers/pci/controller/pci-aardvark.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1698,11 +1698,13 @@ static int advk_pcie_remove(struct platf + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); + int i; + ++ /* Remove PCI bus with all devices */ + pci_lock_rescan_remove(); + pci_stop_root_bus(bridge->bus); + pci_remove_root_bus(bridge->bus); + pci_unlock_rescan_remove(); + ++ /* Remove IRQ domains */ + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); + diff --git a/target/linux/generic/backport-5.15/850-v5.17-0006-PCI-aardvark-Disable-bus-mastering-when-unbinding-dr.patch b/target/linux/generic/backport-5.15/850-v5.17-0006-PCI-aardvark-Disable-bus-mastering-when-unbinding-dr.patch new file mode 100644 index 0000000000..f1c3f3bb13 --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0006-PCI-aardvark-Disable-bus-mastering-when-unbinding-dr.patch @@ -0,0 +1,41 @@ +From a46f2f6dd4093438d9615dfbf5c0fea2a9835dba Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Tue, 30 Nov 2021 18:29:08 +0100 +Subject: [PATCH] PCI: aardvark: Disable bus mastering when unbinding driver +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Ensure that after driver unbind PCIe cards are not able to forward +memory and I/O requests in the upstream direction. + +Link: https://lore.kernel.org/r/20211130172913.9727-7-kabel@kernel.org +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> +--- + drivers/pci/controller/pci-aardvark.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1696,6 +1696,7 @@ static int advk_pcie_remove(struct platf + { + struct advk_pcie *pcie = platform_get_drvdata(pdev); + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); ++ u32 val; + int i; + + /* Remove PCI bus with all devices */ +@@ -1704,6 +1705,11 @@ static int advk_pcie_remove(struct platf + pci_remove_root_bus(bridge->bus); + pci_unlock_rescan_remove(); + ++ /* Disable Root Bridge I/O space, memory space and bus mastering */ ++ val = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); ++ val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); ++ advk_writel(pcie, val, PCIE_CORE_CMD_STATUS_REG); ++ + /* Remove IRQ domains */ + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); diff --git a/target/linux/generic/backport-5.15/850-v5.17-0007-PCI-aardvark-Mask-all-interrupts-when-unbinding-driv.patch b/target/linux/generic/backport-5.15/850-v5.17-0007-PCI-aardvark-Mask-all-interrupts-when-unbinding-driv.patch new file mode 100644 index 0000000000..7add045cab --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0007-PCI-aardvark-Mask-all-interrupts-when-unbinding-driv.patch @@ -0,0 +1,48 @@ +From 13bcdf07cb2ecff5d45d2c141df2539b15211448 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Tue, 30 Nov 2021 18:29:09 +0100 +Subject: [PATCH] PCI: aardvark: Mask all interrupts when unbinding driver +MIME-Version: 
1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Ensure that no interrupt can be triggered after driver unbind. + +Link: https://lore.kernel.org/r/20211130172913.9727-8-kabel@kernel.org +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> +--- + drivers/pci/controller/pci-aardvark.c | 21 +++++++++++++++++++++ + 1 file changed, 21 insertions(+) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1710,6 +1710,27 @@ static int advk_pcie_remove(struct platf + val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + advk_writel(pcie, val, PCIE_CORE_CMD_STATUS_REG); + ++ /* Disable MSI */ ++ val = advk_readl(pcie, PCIE_CORE_CTRL2_REG); ++ val &= ~PCIE_CORE_CTRL2_MSI_ENABLE; ++ advk_writel(pcie, val, PCIE_CORE_CTRL2_REG); ++ ++ /* Clear MSI address */ ++ advk_writel(pcie, 0, PCIE_MSI_ADDR_LOW_REG); ++ advk_writel(pcie, 0, PCIE_MSI_ADDR_HIGH_REG); ++ ++ /* Mask all interrupts */ ++ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG); ++ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG); ++ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); ++ advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_MASK_REG); ++ ++ /* Clear all interrupts */ ++ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG); ++ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG); ++ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); ++ advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); ++ + /* Remove IRQ domains */ + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); diff --git a/target/linux/generic/backport-5.15/850-v5.17-0008-PCI-aardvark-Fix-memory-leak-in-driver-unbind.patch b/target/linux/generic/backport-5.15/850-v5.17-0008-PCI-aardvark-Fix-memory-leak-in-driver-unbind.patch new file mode 100644 index 0000000000..f86d2bd61f --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0008-PCI-aardvark-Fix-memory-leak-in-driver-unbind.patch @@ -0,0 +1,33 @@ +From 2f040a17f5061457ae95035326d3159eddc1e5cc Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Tue, 30 Nov 2021 18:29:10 +0100 +Subject: [PATCH] PCI: aardvark: Fix memory leak in driver unbind +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Free config space for emulated root bridge when unbinding driver to fix +memory leak. Do it after disabling and masking all interrupts, since +aardvark interrupt handler accesses config space of emulated root +bridge. 
+ +Link: https://lore.kernel.org/r/20211130172913.9727-9-kabel@kernel.org +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> +--- + drivers/pci/controller/pci-aardvark.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1735,6 +1735,9 @@ static int advk_pcie_remove(struct platf + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); + ++ /* Free config space for emulated root bridge */ ++ pci_bridge_emul_cleanup(&pcie->bridge); ++ + /* Disable outbound address windows mapping */ + for (i = 0; i < OB_WIN_COUNT; i++) + advk_pcie_disable_ob_win(pcie, i); diff --git a/target/linux/generic/backport-5.15/850-v5.17-0009-PCI-aardvark-Assert-PERST-when-unbinding-driver.patch b/target/linux/generic/backport-5.15/850-v5.17-0009-PCI-aardvark-Assert-PERST-when-unbinding-driver.patch new file mode 100644 index 0000000000..1e9e74136c --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0009-PCI-aardvark-Assert-PERST-when-unbinding-driver.patch @@ -0,0 +1,33 @@ +From 1f54391be8ce0c981d312cb93acdc5608def576a Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Tue, 30 Nov 2021 18:29:11 +0100 +Subject: [PATCH] PCI: aardvark: Assert PERST# when unbinding driver +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Put the PCIe card into reset by asserting PERST# signal when unbinding +driver. It doesn't make sense to leave the card working if it can't +communicate with the host. This should also save some power. + +Link: https://lore.kernel.org/r/20211130172913.9727-10-kabel@kernel.org +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> +--- + drivers/pci/controller/pci-aardvark.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1738,6 +1738,10 @@ static int advk_pcie_remove(struct platf + /* Free config space for emulated root bridge */ + pci_bridge_emul_cleanup(&pcie->bridge); + ++ /* Assert PERST# signal which prepares PCIe card for power down */ ++ if (pcie->reset_gpio) ++ gpiod_set_value_cansleep(pcie->reset_gpio, 1); ++ + /* Disable outbound address windows mapping */ + for (i = 0; i < OB_WIN_COUNT; i++) + advk_pcie_disable_ob_win(pcie, i); diff --git a/target/linux/generic/backport-5.15/850-v5.17-0010-PCI-aardvark-Disable-link-training-when-unbinding-dr.patch b/target/linux/generic/backport-5.15/850-v5.17-0010-PCI-aardvark-Disable-link-training-when-unbinding-dr.patch new file mode 100644 index 0000000000..3b531deae0 --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0010-PCI-aardvark-Disable-link-training-when-unbinding-dr.patch @@ -0,0 +1,34 @@ +From 759dec2e3dfdbd261c41d2279f04f2351c971a49 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Tue, 30 Nov 2021 18:29:12 +0100 +Subject: [PATCH] PCI: aardvark: Disable link training when unbinding driver +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Disable link training circuit in driver unbind sequence. We want to +leave link training in the same state as it was before the driver was +probed. 
+ +Link: https://lore.kernel.org/r/20211130172913.9727-11-kabel@kernel.org +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> +--- + drivers/pci/controller/pci-aardvark.c | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1742,6 +1742,11 @@ static int advk_pcie_remove(struct platf + if (pcie->reset_gpio) + gpiod_set_value_cansleep(pcie->reset_gpio, 1); + ++ /* Disable link training */ ++ val = advk_readl(pcie, PCIE_CORE_CTRL0_REG); ++ val &= ~LINK_TRAINING_EN; ++ advk_writel(pcie, val, PCIE_CORE_CTRL0_REG); ++ + /* Disable outbound address windows mapping */ + for (i = 0; i < OB_WIN_COUNT; i++) + advk_pcie_disable_ob_win(pcie, i); diff --git a/target/linux/generic/backport-5.15/850-v5.17-0011-PCI-aardvark-Disable-common-PHY-when-unbinding-drive.patch b/target/linux/generic/backport-5.15/850-v5.17-0011-PCI-aardvark-Disable-common-PHY-when-unbinding-drive.patch new file mode 100644 index 0000000000..6caa6eb362 --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0011-PCI-aardvark-Disable-common-PHY-when-unbinding-drive.patch @@ -0,0 +1,30 @@ +From fdbbe242c15a8f2cd0e3ad8a56cd0a447b771d0d Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Tue, 30 Nov 2021 18:29:13 +0100 +Subject: [PATCH] PCI: aardvark: Disable common PHY when unbinding driver +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Disable the PCIe PHY when unbinding driver. This should save some power. + +Link: https://lore.kernel.org/r/20211130172913.9727-12-kabel@kernel.org +Signed-off-by: Pali Rohár <pali@kernel.org> +Signed-off-by: Marek Behún <kabel@kernel.org> +Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> +--- + drivers/pci/controller/pci-aardvark.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1751,6 +1751,9 @@ static int advk_pcie_remove(struct platf + for (i = 0; i < OB_WIN_COUNT; i++) + advk_pcie_disable_ob_win(pcie, i); + ++ /* Disable phy */ ++ advk_pcie_disable_phy(pcie); ++ + return 0; + } + diff --git a/target/linux/generic/backport-5.15/851-v5.15-0001-phy-marvell-phy-mvebu-a3700-comphy-Rename-HS-SGMMI-t.patch b/target/linux/generic/backport-5.15/851-v5.15-0001-phy-marvell-phy-mvebu-a3700-comphy-Rename-HS-SGMMI-t.patch new file mode 100644 index 0000000000..4f867724ac --- /dev/null +++ b/target/linux/generic/backport-5.15/851-v5.15-0001-phy-marvell-phy-mvebu-a3700-comphy-Rename-HS-SGMMI-t.patch @@ -0,0 +1,67 @@ +From 40da06da15c1718b02072687bbfb2d08f5eb9399 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Fri, 27 Aug 2021 11:27:52 +0200 +Subject: [PATCH] phy: marvell: phy-mvebu-a3700-comphy: Rename HS-SGMMI to + 2500Base-X +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Comphy phy mode 0x3 is incorrectly named. It is not SGMII but rather +2500Base-X mode which runs at 3.125 Gbps speed. + +Rename macro names and comments to 2500Base-X. + +Signed-off-by: Pali Rohár <pali@kernel.org> +Fixes: 9695375a3f4a ("phy: add A3700 COMPHY support") +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/phy/marvell/phy-mvebu-a3700-comphy.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c ++++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c +@@ -29,7 +29,7 @@ + + #define COMPHY_FW_MODE_SATA 0x1 + #define COMPHY_FW_MODE_SGMII 0x2 +-#define COMPHY_FW_MODE_HS_SGMII 0x3 ++#define COMPHY_FW_MODE_2500BASEX 0x3 + #define COMPHY_FW_MODE_USB3H 0x4 + #define COMPHY_FW_MODE_USB3D 0x5 + #define COMPHY_FW_MODE_PCIE 0x6 +@@ -40,7 +40,7 @@ + + #define COMPHY_FW_SPEED_1_25G 0 /* SGMII 1G */ + #define COMPHY_FW_SPEED_2_5G 1 +-#define COMPHY_FW_SPEED_3_125G 2 /* SGMII 2.5G */ ++#define COMPHY_FW_SPEED_3_125G 2 /* 2500BASE-X */ + #define COMPHY_FW_SPEED_5G 3 + #define COMPHY_FW_SPEED_5_15625G 4 /* XFI 5G */ + #define COMPHY_FW_SPEED_6G 5 +@@ -84,14 +84,14 @@ static const struct mvebu_a3700_comphy_c + MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_SGMII, 1, + COMPHY_FW_MODE_SGMII), + MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_2500BASEX, 1, +- COMPHY_FW_MODE_HS_SGMII), ++ COMPHY_FW_MODE_2500BASEX), + /* lane 1 */ + MVEBU_A3700_COMPHY_CONF_GEN(1, PHY_MODE_PCIE, 0, + COMPHY_FW_MODE_PCIE), + MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_SGMII, 0, + COMPHY_FW_MODE_SGMII), + MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_2500BASEX, 0, +- COMPHY_FW_MODE_HS_SGMII), ++ COMPHY_FW_MODE_2500BASEX), + /* lane 2 */ + MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_SATA, 0, + COMPHY_FW_MODE_SATA), +@@ -205,7 +205,7 @@ static int mvebu_a3700_comphy_power_on(s + COMPHY_FW_SPEED_1_25G); + break; + case PHY_INTERFACE_MODE_2500BASEX: +- dev_dbg(lane->dev, "set lane %d to HS SGMII mode\n", ++ dev_dbg(lane->dev, "set lane %d to 2500BASEX mode\n", + lane->id); + fw_param = COMPHY_FW_NET(fw_mode, lane->port, + COMPHY_FW_SPEED_3_125G); diff --git a/target/linux/generic/backport-5.15/851-v5.15-0002-phy-marvell-phy-mvebu-a3700-comphy-Remove-unsupporte.patch b/target/linux/generic/backport-5.15/851-v5.15-0002-phy-marvell-phy-mvebu-a3700-comphy-Remove-unsupporte.patch new file mode 100644 index 0000000000..99f56f1c57 --- /dev/null +++ b/target/linux/generic/backport-5.15/851-v5.15-0002-phy-marvell-phy-mvebu-a3700-comphy-Remove-unsupporte.patch @@ -0,0 +1,40 @@ +From e1dbe9ecf621b6f71f3d2df3e50731d583f3d27f Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org> +Date: Fri, 27 Aug 2021 11:27:53 +0200 +Subject: [PATCH] phy: marvell: phy-mvebu-a3700-comphy: Remove unsupported + modes +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Armada 3700 does not support RXAUI, XFI and neither SFI. Remove unused +macros for these unsupported modes. + +Signed-off-by: Pali Rohár <pali@kernel.org> +Fixes: 9695375a3f4a ("phy: add A3700 COMPHY support") +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/phy/marvell/phy-mvebu-a3700-comphy.c | 6 ------ + 1 file changed, 6 deletions(-) + +--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c ++++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c +@@ -33,18 +33,12 @@ + #define COMPHY_FW_MODE_USB3H 0x4 + #define COMPHY_FW_MODE_USB3D 0x5 + #define COMPHY_FW_MODE_PCIE 0x6 +-#define COMPHY_FW_MODE_RXAUI 0x7 +-#define COMPHY_FW_MODE_XFI 0x8 +-#define COMPHY_FW_MODE_SFI 0x9 + #define COMPHY_FW_MODE_USB3 0xa + + #define COMPHY_FW_SPEED_1_25G 0 /* SGMII 1G */ + #define COMPHY_FW_SPEED_2_5G 1 + #define COMPHY_FW_SPEED_3_125G 2 /* 2500BASE-X */ + #define COMPHY_FW_SPEED_5G 3 +-#define COMPHY_FW_SPEED_5_15625G 4 /* XFI 5G */ +-#define COMPHY_FW_SPEED_6G 5 +-#define COMPHY_FW_SPEED_10_3125G 6 /* XFI 10G */ + #define COMPHY_FW_SPEED_MAX 0x3F + + #define COMPHY_FW_MODE(mode) ((mode) << 12) diff --git a/target/linux/generic/backport-5.15/860-v5.17-MIPS-ath79-drop-_machine_restart-again.patch b/target/linux/generic/backport-5.15/860-v5.17-MIPS-ath79-drop-_machine_restart-again.patch new file mode 100644 index 0000000000..971562a8f7 --- /dev/null +++ b/target/linux/generic/backport-5.15/860-v5.17-MIPS-ath79-drop-_machine_restart-again.patch @@ -0,0 +1,49 @@ +From d3115128bdafb62628ab41861a4f06f6d02ac320 Mon Sep 17 00:00:00 2001 +From: Lech Perczak <lech.perczak@gmail.com> +Date: Mon, 10 Jan 2022 23:48:44 +0100 +Subject: MIPS: ath79: drop _machine_restart again + +Commit 81424d0ad0d4 ("MIPS: ath79: Use the reset controller to restart +OF machines") removed setup of _machine_restart on OF machines to use +reset handler in reset controller driver. +While removing remnants of non-OF machines in commit 3a77e0d75eed +("MIPS: ath79: drop machfiles"), this was introduced again, making it +impossible to use additional restart handlers registered through device +tree. Drop setting _machine_restart altogether, and ath79_restart +function, which is no longer used after this. + +Fixes: 3a77e0d75eed ("MIPS: ath79: drop machfiles") +Cc: John Crispin <john@phrozen.org> +Cc: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: Lech Perczak <lech.perczak@gmail.com> +Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de> +--- + arch/mips/ath79/setup.c | 10 ---------- + 1 file changed, 10 deletions(-) + +--- a/arch/mips/ath79/setup.c ++++ b/arch/mips/ath79/setup.c +@@ -34,15 +34,6 @@ + + static char ath79_sys_type[ATH79_SYS_TYPE_LEN]; + +-static void ath79_restart(char *command) +-{ +- local_irq_disable(); +- ath79_device_reset_set(AR71XX_RESET_FULL_CHIP); +- for (;;) +- if (cpu_wait) +- cpu_wait(); +-} +- + static void ath79_halt(void) + { + while (1) +@@ -233,7 +224,6 @@ void __init plat_mem_setup(void) + + detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX); + +- _machine_restart = ath79_restart; + _machine_halt = ath79_halt; + pm_power_off = ath79_halt; + } diff --git a/target/linux/generic/backport-5.15/870-hwmon-next-hwmon-lm70-Add-ti-tmp125-support.patch b/target/linux/generic/backport-5.15/870-hwmon-next-hwmon-lm70-Add-ti-tmp125-support.patch new file mode 100644 index 0000000000..fabf177628 --- /dev/null +++ b/target/linux/generic/backport-5.15/870-hwmon-next-hwmon-lm70-Add-ti-tmp125-support.patch @@ -0,0 +1,71 @@ +From 31d8f414e1596ba54a4315418e4c0086fda9e428 Mon Sep 17 00:00:00 2001 +From: Christian Lamparter <chunkeey@gmail.com> +Date: Fri, 18 Feb 2022 10:06:43 +0100 +Subject: hwmon: (lm70) Add ti,tmp125 support + +The TMP125 is a 2 degree Celsius accurate Digital +Temperature Sensor with a SPI interface. 
+ +The temperature register is a 16-bit, read-only register. +The MSB (Bit 15) is a leading zero and never set. Bits 14 +to 5 are the 1+9 temperature data bits in a two's +complement format. Bits 4 to 0 are useless copies of +Bit 5 value and therefore ignored. + +Signed-off-by: Christian Lamparter <chunkeey@gmail.com> +Link: https://lore.kernel.org/r/43b19cbd4e7f51e9509e561b02b5d8d0e7079fac.1645175187.git.chunkeey@gmail.com +Signed-off-by: Guenter Roeck <linux@roeck-us.net> +--- +--- a/drivers/hwmon/lm70.c ++++ b/drivers/hwmon/lm70.c +@@ -34,6 +34,7 @@ + #define LM70_CHIP_LM71 2 /* NS LM71 */ + #define LM70_CHIP_LM74 3 /* NS LM74 */ + #define LM70_CHIP_TMP122 4 /* TI TMP122/TMP124 */ ++#define LM70_CHIP_TMP125 5 /* TI TMP125 */ + + struct lm70 { + struct spi_device *spi; +@@ -87,6 +88,12 @@ static ssize_t temp1_input_show(struct d + * LM71: + * 14 bits of 2's complement data, discard LSB 2 bits, + * resolution 0.0312 degrees celsius. ++ * ++ * TMP125: ++ * MSB/D15 is a leading zero. D14 is the sign-bit. This is ++ * followed by 9 temperature bits (D13..D5) in 2's complement ++ * data format with a resolution of 0.25 degrees celsius per unit. ++ * LSB 5 bits (D4..D0) share the same value as D5 and get discarded. + */ + switch (p_lm70->chip) { + case LM70_CHIP_LM70: +@@ -102,6 +109,10 @@ static ssize_t temp1_input_show(struct d + case LM70_CHIP_LM71: + val = ((int)raw / 4) * 3125 / 100; + break; ++ ++ case LM70_CHIP_TMP125: ++ val = (sign_extend32(raw, 14) / 32) * 250; ++ break; + } + + status = sprintf(buf, "%d\n", val); /* millidegrees Celsius */ +@@ -136,6 +147,10 @@ static const struct of_device_id lm70_of + .data = (void *) LM70_CHIP_TMP122, + }, + { ++ .compatible = "ti,tmp125", ++ .data = (void *) LM70_CHIP_TMP125, ++ }, ++ { + .compatible = "ti,lm71", + .data = (void *) LM70_CHIP_LM71, + }, +@@ -184,6 +199,7 @@ static const struct spi_device_id lm70_i + { "lm70", LM70_CHIP_LM70 }, + { "tmp121", LM70_CHIP_TMP121 }, + { "tmp122", LM70_CHIP_TMP122 }, ++ { "tmp125", LM70_CHIP_TMP125 }, + { "lm71", LM70_CHIP_LM71 }, + { "lm74", LM70_CHIP_LM74 }, + { }, |
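
For reference, the conversion that the new LM70_CHIP_TMP125 case performs can be checked in isolation. The sketch below is a minimal user-space rendition of that arithmetic and is not part of the patch: sign_extend32() is open-coded here (the kernel provides it via <linux/bitops.h>), and the raw SPI word 0x0C80 is a made-up sample reading.

#include <stdint.h>
#include <stdio.h>

/* Open-coded stand-in for the kernel's sign_extend32() helper. */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

/*
 * Mirrors the LM70_CHIP_TMP125 case added above: D15 is a leading zero,
 * D14 is the sign bit, D13..D5 carry the temperature in 0.25 degC steps,
 * and D4..D0 duplicate D5, so they are discarded by the division by 32.
 */
static int tmp125_to_millicelsius(uint16_t raw)
{
	return (sign_extend32(raw, 14) / 32) * 250;
}

int main(void)
{
	uint16_t raw = 0x0C80;	/* hypothetical reading: (0x0C80 >> 5) * 0.25 degC */

	printf("%d\n", tmp125_to_millicelsius(raw));	/* prints 25000, i.e. 25.0 degC */
	return 0;
}

Run against the sample word it prints 25000 millidegrees Celsius, matching the 0.25 degC-per-unit resolution described in the commit message.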