path: root/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx
author     INAGAKI Hiroshi <musashino.open@gmail.com>    2022-09-09 22:08:16 +0900
committer  Sander Vanheule <sander@svanheule.net>         2022-12-15 20:52:09 +0100
commit     8fb15ea52a02b0578c11897afd1a87f8502d8f52 (patch)
tree       a65ae72710e84420cad6e500a2b781abce0cd9a0 /target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx
parent     fef55d5ffd31bd6e0d157df784f47fe8ce0f1494 (diff)
realtek: copy dts/files/patches/configs for 5.15
Copy dts/files/patches/configs from 5.10 to 5.15.

Signed-off-by: INAGAKI Hiroshi <musashino.open@gmail.com>
[refresh with updated DGS-1210 dts files]
Signed-off-by: Sander Vanheule <sander@svanheule.net>
Diffstat (limited to 'target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx')
-rw-r--r--  target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/Kconfig        8
-rw-r--r--  target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/Makefile       3
-rw-r--r--  target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/common.c    1696
-rw-r--r--  target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/debugfs.c    727
-rw-r--r--  target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/dsa.c       2259
-rw-r--r--  target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/qos.c        576
-rw-r--r--  target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl838x.c   2054
-rw-r--r--  target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl838x.h   1093
-rw-r--r--  target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl839x.c   1937
-rw-r--r--  target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl83xx.h    137
-rw-r--r--  target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl930x.c   2560
-rw-r--r--  target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl931x.c   1701
-rw-r--r--  target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/tc.c         409
13 files changed, 15160 insertions, 0 deletions
diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/Kconfig b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/Kconfig
new file mode 100644
index 0000000000..281f08054f
--- /dev/null
+++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_RTL83XX
+ tristate "Realtek RTL838x/RTL839x switch support"
+ depends on RTL83XX
+ select NET_DSA_TAG_TRAILER
+ help
+ This driver adds support for Realtek RTL83xx series switching.
+
diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/Makefile b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/Makefile
new file mode 100644
index 0000000000..8752c79700
--- /dev/null
+++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_NET_DSA_RTL83XX) += common.o dsa.o \
+ rtl838x.o rtl839x.o rtl930x.o rtl931x.o debugfs.o qos.o tc.o
diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/common.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/common.c
new file mode 100644
index 0000000000..e86ff9ccdf
--- /dev/null
+++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/common.c
@@ -0,0 +1,1696 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <net/arp.h>
+#include <net/nexthop.h>
+#include <net/neighbour.h>
+#include <net/netevent.h>
+#include <linux/inetdevice.h>
+#include <linux/rhashtable.h>
+#include <linux/of_net.h>
+
+#include <asm/mach-rtl838x/mach-rtl83xx.h>
+#include "rtl83xx.h"
+
+extern struct rtl83xx_soc_info soc_info;
+
+extern const struct rtl838x_reg rtl838x_reg;
+extern const struct rtl838x_reg rtl839x_reg;
+extern const struct rtl838x_reg rtl930x_reg;
+extern const struct rtl838x_reg rtl931x_reg;
+
+extern const struct dsa_switch_ops rtl83xx_switch_ops;
+extern const struct dsa_switch_ops rtl930x_switch_ops;
+
+DEFINE_MUTEX(smi_lock);
+
+int rtl83xx_port_get_stp_state(struct rtl838x_switch_priv *priv, int port)
+{
+ u32 msti = 0;
+ u32 port_state[4];
+ int index, bit;
+ int pos = port;
+ int n = priv->port_width << 1;
+
+ /* Ports at or above the CPU port can never be configured */
+ if (port >= priv->cpu_port)
+ return -1;
+
+ mutex_lock(&priv->reg_mutex);
+
+ /* For the RTL839x and following, the bits are left-aligned in the 64/128 bit field */
+ if (priv->family_id == RTL8390_FAMILY_ID)
+ pos += 12;
+ if (priv->family_id == RTL9300_FAMILY_ID)
+ pos += 3;
+ if (priv->family_id == RTL9310_FAMILY_ID)
+ pos += 8;
+
+ index = n - (pos >> 4) - 1;
+ bit = (pos << 1) % 32;
+
+ priv->r->stp_get(priv, msti, port_state);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return (port_state[index] >> bit) & 3;
+}
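A worked example of the index/bit arithmetic above, assuming the RTL8390 values set up in rtl83xx_sw_probe() further down (port_width = 2, so n = 4): for port 4, pos becomes 4 + 12 = 16, index = 4 - (16 >> 4) - 1 = 2 and bit = (16 << 1) % 32 = 0, so the two STP state bits for that port sit in bits 1:0 of port_state[2].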
+
+static struct table_reg rtl838x_tbl_regs[] = {
+ TBL_DESC(0x6900, 0x6908, 3, 15, 13, 1), // RTL8380_TBL_L2
+ TBL_DESC(0x6914, 0x6918, 18, 14, 12, 1), // RTL8380_TBL_0
+ TBL_DESC(0xA4C8, 0xA4CC, 6, 14, 12, 1), // RTL8380_TBL_1
+
+ TBL_DESC(0x1180, 0x1184, 3, 16, 14, 0), // RTL8390_TBL_L2
+ TBL_DESC(0x1190, 0x1194, 17, 15, 12, 0), // RTL8390_TBL_0
+ TBL_DESC(0x6B80, 0x6B84, 4, 14, 12, 0), // RTL8390_TBL_1
+ TBL_DESC(0x611C, 0x6120, 9, 8, 6, 0), // RTL8390_TBL_2
+
+ TBL_DESC(0xB320, 0xB334, 3, 18, 16, 0), // RTL9300_TBL_L2
+ TBL_DESC(0xB340, 0xB344, 19, 16, 12, 0), // RTL9300_TBL_0
+ TBL_DESC(0xB3A0, 0xB3A4, 20, 16, 13, 0), // RTL9300_TBL_1
+ TBL_DESC(0xCE04, 0xCE08, 6, 14, 12, 0), // RTL9300_TBL_2
+ TBL_DESC(0xD600, 0xD604, 30, 7, 6, 0), // RTL9300_TBL_HSB
+ TBL_DESC(0x7880, 0x7884, 22, 9, 8, 0), // RTL9300_TBL_HSA
+
+ TBL_DESC(0x8500, 0x8508, 8, 19, 15, 0), // RTL9310_TBL_0
+ TBL_DESC(0x40C0, 0x40C4, 22, 16, 14, 0), // RTL9310_TBL_1
+ TBL_DESC(0x8528, 0x852C, 6, 18, 14, 0), // RTL9310_TBL_2
+ TBL_DESC(0x0200, 0x0204, 9, 15, 12, 0), // RTL9310_TBL_3
+ TBL_DESC(0x20dc, 0x20e0, 29, 7, 6, 0), // RTL9310_TBL_4
+ TBL_DESC(0x7e1c, 0x7e20, 53, 8, 6, 0), // RTL9310_TBL_5
+};
+
+void rtl_table_init(void)
+{
+ int i;
+
+ for (i = 0; i < RTL_TBL_END; i++)
+ mutex_init(&rtl838x_tbl_regs[i].lock);
+}
+
+/*
+ * Request access to table t in table access register r
+ * Returns a handle to a lock for that table
+ */
+struct table_reg *rtl_table_get(rtl838x_tbl_reg_t r, int t)
+{
+ if (r >= RTL_TBL_END)
+ return NULL;
+
+ if (t >= BIT(rtl838x_tbl_regs[r].c_bit-rtl838x_tbl_regs[r].t_bit))
+ return NULL;
+
+ mutex_lock(&rtl838x_tbl_regs[r].lock);
+ rtl838x_tbl_regs[r].tbl = t;
+
+ return &rtl838x_tbl_regs[r];
+}
+
+/*
+ * Release a table r, unlock the corresponding lock
+ */
+void rtl_table_release(struct table_reg *r)
+{
+ if (!r)
+ return;
+
+// pr_info("Unlocking %08x\n", (u32)r);
+ mutex_unlock(&r->lock);
+// pr_info("Unlock done\n");
+}
+
+/*
+ * Reads table index idx into the data registers of the table
+ */
+void rtl_table_read(struct table_reg *r, int idx)
+{
+ u32 cmd = r->rmode ? BIT(r->c_bit) : 0;
+
+ cmd |= BIT(r->c_bit + 1) | (r->tbl << r->t_bit) | (idx & (BIT(r->t_bit) - 1));
+ sw_w32(cmd, r->addr);
+ do { } while (sw_r32(r->addr) & BIT(r->c_bit + 1));
+}
+
+/*
+ * Writes the content of the table data registers into the table at index idx
+ */
+void rtl_table_write(struct table_reg *r, int idx)
+{
+ u32 cmd = r->rmode ? 0 : BIT(r->c_bit);
+
+ cmd |= BIT(r->c_bit + 1) | (r->tbl << r->t_bit) | (idx & (BIT(r->t_bit) - 1));
+ sw_w32(cmd, r->addr);
+ do { } while (sw_r32(r->addr) & BIT(r->c_bit + 1));
+}
+
+/*
+ * Returns the address of the ith data register of table register r
+ * the address is relative to the beginning of the Switch-IO block at 0xbb000000
+ */
+inline u16 rtl_table_data(struct table_reg *r, int i)
+{
+ if (i >= r->max_data)
+ i = r->max_data - 1;
+ return r->data + i * 4;
+}
+
+inline u32 rtl_table_data_r(struct table_reg *r, int i)
+{
+ return sw_r32(rtl_table_data(r, i));
+}
+
+inline void rtl_table_data_w(struct table_reg *r, u32 v, int i)
+{
+ sw_w32(v, rtl_table_data(r, i));
+}
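Taken together these helpers form a small, locked table-access API: get a handle, read or write an entry, access its data registers, release the lock. A minimal usage sketch follows; the table id RTL8380_TBL_L2 is taken from the comments on the descriptor array above, while the sub-table 0 and entry index 5 are illustrative assumptions only.

	/* Sketch: read one entry of the RTL8380 L2 table and fetch its first data word */
	struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 0);	/* takes the per-table lock */

	if (q) {
		u32 word0;

		rtl_table_read(q, 5);			/* load entry 5 into the data registers */
		word0 = rtl_table_data_r(q, 0);		/* read data register 0 of that entry */
		rtl_table_release(q);			/* drop the per-table lock again */
	}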
+
+/* Port register accessor functions for the RTL838x and RTL930X SoCs */
+void rtl838x_mask_port_reg(u64 clear, u64 set, int reg)
+{
+ sw_w32_mask((u32)clear, (u32)set, reg);
+}
+
+void rtl838x_set_port_reg(u64 set, int reg)
+{
+ sw_w32((u32)set, reg);
+}
+
+u64 rtl838x_get_port_reg(int reg)
+{
+ return ((u64) sw_r32(reg));
+}
+
+/* Port register accessor functions for the RTL839x and RTL931X SoCs */
+void rtl839x_mask_port_reg_be(u64 clear, u64 set, int reg)
+{
+ sw_w32_mask((u32)(clear >> 32), (u32)(set >> 32), reg);
+ sw_w32_mask((u32)(clear & 0xffffffff), (u32)(set & 0xffffffff), reg + 4);
+}
+
+u64 rtl839x_get_port_reg_be(int reg)
+{
+ u64 v = sw_r32(reg);
+
+ v <<= 32;
+ v |= sw_r32(reg + 4);
+ return v;
+}
+
+void rtl839x_set_port_reg_be(u64 set, int reg)
+{
+ sw_w32(set >> 32, reg);
+ sw_w32(set & 0xffffffff, reg + 4);
+}
+
+void rtl839x_mask_port_reg_le(u64 clear, u64 set, int reg)
+{
+ sw_w32_mask((u32)clear, (u32)set, reg);
+ sw_w32_mask((u32)(clear >> 32), (u32)(set >> 32), reg + 4);
+}
+
+void rtl839x_set_port_reg_le(u64 set, int reg)
+{
+ sw_w32(set, reg);
+ sw_w32(set >> 32, reg + 4);
+}
+
+u64 rtl839x_get_port_reg_le(int reg)
+{
+ u64 v = sw_r32(reg + 4);
+
+ v <<= 32;
+ v |= sw_r32(reg);
+ return v;
+}
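For instance, writing a mask with ports 3 and 52 set through rtl839x_set_port_reg_le() puts the low word 0x00000008 into reg and the high word 0x00100000 (bit 52 - 32 = 20) into reg + 4; the _be variants store the two halves in the opposite order.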
+
+int read_phy(u32 port, u32 page, u32 reg, u32 *val)
+{
+ switch (soc_info.family) {
+ case RTL8380_FAMILY_ID:
+ return rtl838x_read_phy(port, page, reg, val);
+ case RTL8390_FAMILY_ID:
+ return rtl839x_read_phy(port, page, reg, val);
+ case RTL9300_FAMILY_ID:
+ return rtl930x_read_phy(port, page, reg, val);
+ case RTL9310_FAMILY_ID:
+ return rtl931x_read_phy(port, page, reg, val);
+ }
+ return -1;
+}
+
+int write_phy(u32 port, u32 page, u32 reg, u32 val)
+{
+ switch (soc_info.family) {
+ case RTL8380_FAMILY_ID:
+ return rtl838x_write_phy(port, page, reg, val);
+ case RTL8390_FAMILY_ID:
+ return rtl839x_write_phy(port, page, reg, val);
+ case RTL9300_FAMILY_ID:
+ return rtl930x_write_phy(port, page, reg, val);
+ case RTL9310_FAMILY_ID:
+ return rtl931x_write_phy(port, page, reg, val);
+ }
+ return -1;
+}
+
+static int __init rtl83xx_mdio_probe(struct rtl838x_switch_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct device_node *dn, *phy_node, *mii_np = dev->of_node;
+ struct mii_bus *bus;
+ int ret;
+ u32 pn;
+
+ pr_debug("In %s\n", __func__);
+ mii_np = of_find_compatible_node(NULL, NULL, "realtek,rtl838x-mdio");
+ if (mii_np) {
+ pr_debug("Found compatible MDIO node!\n");
+ } else {
+ dev_err(priv->dev, "no %s child node found", "mdio-bus");
+ return -ENODEV;
+ }
+
+ priv->mii_bus = of_mdio_find_bus(mii_np);
+ if (!priv->mii_bus) {
+ pr_debug("Deferring probe of mdio bus\n");
+ return -EPROBE_DEFER;
+ }
+ if (!of_device_is_available(mii_np))
+ ret = -ENODEV;
+
+ bus = devm_mdiobus_alloc(priv->ds->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "rtl838x slave mii";
+
+ /*
+ * Since the NIC driver is loaded first, we can use the mdio rw functions
+ * assigned there.
+ */
+ bus->read = priv->mii_bus->read;
+ bus->write = priv->mii_bus->write;
+ bus->read_paged = priv->mii_bus->read_paged;
+ bus->write_paged = priv->mii_bus->write_paged;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", bus->name, dev->id);
+
+ bus->parent = dev;
+ priv->ds->slave_mii_bus = bus;
+ priv->ds->slave_mii_bus->priv = priv->mii_bus->priv;
+ priv->ds->slave_mii_bus->access_capabilities = priv->mii_bus->access_capabilities;
+
+ ret = mdiobus_register(priv->ds->slave_mii_bus);
+ if (ret && mii_np) {
+ of_node_put(mii_np);
+ return ret;
+ }
+
+ dn = of_find_compatible_node(NULL, NULL, "realtek,rtl83xx-switch");
+ if (!dn) {
+ dev_err(priv->dev, "No RTL switch node in DTS\n");
+ return -ENODEV;
+ }
+
+ for_each_node_by_name(dn, "port") {
+ phy_interface_t interface;
+ u32 led_set;
+
+ if (!of_device_is_available(dn))
+ continue;
+
+ if (of_property_read_u32(dn, "reg", &pn))
+ continue;
+
+ pr_info("%s found port %d\n", __func__, pn);
+ phy_node = of_parse_phandle(dn, "phy-handle", 0);
+ if (!phy_node) {
+ if (pn != priv->cpu_port)
+ dev_err(priv->dev, "Port node %d misses phy-handle\n", pn);
+ continue;
+ }
+
+ pr_info("%s port %d has phandle\n", __func__, pn);
+ if (of_property_read_u32(phy_node, "sds", &priv->ports[pn].sds_num))
+ priv->ports[pn].sds_num = -1;
+ else {
+ pr_info("%s sds port %d is %d\n", __func__, pn,
+ priv->ports[pn].sds_num);
+ }
+ pr_info("%s port %d has SDS\n", __func__, priv->ports[pn].sds_num);
+
+ if (of_get_phy_mode(dn, &interface))
+ interface = PHY_INTERFACE_MODE_NA;
+ if (interface == PHY_INTERFACE_MODE_HSGMII)
+ priv->ports[pn].is2G5 = true;
+ if (interface == PHY_INTERFACE_MODE_USXGMII)
+ priv->ports[pn].is2G5 = priv->ports[pn].is10G = true;
+ if (interface == PHY_INTERFACE_MODE_10GBASER)
+ priv->ports[pn].is10G = true;
+
+ if (of_property_read_u32(dn, "led-set", &led_set))
+ led_set = 0;
+ priv->ports[pn].led_set = led_set;
+
+ // Check for the integrated SerDes of the RTL8380M first
+ if (of_property_read_bool(phy_node, "phy-is-integrated")
+ && priv->id == 0x8380 && pn >= 24) {
+ pr_debug("----> FĂ“UND A SERDES\n");
+ priv->ports[pn].phy = PHY_RTL838X_SDS;
+ continue;
+ }
+
+ if (priv->id >= 0x9300) {
+ priv->ports[pn].phy_is_integrated = false;
+ if (of_property_read_bool(phy_node, "phy-is-integrated")) {
+ priv->ports[pn].phy_is_integrated = true;
+ priv->ports[pn].phy = PHY_RTL930X_SDS;
+ }
+ } else {
+ if (of_property_read_bool(phy_node, "phy-is-integrated")
+ && !of_property_read_bool(phy_node, "sfp")) {
+ priv->ports[pn].phy = PHY_RTL8218B_INT;
+ continue;
+ }
+ }
+
+ if (!of_property_read_bool(phy_node, "phy-is-integrated")
+ && of_property_read_bool(phy_node, "sfp")) {
+ priv->ports[pn].phy = PHY_RTL8214FC;
+ continue;
+ }
+
+ if (!of_property_read_bool(phy_node, "phy-is-integrated")
+ && !of_property_read_bool(phy_node, "sfp")) {
+ priv->ports[pn].phy = PHY_RTL8218B_EXT;
+ continue;
+ }
+ }
+
+ /* Disable MAC polling the PHY so that we can start configuration */
+ priv->r->set_port_reg_le(0ULL, priv->r->smi_poll_ctrl);
+
+ /* Enable PHY control via SoC */
+ if (priv->family_id == RTL8380_FAMILY_ID) {
+ /* Enable SerDes NWAY and PHY control via SoC */
+ sw_w32_mask(BIT(7), BIT(15), RTL838X_SMI_GLB_CTRL);
+ } else if (priv->family_id == RTL8390_FAMILY_ID) {
+ /* Disable PHY polling via SoC */
+ sw_w32_mask(BIT(7), 0, RTL839X_SMI_GLB_CTRL);
+ }
+
+ /* Power on fibre ports and reset them if necessary */
+ if (priv->ports[24].phy == PHY_RTL838X_SDS) {
+ pr_debug("Powering on fibre ports & reset\n");
+ rtl8380_sds_power(24, 1);
+ rtl8380_sds_power(26, 1);
+ }
+
+ pr_debug("%s done\n", __func__);
+ return 0;
+}
+
+static int __init rtl83xx_get_l2aging(struct rtl838x_switch_priv *priv)
+{
+ int t = sw_r32(priv->r->l2_ctrl_1);
+
+ t &= priv->family_id == RTL8380_FAMILY_ID ? 0x7fffff : 0x1FFFFF;
+
+ if (priv->family_id == RTL8380_FAMILY_ID)
+ t = t * 128 / 625; /* Aging time in seconds. 0: L2 aging disabled */
+ else
+ t = (t * 3) / 5;
+
+ pr_debug("L2 AGING time: %d sec\n", t);
+ pr_debug("Dynamic aging for ports: %x\n", sw_r32(priv->r->l2_port_aging_out));
+ return t;
+}
+
+/* Caller must hold priv->reg_mutex */
+int rtl83xx_lag_add(struct dsa_switch *ds, int group, int port, struct netdev_lag_upper_info *info)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ int i;
+ u32 algomsk = 0;
+ u32 algoidx = 0;
+
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ pr_err("%s: Only mode LACP 802.3ad (4) allowed.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (group >= priv->n_lags) {
+ pr_err("%s: LAG %d invalid.\n", __func__, group);
+ return -EINVAL;
+ }
+
+ if (port >= priv->cpu_port) {
+ pr_err("%s: Port %d invalid.\n", __func__, port);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < priv->n_lags; i++) {
+ if (priv->lags_port_members[i] & BIT_ULL(port))
+ break;
+ }
+ if (i != priv->n_lags) {
+ pr_err("%s: Port %d already member of LAG %d.\n", __func__, port, i);
+ return -ENOSPC;
+ }
+ switch(info->hash_type) {
+ case NETDEV_LAG_HASH_L2:
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT;
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT;
+ break;
+ case NETDEV_LAG_HASH_L23:
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT;
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT;
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; //source ip
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; //dest ip
+ algoidx = 1;
+ break;
+ case NETDEV_LAG_HASH_L34:
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT; //sport
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT; //dport
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; //source ip
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; //dest ip
+ algoidx = 2;
+ break;
+ default:
+ algomsk |= 0x7f;
+ }
+ priv->r->set_distribution_algorithm(group, algoidx, algomsk);
+ priv->r->mask_port_reg_be(0, BIT_ULL(port), priv->r->trk_mbr_ctr(group));
+ priv->lags_port_members[group] |= BIT_ULL(port);
+
+ pr_info("%s: Added port %d to LAG %d. Members now %016llx.\n",
+ __func__, port, group, priv->lags_port_members[group]);
+ return 0;
+}
+
+/* Caller must hold priv->reg_mutex */
+int rtl83xx_lag_del(struct dsa_switch *ds, int group, int port)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ if (group >= priv->n_lags) {
+ pr_err("%s: LAG %d invalid.\n", __func__, group);
+ return -EINVAL;
+ }
+
+ if (port >= priv->cpu_port) {
+ pr_err("%s: Port %d invalid.\n", __func__, port);
+ return -EINVAL;
+ }
+
+ if (!(priv->lags_port_members[group] & BIT_ULL(port))) {
+ pr_err("%s: Port %d not member of LAG %d.\n", __func__, port, group);
+ return -ENOSPC;
+ }
+
+ // 0x7f algo mask all
+ priv->r->mask_port_reg_be(BIT_ULL(port), 0, priv->r->trk_mbr_ctr(group));
+ priv->lags_port_members[group] &= ~BIT_ULL(port);
+
+ pr_info("%s: Removed port %d from LAG %d. Members now %016llx.\n",
+ __func__, port, group, priv->lags_port_members[group]);
+ return 0;
+}
+
+/*
+ * Allocate a 64 bit octet counter located in the LOG HW table
+ */
+static int rtl83xx_octet_cntr_alloc(struct rtl838x_switch_priv *priv)
+{
+ int idx;
+
+ mutex_lock(&priv->reg_mutex);
+
+ idx = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
+ if (idx >= priv->n_counters) {
+ mutex_unlock(&priv->reg_mutex);
+ return -1;
+ }
+
+ set_bit(idx, priv->octet_cntr_use_bm);
+ mutex_unlock(&priv->reg_mutex);
+
+ return idx;
+}
+
+/*
+ * Allocate a 32-bit packet counter
+ * 2 32-bit packet counters share the location of a 64-bit octet counter
+ * Initially there are no free packet counters; two of them become available
+ * by allocating the corresponding octet counter
+ */
+int rtl83xx_packet_cntr_alloc(struct rtl838x_switch_priv *priv)
+{
+ int idx, j;
+
+ mutex_lock(&priv->reg_mutex);
+
+ /* Because initially no packet counters are free, the logic is reversed:
+ * a 0-bit means the counter is already allocated (for octets)
+ */
+ idx = find_first_bit(priv->packet_cntr_use_bm, MAX_COUNTERS * 2);
+ if (idx >= priv->n_counters * 2) {
+ j = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
+ if (j >= priv->n_counters) {
+ mutex_unlock(&priv->reg_mutex);
+ return -1;
+ }
+ set_bit(j, priv->octet_cntr_use_bm);
+ idx = j * 2;
+ set_bit(j * 2 + 1, priv->packet_cntr_use_bm);
+
+ } else {
+ clear_bit(idx, priv->packet_cntr_use_bm);
+ }
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return idx;
+}
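To illustrate the inverted bookkeeping: with both bitmaps initially empty, the first call finds no set bit in packet_cntr_use_bm, so it claims octet counter 0, returns packet counter 0 and sets bit 1 to mark its sibling as available; the second call then finds bit 1 set, clears it and returns packet counter 1.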
+
+/*
+ * Add an L2 nexthop entry for the L3 routing system / PIE forwarding in the SoC
+ * Use VID and MAC in rtl838x_l2_entry to identify either a free slot in the L2 hash table
+ * or mark an existing entry as a nexthop by setting its nexthop bit
+ * Called from the L3 layer
+ * The index in the L2 hash table is filled into nh->l2_id;
+ */
+int rtl83xx_l2_nexthop_add(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh)
+{
+ struct rtl838x_l2_entry e;
+ u64 seed = priv->r->l2_hash_seed(nh->mac, nh->rvid);
+ u32 key = priv->r->l2_hash_key(priv, seed);
+ int i, idx = -1;
+ u64 entry;
+
+ pr_debug("%s searching for %08llx vid %d with key %d, seed: %016llx\n",
+ __func__, nh->mac, nh->rvid, key, seed);
+
+ e.type = L2_UNICAST;
+ u64_to_ether_addr(nh->mac, &e.mac[0]);
+ e.port = nh->port;
+
+ // Loop over all entries in the hash-bucket and over the second block on 93xx SoCs
+ for (i = 0; i < priv->l2_bucket_size; i++) {
+ entry = priv->r->read_l2_entry_using_hash(key, i, &e);
+
+ if (!e.valid || ((entry & 0x0fffffffffffffffULL) == seed)) {
+ idx = i > 3 ? ((key >> 14) & 0xffff) | i >> 1
+ : ((key << 2) | i) & 0xffff;
+ break;
+ }
+ }
+
+ if (idx < 0) {
+ pr_err("%s: No more L2 forwarding entries available\n", __func__);
+ return -1;
+ }
+
+ // Found an existing (e->valid is true) or empty entry, make it a nexthop entry
+ nh->l2_id = idx;
+ if (e.valid) {
+ nh->port = e.port;
+ nh->vid = e.vid; // Save VID
+ nh->rvid = e.rvid;
+ nh->dev_id = e.stack_dev;
+ // If the entry is already a valid next hop entry, don't change it
+ if (e.next_hop)
+ return 0;
+ } else {
+ e.valid = true;
+ e.is_static = true;
+ e.rvid = nh->rvid;
+ e.is_ip_mc = false;
+ e.is_ipv6_mc = false;
+ e.block_da = false;
+ e.block_sa = false;
+ e.suspended = false;
+ e.age = 0; // With port-ignore
+ e.port = priv->port_ignore;
+ u64_to_ether_addr(nh->mac, &e.mac[0]);
+ }
+ e.next_hop = true;
+ e.nh_route_id = nh->id; // NH route ID takes place of VID
+ e.nh_vlan_target = false;
+
+ priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
+
+ return 0;
+}
+
+/*
+ * Removes a Layer 2 next hop entry in the forwarding database
+ * If it was static, the entire entry is removed, otherwise the nexthop bit is cleared
+ * and we wait until the entry ages out
+ */
+int rtl83xx_l2_nexthop_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh)
+{
+ struct rtl838x_l2_entry e;
+ u32 key = nh->l2_id >> 2;
+ int i = nh->l2_id & 0x3;
+ u64 entry = priv->r->read_l2_entry_using_hash(key, i, &e);
+
+ pr_debug("%s: id %d, key %d, index %d\n", __func__, nh->l2_id, key, i);
+ if (!e.valid) {
+ dev_err(priv->dev, "unknown nexthop, id %x\n", nh->l2_id);
+ return -1;
+ }
+
+ if (e.is_static)
+ e.valid = false;
+ e.next_hop = false;
+ e.vid = nh->vid; // Restore VID
+ e.rvid = nh->rvid;
+
+ priv->r->write_l2_entry_using_hash(key, i, &e);
+
+ return 0;
+}
+
+static int rtl83xx_handle_changeupper(struct rtl838x_switch_priv *priv,
+ struct net_device *ndev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct net_device *upper = info->upper_dev;
+ struct netdev_lag_upper_info *lag_upper_info = NULL;
+ int i, j, err;
+
+ if (!netif_is_lag_master(upper))
+ return 0;
+
+ mutex_lock(&priv->reg_mutex);
+
+ for (i = 0; i < priv->n_lags; i++) {
+ if ((!priv->lag_devs[i]) || (priv->lag_devs[i] == upper))
+ break;
+ }
+ for (j = 0; j < priv->cpu_port; j++) {
+ if (priv->ports[j].dp->slave == ndev)
+ break;
+ }
+ if (j >= priv->cpu_port) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (info->linking) {
+ lag_upper_info = info->upper_info;
+ if (!priv->lag_devs[i])
+ priv->lag_devs[i] = upper;
+ err = rtl83xx_lag_add(priv->ds, i, priv->ports[j].dp->index, lag_upper_info);
+ if (err) {
+ err = -EINVAL;
+ goto out;
+ }
+ } else {
+ if (!priv->lag_devs[i])
+ err = -EINVAL;
+ err = rtl83xx_lag_del(priv->ds, i, priv->ports[j].dp->index);
+ if (err) {
+ err = -EINVAL;
+ goto out;
+ }
+ if (!priv->lags_port_members[i])
+ priv->lag_devs[i] = NULL;
+ }
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+ return 0;
+}
+
+/*
+ * Is the lower network device a DSA slave network device of our RTL930X-switch?
+ * Unfortunately we cannot just follow dev->dsa_ptr as this is only set for the
+ * DSA master device.
+ */
+int rtl83xx_port_is_under(const struct net_device * dev, struct rtl838x_switch_priv *priv)
+{
+ int i;
+
+// TODO: On 5.12:
+// if(!dsa_slave_dev_check(dev)) {
+// netdev_info(dev, "%s: not a DSA device.\n", __func__);
+// return -EINVAL;
+// }
+
+ for (i = 0; i < priv->cpu_port; i++) {
+ if (!priv->ports[i].dp)
+ continue;
+ if (priv->ports[i].dp->slave == dev)
+ return i;
+ }
+ return -EINVAL;
+}
+
+static int rtl83xx_netdevice_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct rtl838x_switch_priv *priv;
+ int err = 0;
+
+ pr_debug("In: %s, event: %lu\n", __func__, event);
+
+ if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
+ return NOTIFY_DONE;
+
+ priv = container_of(this, struct rtl838x_switch_priv, nb);
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ err = rtl83xx_handle_changeupper(priv, ndev, ptr);
+ break;
+ }
+
+ if (err)
+ return err;
+
+ return NOTIFY_DONE;
+}
+
+const static struct rhashtable_params route_ht_params = {
+ .key_len = sizeof(u32),
+ .key_offset = offsetof(struct rtl83xx_route, gw_ip),
+ .head_offset = offsetof(struct rtl83xx_route, linkage),
+};
+
+/*
+ * Updates an L3 next hop entry in the ROUTING table
+ */
+static int rtl83xx_l3_nexthop_update(struct rtl838x_switch_priv *priv, __be32 ip_addr, u64 mac)
+{
+ struct rtl83xx_route *r;
+ struct rhlist_head *tmp, *list;
+
+ rcu_read_lock();
+ list = rhltable_lookup(&priv->routes, &ip_addr, route_ht_params);
+ if (!list) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ rhl_for_each_entry_rcu(r, tmp, list, linkage) {
+ pr_info("%s: Setting up fwding: ip %pI4, GW mac %016llx\n",
+ __func__, &ip_addr, mac);
+
+ // Reads the ROUTING table entry associated with the route
+ priv->r->route_read(r->id, r);
+ pr_info("Route with id %d to %pI4 / %d\n", r->id, &r->dst_ip, r->prefix_len);
+
+ r->nh.mac = r->nh.gw = mac;
+ r->nh.port = priv->port_ignore;
+ r->nh.id = r->id;
+
+ // Do we need to explicitly add a DMAC entry with the route's nh index?
+ if (priv->r->set_l3_egress_mac)
+ priv->r->set_l3_egress_mac(r->id, mac);
+
+ // Update ROUTING table: map gateway-mac and switch-mac id to route id
+ rtl83xx_l2_nexthop_add(priv, &r->nh);
+
+ r->attr.valid = true;
+ r->attr.action = ROUTE_ACT_FORWARD;
+ r->attr.type = 0;
+ r->attr.hit = false; // Reset route-used indicator
+
+ // Add PIE entry with dst_ip and prefix_len
+ r->pr.dip = r->dst_ip;
+ r->pr.dip_m = inet_make_mask(r->prefix_len);
+
+ if (r->is_host_route) {
+ int slot = priv->r->find_l3_slot(r, false);
+
+ pr_info("%s: Got slot for route: %d\n", __func__, slot);
+ priv->r->host_route_write(slot, r);
+ } else {
+ priv->r->route_write(r->id, r);
+ r->pr.fwd_sel = true;
+ r->pr.fwd_data = r->nh.l2_id;
+ r->pr.fwd_act = PIE_ACT_ROUTE_UC;
+ }
+
+ if (priv->r->set_l3_nexthop)
+ priv->r->set_l3_nexthop(r->nh.id, r->nh.l2_id, r->nh.if_id);
+
+ if (r->pr.id < 0) {
+ r->pr.packet_cntr = rtl83xx_packet_cntr_alloc(priv);
+ if (r->pr.packet_cntr >= 0) {
+ pr_info("Using packet counter %d\n", r->pr.packet_cntr);
+ r->pr.log_sel = true;
+ r->pr.log_data = r->pr.packet_cntr;
+ }
+ priv->r->pie_rule_add(priv, &r->pr);
+ } else {
+ int pkts = priv->r->packet_cntr_read(r->pr.packet_cntr);
+ pr_info("%s: total packets: %d\n", __func__, pkts);
+
+ priv->r->pie_rule_write(priv, r->pr.id, &r->pr);
+ }
+ }
+ rcu_read_unlock();
+ return 0;
+}
+
+static int rtl83xx_port_ipv4_resolve(struct rtl838x_switch_priv *priv,
+ struct net_device *dev, __be32 ip_addr)
+{
+ struct neighbour *n = neigh_lookup(&arp_tbl, &ip_addr, dev);
+ int err = 0;
+ u64 mac;
+
+ if (!n) {
+ n = neigh_create(&arp_tbl, &ip_addr, dev);
+ if (IS_ERR(n))
+ return PTR_ERR(n);
+ }
+
+ /* If the neigh is already resolved, then go ahead and
+ * install the entry, otherwise start the ARP process to
+ * resolve the neigh.
+ */
+ if (n->nud_state & NUD_VALID) {
+ mac = ether_addr_to_u64(n->ha);
+ pr_info("%s: resolved mac: %016llx\n", __func__, mac);
+ rtl83xx_l3_nexthop_update(priv, ip_addr, mac);
+ } else {
+ pr_info("%s: need to wait\n", __func__);
+ neigh_event_send(n, NULL);
+ }
+
+ neigh_release(n);
+ return err;
+}
+
+struct rtl83xx_walk_data {
+ struct rtl838x_switch_priv *priv;
+ int port;
+};
+
+static int rtl83xx_port_lower_walk(struct net_device *lower, struct netdev_nested_priv *_priv)
+{
+ struct rtl83xx_walk_data *data = (struct rtl83xx_walk_data *)_priv->data;
+ struct rtl838x_switch_priv *priv = data->priv;
+ int ret = 0;
+ int index;
+
+ index = rtl83xx_port_is_under(lower, priv);
+ data->port = index;
+ if (index >= 0) {
+ pr_debug("Found DSA-port, index %d\n", index);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+int rtl83xx_port_dev_lower_find(struct net_device *dev, struct rtl838x_switch_priv *priv)
+{
+ struct rtl83xx_walk_data data;
+ struct netdev_nested_priv _priv;
+
+ data.priv = priv;
+ data.port = 0;
+ _priv.data = (void *)&data;
+
+ netdev_walk_all_lower_dev(dev, rtl83xx_port_lower_walk, &_priv);
+
+ return data.port;
+}
+
+static struct rtl83xx_route *rtl83xx_route_alloc(struct rtl838x_switch_priv *priv, u32 ip)
+{
+ struct rtl83xx_route *r;
+ int idx = 0, err;
+
+ mutex_lock(&priv->reg_mutex);
+
+ idx = find_first_zero_bit(priv->route_use_bm, MAX_ROUTES);
+ pr_debug("%s id: %d, ip %pI4\n", __func__, idx, &ip);
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r) {
+ mutex_unlock(&priv->reg_mutex);
+ return r;
+ }
+
+ r->id = idx;
+ r->gw_ip = ip;
+ r->pr.id = -1; // We still need to allocate a rule in HW
+ r->is_host_route = false;
+
+ err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
+ if (err) {
+ pr_err("Could not insert new rule\n");
+ mutex_unlock(&priv->reg_mutex);
+ goto out_free;
+ }
+
+ set_bit(idx, priv->route_use_bm);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return r;
+
+out_free:
+ kfree(r);
+ return NULL;
+}
+
+
+static struct rtl83xx_route *rtl83xx_host_route_alloc(struct rtl838x_switch_priv *priv, u32 ip)
+{
+ struct rtl83xx_route *r;
+ int idx = 0, err;
+
+ mutex_lock(&priv->reg_mutex);
+
+ idx = find_first_zero_bit(priv->host_route_use_bm, MAX_HOST_ROUTES);
+ pr_debug("%s id: %d, ip %pI4\n", __func__, idx, &ip);
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r) {
+ mutex_unlock(&priv->reg_mutex);
+ return r;
+ }
+
+ /* We require a unique route ID irrespective of whether it is a prefix or host
+ * route (on RTL93xx) as we use this ID to associate a DMAC and next-hop entry */
+ r->id = idx + MAX_ROUTES;
+
+ r->gw_ip = ip;
+ r->pr.id = -1; // We still need to allocate a rule in HW
+ r->is_host_route = true;
+
+ err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
+ if (err) {
+ pr_err("Could not insert new rule\n");
+ mutex_unlock(&priv->reg_mutex);
+ goto out_free;
+ }
+
+ set_bit(idx, priv->host_route_use_bm);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return r;
+
+out_free:
+ kfree(r);
+ return NULL;
+}
+
+
+
+static void rtl83xx_route_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_route *r)
+{
+ int id;
+
+ if (rhltable_remove(&priv->routes, &r->linkage, route_ht_params))
+ dev_warn(priv->dev, "Could not remove route\n");
+
+ if (r->is_host_route) {
+ id = priv->r->find_l3_slot(r, false);
+ pr_debug("%s: Got id for host route: %d\n", __func__, id);
+ r->attr.valid = false;
+ priv->r->host_route_write(id, r);
+ clear_bit(r->id - MAX_ROUTES, priv->host_route_use_bm);
+ } else {
+ // If there is a HW representation of the route, delete it
+ if (priv->r->route_lookup_hw) {
+ id = priv->r->route_lookup_hw(r);
+ pr_info("%s: Got id for prefix route: %d\n", __func__, id);
+ r->attr.valid = false;
+ priv->r->route_write(id, r);
+ }
+ clear_bit(r->id, priv->route_use_bm);
+ }
+
+ kfree(r);
+}
+
+static int rtl83xx_fib4_del(struct rtl838x_switch_priv *priv,
+ struct fib_entry_notifier_info *info)
+{
+ struct fib_nh *nh = fib_info_nh(info->fi, 0);
+ struct rtl83xx_route *r;
+ struct rhlist_head *tmp, *list;
+
+ pr_debug("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len);
+ rcu_read_lock();
+ list = rhltable_lookup(&priv->routes, &nh->fib_nh_gw4, route_ht_params);
+ if (!list) {
+ rcu_read_unlock();
+ pr_err("%s: no such gateway: %pI4\n", __func__, &nh->fib_nh_gw4);
+ return -ENOENT;
+ }
+ rhl_for_each_entry_rcu(r, tmp, list, linkage) {
+ if (r->dst_ip == info->dst && r->prefix_len == info->dst_len) {
+ pr_info("%s: found a route with id %d, nh-id %d\n",
+ __func__, r->id, r->nh.id);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ rtl83xx_l2_nexthop_rm(priv, &r->nh);
+
+ pr_debug("%s: Releasing packet counter %d\n", __func__, r->pr.packet_cntr);
+ set_bit(r->pr.packet_cntr, priv->packet_cntr_use_bm);
+ priv->r->pie_rule_rm(priv, &r->pr);
+
+ rtl83xx_route_rm(priv, r);
+
+ nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+
+ return 0;
+}
+
+/*
+ * On the RTL93xx, an L3 termination endpoint MAC address on which the router waits
+ * for packets to be routed needs to be allocated.
+ */
+static int rtl83xx_alloc_router_mac(struct rtl838x_switch_priv *priv, u64 mac)
+{
+ int i, free_mac = -1;
+ struct rtl93xx_rt_mac m;
+
+ mutex_lock(&priv->reg_mutex);
+ for (i = 0; i < MAX_ROUTER_MACS; i++) {
+ priv->r->get_l3_router_mac(i, &m);
+ if (free_mac < 0 && !m.valid) {
+ free_mac = i;
+ continue;
+ }
+ if (m.valid && m.mac == mac) {
+ free_mac = i;
+ break;
+ }
+ }
+
+ if (free_mac < 0) {
+ pr_err("No free router MACs, cannot offload\n");
+ mutex_unlock(&priv->reg_mutex);
+ return -1;
+ }
+
+ m.valid = true;
+ m.mac = mac;
+ m.p_type = 0; // An individual port, not a trunk port
+ m.p_id = 0x3f; // Listen on any port
+ m.p_id_mask = 0;
+ m.vid = 0; // Listen on any VLAN...
+ m.vid_mask = 0; // ... so mask needs to be 0
+ m.mac_mask = 0xffffffffffffULL; // We want an exact match of the interface MAC
+ m.action = L3_FORWARD; // Route the packet
+ priv->r->set_l3_router_mac(free_mac, &m);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+static int rtl83xx_alloc_egress_intf(struct rtl838x_switch_priv *priv, u64 mac, int vlan)
+{
+ int i, free_mac = -1;
+ struct rtl838x_l3_intf intf;
+ u64 m;
+
+ mutex_lock(&priv->reg_mutex);
+ for (i = 0; i < MAX_SMACS; i++) {
+ m = priv->r->get_l3_egress_mac(L3_EGRESS_DMACS + i);
+ if (free_mac < 0 && !m) {
+ free_mac = i;
+ continue;
+ }
+ if (m == mac) {
+ mutex_unlock(&priv->reg_mutex);
+ return i;
+ }
+ }
+
+ if (free_mac < 0) {
+ pr_err("No free egress interface, cannot offload\n");
+ mutex_unlock(&priv->reg_mutex);
+ return -1;
+ }
+
+ // Set up default egress interface 1
+ intf.vid = vlan;
+ intf.smac_idx = free_mac;
+ intf.ip4_mtu_id = 1;
+ intf.ip6_mtu_id = 1;
+ intf.ttl_scope = 1; // TTL
+ intf.hl_scope = 1; // Hop Limit
+ intf.ip4_icmp_redirect = intf.ip6_icmp_redirect = 2; // FORWARD
+ intf.ip4_pbr_icmp_redirect = intf.ip6_pbr_icmp_redirect = 2; // FORWARD;
+ priv->r->set_l3_egress_intf(free_mac, &intf);
+
+ priv->r->set_l3_egress_mac(L3_EGRESS_DMACS + free_mac, mac);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return free_mac;
+}
+
+static int rtl83xx_fib4_add(struct rtl838x_switch_priv *priv,
+ struct fib_entry_notifier_info *info)
+{
+ struct fib_nh *nh = fib_info_nh(info->fi, 0);
+ struct net_device *dev = fib_info_nh(info->fi, 0)->fib_nh_dev;
+ int port;
+ struct rtl83xx_route *r;
+ bool to_localhost;
+ int vlan = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 0;
+
+ pr_debug("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len);
+ if (!info->dst) {
+ pr_info("Not offloading default route for now\n");
+ return 0;
+ }
+
+ pr_debug("GW: %pI4, interface name %s, mac %016llx, vlan %d\n", &nh->fib_nh_gw4, dev->name,
+ ether_addr_to_u64(dev->dev_addr), vlan
+ );
+
+ port = rtl83xx_port_dev_lower_find(dev, priv);
+ if (port < 0)
+ return -1;
+
+ // For now we only work with routes that have a gateway and are not ourself
+// if ((!nh->fib_nh_gw4) && (info->dst_len != 32))
+// return 0;
+
+ if ((info->dst & 0xff) == 0xff)
+ return 0;
+
+ // Do not offload routes to 192.168.100.x
+ if ((info->dst & 0xffffff00) == 0xc0a86400)
+ return 0;
+
+ // Do not offload routes to 127.x.x.x
+ if ((info->dst & 0xff000000) == 0x7f000000)
+ return 0;
+
+ // Allocate route or host-route (entry if hardware supports this)
+ if (info->dst_len == 32 && priv->r->host_route_write)
+ r = rtl83xx_host_route_alloc(priv, nh->fib_nh_gw4);
+ else
+ r = rtl83xx_route_alloc(priv, nh->fib_nh_gw4);
+
+ if (!r) {
+ pr_err("%s: No more free route entries\n", __func__);
+ return -1;
+ }
+
+ r->dst_ip = info->dst;
+ r->prefix_len = info->dst_len;
+ r->nh.rvid = vlan;
+ to_localhost = !nh->fib_nh_gw4;
+
+ if (priv->r->set_l3_router_mac) {
+ u64 mac = ether_addr_to_u64(dev->dev_addr);
+
+ pr_debug("Local route and router mac %016llx\n", mac);
+
+ if (rtl83xx_alloc_router_mac(priv, mac))
+ goto out_free_rt;
+
+ // vid = 0: Do not care about VID
+ r->nh.if_id = rtl83xx_alloc_egress_intf(priv, mac, vlan);
+ if (r->nh.if_id < 0)
+ goto out_free_rmac;
+
+ if (to_localhost) {
+ int slot;
+
+ r->nh.mac = mac;
+ r->nh.port = priv->port_ignore;
+ r->attr.valid = true;
+ r->attr.action = ROUTE_ACT_TRAP2CPU;
+ r->attr.type = 0;
+
+ slot = priv->r->find_l3_slot(r, false);
+ pr_debug("%s: Got slot for route: %d\n", __func__, slot);
+ priv->r->host_route_write(slot, r);
+ }
+ }
+
+ // We need to resolve the mac address of the GW
+ if (!to_localhost)
+ rtl83xx_port_ipv4_resolve(priv, dev, nh->fib_nh_gw4);
+
+ nh->fib_nh_flags |= RTNH_F_OFFLOAD;
+
+ return 0;
+
+out_free_rmac:
+out_free_rt:
+ return 0;
+}
+
+static int rtl83xx_fib6_add(struct rtl838x_switch_priv *priv,
+ struct fib6_entry_notifier_info *info)
+{
+ pr_debug("In %s\n", __func__);
+// nh->fib_nh_flags |= RTNH_F_OFFLOAD;
+ return 0;
+}
+
+struct net_event_work {
+ struct work_struct work;
+ struct rtl838x_switch_priv *priv;
+ u64 mac;
+ u32 gw_addr;
+};
+
+static void rtl83xx_net_event_work_do(struct work_struct *work)
+{
+ struct net_event_work *net_work =
+ container_of(work, struct net_event_work, work);
+ struct rtl838x_switch_priv *priv = net_work->priv;
+
+ rtl83xx_l3_nexthop_update(priv, net_work->gw_addr, net_work->mac);
+}
+
+static int rtl83xx_netevent_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct rtl838x_switch_priv *priv;
+ struct net_device *dev;
+ struct neighbour *n = ptr;
+ int err = 0, port;
+ struct net_event_work *net_work;
+
+ priv = container_of(this, struct rtl838x_switch_priv, ne_nb);
+
+ net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+ if (!net_work)
+ return NOTIFY_BAD;
+
+ INIT_WORK(&net_work->work, rtl83xx_net_event_work_do);
+ net_work->priv = priv;
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ if (n->tbl != &arp_tbl)
+ return NOTIFY_DONE;
+ dev = n->dev;
+ port = rtl83xx_port_dev_lower_find(dev, priv);
+ if (port < 0 || !(n->nud_state & NUD_VALID)) {
+ pr_debug("%s: Neigbour invalid, not updating\n", __func__);
+ kfree(net_work);
+ return NOTIFY_DONE;
+ }
+
+ net_work->mac = ether_addr_to_u64(n->ha);
+ net_work->gw_addr = *(__be32 *) n->primary_key;
+
+ pr_debug("%s: updating neighbour on port %d, mac %016llx\n",
+ __func__, port, net_work->mac);
+ schedule_work(&net_work->work);
+ if (err)
+ netdev_warn(dev, "failed to handle neigh update (err %d)\n", err);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+struct rtl83xx_fib_event_work {
+ struct work_struct work;
+ union {
+ struct fib_entry_notifier_info fen_info;
+ struct fib6_entry_notifier_info fen6_info;
+ struct fib_rule_notifier_info fr_info;
+ };
+ struct rtl838x_switch_priv *priv;
+ bool is_fib6;
+ unsigned long event;
+};
+
+static void rtl83xx_fib_event_work_do(struct work_struct *work)
+{
+ struct rtl83xx_fib_event_work *fib_work =
+ container_of(work, struct rtl83xx_fib_event_work, work);
+ struct rtl838x_switch_priv *priv = fib_work->priv;
+ struct fib_rule *rule;
+ int err;
+
+ /* Protect internal structures from changes */
+ rtnl_lock();
+ pr_debug("%s: doing work, event %ld\n", __func__, fib_work->event);
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_ADD:
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_APPEND:
+ if (fib_work->is_fib6) {
+ err = rtl83xx_fib6_add(priv, &fib_work->fen6_info);
+ } else {
+ err = rtl83xx_fib4_add(priv, &fib_work->fen_info);
+ fib_info_put(fib_work->fen_info.fi);
+ }
+ if (err)
+ pr_err("%s: FIB4 failed\n", __func__);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ rtl83xx_fib4_del(priv, &fib_work->fen_info);
+ fib_info_put(fib_work->fen_info.fi);
+ break;
+ case FIB_EVENT_RULE_ADD:
+ case FIB_EVENT_RULE_DEL:
+ rule = fib_work->fr_info.rule;
+ if (!fib4_rule_default(rule))
+ pr_err("%s: FIB4 default rule failed\n", __func__);
+ fib_rule_put(rule);
+ break;
+ }
+ rtnl_unlock();
+ kfree(fib_work);
+}
+
+/* Called with rcu_read_lock() */
+static int rtl83xx_fib_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ struct fib_notifier_info *info = ptr;
+ struct rtl838x_switch_priv *priv;
+ struct rtl83xx_fib_event_work *fib_work;
+
+ if ((info->family != AF_INET && info->family != AF_INET6 &&
+ info->family != RTNL_FAMILY_IPMR &&
+ info->family != RTNL_FAMILY_IP6MR))
+ return NOTIFY_DONE;
+
+ priv = container_of(this, struct rtl838x_switch_priv, fib_nb);
+
+ fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+ if (!fib_work)
+ return NOTIFY_BAD;
+
+ INIT_WORK(&fib_work->work, rtl83xx_fib_event_work_do);
+ fib_work->priv = priv;
+ fib_work->event = event;
+ fib_work->is_fib6 = false;
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_ADD:
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_APPEND:
+ case FIB_EVENT_ENTRY_DEL:
+ pr_debug("%s: FIB_ENTRY ADD/DEL, event %ld\n", __func__, event);
+ if (info->family == AF_INET) {
+ struct fib_entry_notifier_info *fen_info = ptr;
+
+ if (fen_info->fi->fib_nh_is_v6) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "IPv6 gateway with IPv4 route is not supported");
+ kfree(fib_work);
+ return notifier_from_errno(-EINVAL);
+ }
+
+ memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
+ /* Take reference on fib_info to prevent it from being
+ * freed while work is queued. Release it afterwards.
+ */
+ fib_info_hold(fib_work->fen_info.fi);
+
+ } else if (info->family == AF_INET6) {
+ struct fib6_entry_notifier_info *fen6_info = ptr;
+ pr_warn("%s: FIB_RULE ADD/DEL for IPv6 not supported\n", __func__);
+ kfree(fib_work);
+ return NOTIFY_DONE;
+ }
+ break;
+
+ case FIB_EVENT_RULE_ADD:
+ case FIB_EVENT_RULE_DEL:
+ pr_debug("%s: FIB_RULE ADD/DEL, event: %ld\n", __func__, event);
+ memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
+ fib_rule_get(fib_work->fr_info.rule);
+ break;
+ }
+
+ schedule_work(&fib_work->work);
+
+ return NOTIFY_DONE;
+}
+
+static int __init rtl83xx_sw_probe(struct platform_device *pdev)
+{
+ int err = 0, i;
+ struct rtl838x_switch_priv *priv;
+ struct device *dev = &pdev->dev;
+ u64 bpdu_mask;
+
+ pr_debug("Probing RTL838X switch device\n");
+ if (!pdev->dev.of_node) {
+ dev_err(dev, "No DT found\n");
+ return -EINVAL;
+ }
+
+ // Initialize access to RTL switch tables
+ rtl_table_init();
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
+
+ if (!priv->ds)
+ return -ENOMEM;
+ priv->ds->dev = dev;
+ priv->ds->priv = priv;
+ priv->ds->ops = &rtl83xx_switch_ops;
+ priv->dev = dev;
+
+ mutex_init(&priv->reg_mutex);
+
+ priv->family_id = soc_info.family;
+ priv->id = soc_info.id;
+ switch(soc_info.family) {
+ case RTL8380_FAMILY_ID:
+ priv->ds->ops = &rtl83xx_switch_ops;
+ priv->cpu_port = RTL838X_CPU_PORT;
+ priv->port_mask = 0x1f;
+ priv->port_width = 1;
+ priv->irq_mask = 0x0FFFFFFF;
+ priv->r = &rtl838x_reg;
+ priv->ds->num_ports = 29;
+ priv->fib_entries = 8192;
+ rtl8380_get_version(priv);
+ priv->n_lags = 8;
+ priv->l2_bucket_size = 4;
+ priv->n_pie_blocks = 12;
+ priv->port_ignore = 0x1f;
+ priv->n_counters = 128;
+ break;
+ case RTL8390_FAMILY_ID:
+ priv->ds->ops = &rtl83xx_switch_ops;
+ priv->cpu_port = RTL839X_CPU_PORT;
+ priv->port_mask = 0x3f;
+ priv->port_width = 2;
+ priv->irq_mask = 0xFFFFFFFFFFFFFULL;
+ priv->r = &rtl839x_reg;
+ priv->ds->num_ports = 53;
+ priv->fib_entries = 16384;
+ rtl8390_get_version(priv);
+ priv->n_lags = 16;
+ priv->l2_bucket_size = 4;
+ priv->n_pie_blocks = 18;
+ priv->port_ignore = 0x3f;
+ priv->n_counters = 1024;
+ break;
+ case RTL9300_FAMILY_ID:
+ priv->ds->ops = &rtl930x_switch_ops;
+ priv->cpu_port = RTL930X_CPU_PORT;
+ priv->port_mask = 0x1f;
+ priv->port_width = 1;
+ priv->irq_mask = 0x0FFFFFFF;
+ priv->r = &rtl930x_reg;
+ priv->ds->num_ports = 29;
+ priv->fib_entries = 16384;
+ priv->version = RTL8390_VERSION_A;
+ priv->n_lags = 16;
+ sw_w32(1, RTL930X_ST_CTRL);
+ priv->l2_bucket_size = 8;
+ priv->n_pie_blocks = 16;
+ priv->port_ignore = 0x3f;
+ priv->n_counters = 2048;
+ break;
+ case RTL9310_FAMILY_ID:
+ priv->ds->ops = &rtl930x_switch_ops;
+ priv->cpu_port = RTL931X_CPU_PORT;
+ priv->port_mask = 0x3f;
+ priv->port_width = 2;
+ priv->irq_mask = 0xFFFFFFFFFFFFFULL;
+ priv->r = &rtl931x_reg;
+ priv->ds->num_ports = 57;
+ priv->fib_entries = 16384;
+ priv->version = RTL8390_VERSION_A;
+ priv->n_lags = 16;
+ priv->l2_bucket_size = 8;
+ break;
+ }
+ pr_debug("Chip version %c\n", priv->version);
+
+ err = rtl83xx_mdio_probe(priv);
+ if (err) {
+ /* Probing fails the 1st time because of missing ethernet driver
+ * initialization. Use this to disable traffic in case the bootloader left it on
+ */
+ return err;
+ }
+ err = dsa_register_switch(priv->ds);
+ if (err) {
+ dev_err(dev, "Error registering switch: %d\n", err);
+ return err;
+ }
+
+ /*
+ * dsa_to_port returns dsa_port from the port list in
+ * dsa_switch_tree, the tree is built when the switch
+ * is registered by dsa_register_switch
+ */
+ for (i = 0; i <= priv->cpu_port; i++)
+ priv->ports[i].dp = dsa_to_port(priv->ds, i);
+
+ /* Enable link and media change interrupts. Are the SERDES masks needed? */
+ sw_w32_mask(0, 3, priv->r->isr_glb_src);
+
+ priv->r->set_port_reg_le(priv->irq_mask, priv->r->isr_port_link_sts_chg);
+ priv->r->set_port_reg_le(priv->irq_mask, priv->r->imr_port_link_sts_chg);
+
+ priv->link_state_irq = platform_get_irq(pdev, 0);
+ pr_info("LINK state irq: %d\n", priv->link_state_irq);
+ switch (priv->family_id) {
+ case RTL8380_FAMILY_ID:
+ err = request_irq(priv->link_state_irq, rtl838x_switch_irq,
+ IRQF_SHARED, "rtl838x-link-state", priv->ds);
+ break;
+ case RTL8390_FAMILY_ID:
+ err = request_irq(priv->link_state_irq, rtl839x_switch_irq,
+ IRQF_SHARED, "rtl839x-link-state", priv->ds);
+ break;
+ case RTL9300_FAMILY_ID:
+ err = request_irq(priv->link_state_irq, rtl930x_switch_irq,
+ IRQF_SHARED, "rtl930x-link-state", priv->ds);
+ break;
+ case RTL9310_FAMILY_ID:
+ err = request_irq(priv->link_state_irq, rtl931x_switch_irq,
+ IRQF_SHARED, "rtl931x-link-state", priv->ds);
+ break;
+ }
+ if (err) {
+ dev_err(dev, "Error setting up switch interrupt.\n");
+ /* Need to free allocated switch here */
+ }
+
+ /* Enable interrupts for switch, on RTL931x, the IRQ is always on globally */
+ if (soc_info.family != RTL9310_FAMILY_ID)
+ sw_w32(0x1, priv->r->imr_glb);
+
+ rtl83xx_get_l2aging(priv);
+
+ rtl83xx_setup_qos(priv);
+
+ priv->r->l3_setup(priv);
+
+ /* Clear all destination ports for mirror groups */
+ for (i = 0; i < 4; i++)
+ priv->mirror_group_ports[i] = -1;
+
+ /*
+ * Register netdevice event callback to catch changes in link aggregation groups
+ */
+ priv->nb.notifier_call = rtl83xx_netdevice_event;
+ if (register_netdevice_notifier(&priv->nb)) {
+ priv->nb.notifier_call = NULL;
+ dev_err(dev, "Failed to register LAG netdev notifier\n");
+ goto err_register_nb;
+ }
+
+ // Initialize hash table for L3 routing
+ rhltable_init(&priv->routes, &route_ht_params);
+
+ /*
+ * Register netevent notifier callback to catch notifications about neighboring
+ * changes to update nexthop entries for L3 routing.
+ */
+ priv->ne_nb.notifier_call = rtl83xx_netevent_event;
+ if (register_netevent_notifier(&priv->ne_nb)) {
+ priv->ne_nb.notifier_call = NULL;
+ dev_err(dev, "Failed to register netevent notifier\n");
+ goto err_register_ne_nb;
+ }
+
+ priv->fib_nb.notifier_call = rtl83xx_fib_event;
+
+ /*
+ * Register Forwarding Information Base notifier to offload routes where possible.
+ * Only FIBs pointing to our own netdevs are programmed into
+ * the device, so no need to pass a callback.
+ */
+ err = register_fib_notifier(&init_net, &priv->fib_nb, NULL, NULL);
+ if (err)
+ goto err_register_fib_nb;
+
+ // TODO: put this into l2_setup()
+ // Flood BPDUs to all ports including cpu-port
+ if (soc_info.family != RTL9300_FAMILY_ID) {
+ bpdu_mask = soc_info.family == RTL8380_FAMILY_ID ? 0x1FFFFFFF : 0x1FFFFFFFFFFFFF;
+ priv->r->set_port_reg_be(bpdu_mask, priv->r->rma_bpdu_fld_pmask);
+
+ // TRAP 802.1X frames (EAPOL) to the CPU-Port, bypass STP and VLANs
+ sw_w32(7, priv->r->spcl_trap_eapol_ctrl);
+
+ rtl838x_dbgfs_init(priv);
+ } else {
+ rtl930x_dbgfs_init(priv);
+ }
+
+ return 0;
+
+err_register_fib_nb:
+ unregister_netevent_notifier(&priv->ne_nb);
+err_register_ne_nb:
+ unregister_netdevice_notifier(&priv->nb);
+err_register_nb:
+ return err;
+}
+
+static int rtl83xx_sw_remove(struct platform_device *pdev)
+{
+ // TODO:
+ pr_debug("Removing platform driver for rtl83xx-sw\n");
+ return 0;
+}
+
+static const struct of_device_id rtl83xx_switch_of_ids[] = {
+ { .compatible = "realtek,rtl83xx-switch"},
+ { /* sentinel */ }
+};
+
+
+MODULE_DEVICE_TABLE(of, rtl83xx_switch_of_ids);
+
+static struct platform_driver rtl83xx_switch_driver = {
+ .probe = rtl83xx_sw_probe,
+ .remove = rtl83xx_sw_remove,
+ .driver = {
+ .name = "rtl83xx-switch",
+ .pm = NULL,
+ .of_match_table = rtl83xx_switch_of_ids,
+ },
+};
+
+module_platform_driver(rtl83xx_switch_driver);
+
+MODULE_AUTHOR("B. Koblitz");
+MODULE_DESCRIPTION("RTL83XX SoC Switch Driver");
+MODULE_LICENSE("GPL");
diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/debugfs.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/debugfs.c
new file mode 100644
index 0000000000..9a7c7714c6
--- /dev/null
+++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/debugfs.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+
+#include <asm/mach-rtl838x/mach-rtl83xx.h>
+#include "rtl83xx.h"
+
+#define RTL838X_DRIVER_NAME "rtl838x"
+
+#define RTL8380_LED_GLB_CTRL (0xA000)
+#define RTL8380_LED_MODE_SEL (0x1004)
+#define RTL8380_LED_MODE_CTRL (0xA004)
+#define RTL8380_LED_P_EN_CTRL (0xA008)
+#define RTL8380_LED_SW_CTRL (0xA00C)
+#define RTL8380_LED0_SW_P_EN_CTRL (0xA010)
+#define RTL8380_LED1_SW_P_EN_CTRL (0xA014)
+#define RTL8380_LED2_SW_P_EN_CTRL (0xA018)
+#define RTL8380_LED_SW_P_CTRL(p) (0xA01C + (((p) << 2)))
+
+#define RTL8390_LED_GLB_CTRL (0x00E4)
+#define RTL8390_LED_SET_2_3_CTRL (0x00E8)
+#define RTL8390_LED_SET_0_1_CTRL (0x00EC)
+#define RTL8390_LED_COPR_SET_SEL_CTRL(p) (0x00F0 + (((p >> 4) << 2)))
+#define RTL8390_LED_FIB_SET_SEL_CTRL(p) (0x0100 + (((p >> 4) << 2)))
+#define RTL8390_LED_COPR_PMASK_CTRL(p) (0x0110 + (((p >> 5) << 2)))
+#define RTL8390_LED_FIB_PMASK_CTRL(p) (0x00118 + (((p >> 5) << 2)))
+#define RTL8390_LED_COMBO_CTRL(p) (0x0120 + (((p >> 5) << 2)))
+#define RTL8390_LED_SW_CTRL (0x0128)
+#define RTL8390_LED_SW_P_EN_CTRL(p) (0x012C + (((p / 10) << 2)))
+#define RTL8390_LED_SW_P_CTRL(p) (0x0144 + (((p) << 2)))
+
+#define RTL838X_MIR_QID_CTRL(grp) (0xAD44 + (((grp) << 2)))
+#define RTL838X_MIR_RSPAN_VLAN_CTRL(grp) (0xA340 + (((grp) << 2)))
+#define RTL838X_MIR_RSPAN_VLAN_CTRL_MAC(grp) (0xAA70 + (((grp) << 2)))
+#define RTL838X_MIR_RSPAN_TX_CTRL (0xA350)
+#define RTL838X_MIR_RSPAN_TX_TAG_RM_CTRL (0xAA80)
+#define RTL838X_MIR_RSPAN_TX_TAG_EN_CTRL (0xAA84)
+#define RTL839X_MIR_RSPAN_VLAN_CTRL(grp) (0xA340 + (((grp) << 2)))
+#define RTL839X_MIR_RSPAN_TX_CTRL (0x69b0)
+#define RTL839X_MIR_RSPAN_TX_TAG_RM_CTRL (0x2550)
+#define RTL839X_MIR_RSPAN_TX_TAG_EN_CTRL (0x2554)
+#define RTL839X_MIR_SAMPLE_RATE_CTRL (0x2558)
+
+#define RTL838X_STAT_PRVTE_DROP_COUNTERS (0x6A00)
+#define RTL839X_STAT_PRVTE_DROP_COUNTERS (0x3E00)
+#define RTL930X_STAT_PRVTE_DROP_COUNTERS (0xB5B8)
+#define RTL931X_STAT_PRVTE_DROP_COUNTERS (0xd800)
+
+int rtl83xx_port_get_stp_state(struct rtl838x_switch_priv *priv, int port);
+void rtl83xx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
+void rtl83xx_fast_age(struct dsa_switch *ds, int port);
+u32 rtl838x_get_egress_rate(struct rtl838x_switch_priv *priv, int port);
+u32 rtl839x_get_egress_rate(struct rtl838x_switch_priv *priv, int port);
+int rtl838x_set_egress_rate(struct rtl838x_switch_priv *priv, int port, u32 rate);
+int rtl839x_set_egress_rate(struct rtl838x_switch_priv *priv, int port, u32 rate);
+
+
+const char *rtl838x_drop_cntr[] = {
+ "ALE_TX_GOOD_PKTS", "MAC_RX_DROP", "ACL_FWD_DROP", "HW_ATTACK_PREVENTION_DROP",
+ "RMA_DROP", "VLAN_IGR_FLTR_DROP", "INNER_OUTER_CFI_EQUAL_1_DROP", "PORT_MOVE_DROP",
+ "NEW_SA_DROP", "MAC_LIMIT_SYS_DROP", "MAC_LIMIT_VLAN_DROP", "MAC_LIMIT_PORT_DROP",
+ "SWITCH_MAC_DROP", "ROUTING_EXCEPTION_DROP", "DA_LKMISS_DROP", "RSPAN_DROP",
+ "ACL_LKMISS_DROP", "ACL_DROP", "INBW_DROP", "IGR_METER_DROP",
+ "ACCEPT_FRAME_TYPE_DROP", "STP_IGR_DROP", "INVALID_SA_DROP", "SA_BLOCKING_DROP",
+ "DA_BLOCKING_DROP", "L2_INVALID_DPM_DROP", "MCST_INVALID_DPM_DROP", "RX_FLOW_CONTROL_DROP",
+ "STORM_SPPRS_DROP", "LALS_DROP", "VLAN_EGR_FILTER_DROP", "STP_EGR_DROP",
+ "SRC_PORT_FILTER_DROP", "PORT_ISOLATION_DROP", "ACL_FLTR_DROP", "MIRROR_FLTR_DROP",
+ "TX_MAX_DROP", "LINK_DOWN_DROP", "FLOW_CONTROL_DROP", "BRIDGE .1d discards"
+};
+
+const char *rtl839x_drop_cntr[] = {
+ "ALE_TX_GOOD_PKTS", "ERROR_PKTS", "EGR_ACL_DROP", "EGR_METER_DROP",
+ "OAM", "CFM" "VLAN_IGR_FLTR", "VLAN_ERR",
+ "INNER_OUTER_CFI_EQUAL_1", "VLAN_TAG_FORMAT", "SRC_PORT_SPENDING_TREE", "INBW",
+ "RMA", "HW_ATTACK_PREVENTION", "PROTO_STORM", "MCAST_SA",
+ "IGR_ACL_DROP", "IGR_METER_DROP", "DFLT_ACTION_FOR_MISS_ACL_AND_C2SC", "NEW_SA",
+ "PORT_MOVE", "SA_BLOCKING", "ROUTING_EXCEPTION", "SRC_PORT_SPENDING_TREE_NON_FWDING",
+ "MAC_LIMIT", "UNKNOW_STORM", "MISS_DROP", "CPU_MAC_DROP",
+ "DA_BLOCKING", "SRC_PORT_FILTER_BEFORE_EGR_ACL", "VLAN_EGR_FILTER", "SPANNING_TRE",
+ "PORT_ISOLATION", "OAM_EGRESS_DROP", "MIRROR_ISOLATION", "MAX_LEN_BEFORE_EGR_ACL",
+ "SRC_PORT_FILTER_BEFORE_MIRROR", "MAX_LEN_BEFORE_MIRROR", "SPECIAL_CONGEST_BEFORE_MIRROR",
+ "LINK_STATUS_BEFORE_MIRROR",
+ "WRED_BEFORE_MIRROR", "MAX_LEN_AFTER_MIRROR", "SPECIAL_CONGEST_AFTER_MIRROR",
+ "LINK_STATUS_AFTER_MIRROR",
+ "WRED_AFTER_MIRROR"
+};
+
+const char *rtl930x_drop_cntr[] = {
+ "OAM_PARSER", "UC_RPF", "DEI_CFI", "MAC_IP_SUBNET_BASED_VLAN", "VLAN_IGR_FILTER",
+ "L2_UC_MC", "IPV_IP6_MC_BRIDGE", "PTP", "USER_DEF_0_3", "RESERVED",
+ "RESERVED1", "RESERVED2", "BPDU_RMA", "LACP", "LLDP",
+ "EAPOL", "XX_RMA", "L3_IPUC_NON_IP", "IP4_IP6_HEADER_ERROR", "L3_BAD_IP",
+ "L3_DIP_DMAC_MISMATCH", "IP4_IP_OPTION", "IP_UC_MC_ROUTING_LOOK_UP_MISS", "L3_DST_NULL_INTF",
+ "L3_PBR_NULL_INTF",
+ "HOST_NULL_INTF", "ROUTE_NULL_INTF", "BRIDGING_ACTION", "ROUTING_ACTION", "IPMC_RPF",
+ "L2_NEXTHOP_AGE_OUT", "L3_UC_TTL_FAIL", "L3_MC_TTL_FAIL", "L3_UC_MTU_FAIL", "L3_MC_MTU_FAIL",
+ "L3_UC_ICMP_REDIR", "IP6_MLD_OTHER_ACT", "ND", "IP_MC_RESERVED", "IP6_HBH",
+ "INVALID_SA", "L2_HASH_FULL", "NEW_SA", "PORT_MOVE_FORBID", "STATIC_PORT_MOVING",
+ "DYNMIC_PORT_MOVING", "L3_CRC", "MAC_LIMIT", "ATTACK_PREVENT", "ACL_FWD_ACTION",
+ "OAMPDU", "OAM_MUX", "TRUNK_FILTER", "ACL_DROP", "IGR_BW",
+ "ACL_METER", "VLAN_ACCEPT_FRAME_TYPE", "MSTP_SRC_DROP_DISABLED_BLOCKING", "SA_BLOCK", "DA_BLOCK",
+ "STORM_CONTROL", "VLAN_EGR_FILTER", "MSTP_DESTINATION_DROP", "SRC_PORT_FILTER", "PORT_ISOLATION",
+ "TX_MAX_FRAME_SIZE", "EGR_LINK_STATUS", "MAC_TX_DISABLE", "MAC_PAUSE_FRAME", "MAC_RX_DROP",
+ "MIRROR_ISOLATE", "RX_FC", "EGR_QUEUE", "HSM_RUNOUT", "ROUTING_DISABLE", "INVALID_L2_NEXTHOP_ENTRY",
+ "L3_MC_SRC_FLT", "CPUTAG_FLT", "FWD_PMSK_NULL", "IPUC_ROUTING_LOOKUP_MISS", "MY_DEV_DROP",
+ "STACK_NONUC_BLOCKING_PMSK", "STACK_PORT_NOT_FOUND", "ACL_LOOPBACK_DROP", "IP6_ROUTING_EXT_HEADER"
+};
+
+const char *rtl931x_drop_cntr[] = {
+ "ALE_RX_GOOD_PKTS", "RX_MAX_FRAME_SIZE", "MAC_RX_DROP", "OPENFLOW_IP_MPLS_TTL", "OPENFLOW_TBL_MISS",
+ "IGR_BW", "SPECIAL_CONGEST", "EGR_QUEUE", "RESERVED", "EGR_LINK_STATUS", "STACK_UCAST_NONUCAST_TTL", // 10
+ "STACK_NONUC_BLOCKING_PMSK", "L2_CRC", "SRC_PORT_FILTER", "PARSER_PACKET_TOO_LONG", "PARSER_MALFORM_PACKET",
+ "MPLS_OVER_2_LBL", "EACL_METER", "IACL_METER", "PROTO_STORM", "INVALID_CAPWAP_HEADER", // 20
+ "MAC_IP_SUBNET_BASED_VLAN", "OAM_PARSER", "UC_MC_RPF", "IP_MAC_BINDING_MATCH_MISMATCH", "SA_BLOCK",
+ "TUNNEL_IP_ADDRESS_CHECK", "EACL_DROP", "IACL_DROP", "ATTACK_PREVENT", "SYSTEM_PORT_LIMIT_LEARN", // 30,
+ "OAMPDU", "CCM_RX", "CFM_UNKNOWN_TYPE", "LBM_LBR_LTM_LTR", "Y_1731", "VLAN_LIMIT_LEARN",
+ "VLAN_ACCEPT_FRAME_TYPE", "CFI_1", "STATIC_DYNAMIC_PORT_MOVING", "PORT_MOVE_FORBID", // 40
+ "L3_CRC", "BPDU_PTP_LLDP_EAPOL_RMA", "MSTP_SRC_DROP_DISABLED_BLOCKING", "INVALID_SA", "NEW_SA",
+ "VLAN_IGR_FILTER", "IGR_VLAN_CONVERT", "GRATUITOUS_ARP", "MSTP_SRC_DROP", "L2_HASH_FULL", // 50
+ "MPLS_UNKNOWN_LBL", "L3_IPUC_NON_IP", "TTL", "MTU", "ICMP_REDIRECT", "STORM_CONTROL", "L3_DIP_DMAC_MISMATCH",
+ "IP4_IP_OPTION", "IP6_HBH_EXT_HEADER", "IP4_IP6_HEADER_ERROR", // 60
+ "ROUTING_IP_ADDR_CHECK", "ROUTING_EXCEPTION", "DA_BLOCK", "OAM_MUX", "PORT_ISOLATION", "VLAN_EGR_FILTER",
+ "MIRROR_ISOLATE", "MSTP_DESTINATION_DROP", "L2_MC_BRIDGE", "IP_UC_MC_ROUTING_LOOK_UP_MISS", // 70
+ "L2_UC", "L2_MC", "IP4_MC", "IP6_MC", "L3_UC_MC_ROUTE", "UNKNOWN_L2_UC_FLPM", "BC_FLPM",
+ "VLAN_PRO_UNKNOWN_L2_MC_FLPM", "VLAN_PRO_UNKNOWN_IP4_MC_FLPM", "VLAN_PROFILE_UNKNOWN_IP6_MC_FLPM" // 80,
+};
+
+static ssize_t rtl838x_common_read(char __user *buffer, size_t count,
+ loff_t *ppos, unsigned int value)
+{
+ char *buf;
+ ssize_t len;
+
+ if (*ppos != 0)
+ return 0;
+
+ buf = kasprintf(GFP_KERNEL, "0x%08x\n", value);
+ if (!buf)
+ return -ENOMEM;
+
+ if (count < strlen(buf)) {
+ kfree(buf);
+ return -ENOSPC;
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ kfree(buf);
+
+ return len;
+}
+
+static ssize_t rtl838x_common_write(const char __user *buffer, size_t count,
+ loff_t *ppos, unsigned int *value)
+{
+ char b[32];
+ ssize_t len;
+ int ret;
+
+ if (*ppos != 0)
+ return -EINVAL;
+
+ if (count >= sizeof(b))
+ return -ENOSPC;
+
+ len = simple_write_to_buffer(b, sizeof(b) - 1, ppos,
+ buffer, count);
+ if (len < 0)
+ return len;
+
+ b[len] = '\0';
+ ret = kstrtouint(b, 16, value);
+ if (ret)
+ return -EIO;
+
+ return len;
+}
+
+static ssize_t stp_state_read(struct file *filp, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct rtl838x_port *p = filp->private_data;
+ struct dsa_switch *ds = p->dp->ds;
+ int value = rtl83xx_port_get_stp_state(ds->priv, p->dp->index);
+
+ if (value < 0)
+ return -EINVAL;
+
+ return rtl838x_common_read(buffer, count, ppos, (u32)value);
+}
+
+static ssize_t stp_state_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rtl838x_port *p = filp->private_data;
+ u32 value;
+ ssize_t res = rtl838x_common_write(buffer, count, ppos, &value);
+ if (res < 0)
+ return res;
+
+ rtl83xx_port_stp_state_set(p->dp->ds, p->dp->index, (u8)value);
+
+ return res;
+}
+
+static const struct file_operations stp_state_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = stp_state_read,
+ .write = stp_state_write,
+};
+
+static ssize_t drop_counter_read(struct file *filp, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct rtl838x_switch_priv *priv = filp->private_data;
+ int i;
+ const char **d;
+ u32 v;
+ char *buf;
+ int n = 0, len, offset;
+ int num;
+
+ switch (priv->family_id) {
+ case RTL8380_FAMILY_ID:
+ d = rtl838x_drop_cntr;
+ offset = RTL838X_STAT_PRVTE_DROP_COUNTERS;
+ num = 40;
+ break;
+ case RTL8390_FAMILY_ID:
+ d = rtl839x_drop_cntr;
+ offset = RTL839X_STAT_PRVTE_DROP_COUNTERS;
+ num = 45;
+ break;
+ case RTL9300_FAMILY_ID:
+ d = rtl930x_drop_cntr;
+ offset = RTL930X_STAT_PRVTE_DROP_COUNTERS;
+ num = 85;
+ break;
+ case RTL9310_FAMILY_ID:
+ d = rtl931x_drop_cntr;
+ offset = RTL931X_STAT_PRVTE_DROP_COUNTERS;
+ num = 81;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ buf = kmalloc(30 * num, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++) {
+ v = sw_r32(offset + (i << 2)) & 0xffff;
+ n += sprintf(buf + n, "%s: %d\n", d[i], v);
+ }
+
+ if (count < strlen(buf)) {
+ kfree(buf);
+ return -ENOSPC;
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ kfree(buf);
+
+ return len;
+}
+
+static const struct file_operations drop_counter_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = drop_counter_read,
+};
+
+static void l2_table_print_entry(struct seq_file *m, struct rtl838x_switch_priv *priv,
+ struct rtl838x_l2_entry *e)
+{
+ u64 portmask;
+ int i;
+
+ if (e->type == L2_UNICAST) {
+ seq_puts(m, "L2_UNICAST\n");
+
+ seq_printf(m, " mac %02x:%02x:%02x:%02x:%02x:%02x vid %u rvid %u\n",
+ e->mac[0], e->mac[1], e->mac[2], e->mac[3], e->mac[4], e->mac[5],
+ e->vid, e->rvid);
+
+ seq_printf(m, " port %d age %d", e->port, e->age);
+ if (e->is_static)
+ seq_puts(m, " static");
+ if (e->block_da)
+ seq_puts(m, " block_da");
+ if (e->block_sa)
+ seq_puts(m, " block_sa");
+ if (e->suspended)
+ seq_puts(m, " suspended");
+ if (e->next_hop)
+ seq_printf(m, " next_hop route_id %u", e->nh_route_id);
+ seq_puts(m, "\n");
+
+ } else {
+ if (e->type == L2_MULTICAST) {
+ seq_puts(m, "L2_MULTICAST\n");
+
+ seq_printf(m, " mac %02x:%02x:%02x:%02x:%02x:%02x vid %u rvid %u\n",
+ e->mac[0], e->mac[1], e->mac[2], e->mac[3], e->mac[4], e->mac[5],
+ e->vid, e->rvid);
+ }
+
+ if (e->type == IP4_MULTICAST || e->type == IP6_MULTICAST) {
+ seq_puts(m, (e->type == IP4_MULTICAST) ?
+ "IP4_MULTICAST\n" : "IP6_MULTICAST\n");
+
+ seq_printf(m, " gip %08x sip %08x vid %u rvid %u\n",
+ e->mc_gip, e->mc_sip, e->vid, e->rvid);
+ }
+
+ portmask = priv->r->read_mcast_pmask(e->mc_portmask_index);
+ seq_printf(m, " index %u ports", e->mc_portmask_index);
+ for (i = 0; i < 64; i++) {
+ if (portmask & BIT_ULL(i))
+ seq_printf(m, " %d", i);
+ }
+ seq_puts(m, "\n");
+ }
+
+ seq_puts(m, "\n");
+}
+
+static int l2_table_show(struct seq_file *m, void *v)
+{
+ struct rtl838x_switch_priv *priv = m->private;
+ struct rtl838x_l2_entry e;
+ int i, bucket, index;
+
+ mutex_lock(&priv->reg_mutex);
+
+ for (i = 0; i < priv->fib_entries; i++) {
+ bucket = i >> 2;
+ index = i & 0x3;
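+ /* Illustrative note: the hash table is organized as buckets of four
+ * entries (ways), so e.g. entry i = 10 lives in bucket 2, way 2.
+ */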
+ priv->r->read_l2_entry_using_hash(bucket, index, &e);
+
+ if (!e.valid)
+ continue;
+
+ seq_printf(m, "Hash table bucket %d index %d ", bucket, index);
+ l2_table_print_entry(m, priv, &e);
+ }
+
+ for (i = 0; i < 64; i++) {
+ priv->r->read_cam(i, &e);
+
+ if (!e.valid)
+ continue;
+
+ seq_printf(m, "CAM index %d ", i);
+ l2_table_print_entry(m, priv, &e);
+ }
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+static int l2_table_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, l2_table_show, inode->i_private);
+}
+
+static const struct file_operations l2_table_fops = {
+ .owner = THIS_MODULE,
+ .open = l2_table_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t age_out_read(struct file *filp, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct rtl838x_port *p = filp->private_data;
+ struct dsa_switch *ds = p->dp->ds;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ int value = sw_r32(priv->r->l2_port_aging_out);
+
+ if (value < 0)
+ return -EINVAL;
+
+ return rtl838x_common_read(buffer, count, ppos, (u32)value);
+}
+
+static ssize_t age_out_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rtl838x_port *p = filp->private_data;
+ u32 value;
+ ssize_t res = rtl838x_common_write(buffer, count, ppos, &value);
+ if (res < 0)
+ return res;
+
+ rtl83xx_fast_age(p->dp->ds, p->dp->index);
+
+ return res;
+}
+
+static const struct file_operations age_out_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = age_out_read,
+ .write = age_out_write,
+};
+
+static ssize_t port_egress_rate_read(struct file *filp, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct rtl838x_port *p = filp->private_data;
+ struct dsa_switch *ds = p->dp->ds;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ int value;
+ if (priv->family_id == RTL8380_FAMILY_ID)
+ value = rtl838x_get_egress_rate(priv, p->dp->index);
+ else
+ value = rtl839x_get_egress_rate(priv, p->dp->index);
+
+ if (value < 0)
+ return -EINVAL;
+
+ return rtl838x_common_read(buffer, count, ppos, (u32)value);
+}
+
+static ssize_t port_egress_rate_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rtl838x_port *p = filp->private_data;
+ struct dsa_switch *ds = p->dp->ds;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u32 value;
+ ssize_t res = rtl838x_common_write(buffer, count, ppos, &value);
+ if (res < 0)
+ return res;
+
+ if (priv->family_id == RTL8380_FAMILY_ID)
+ rtl838x_set_egress_rate(priv, p->dp->index, value);
+ else
+ rtl839x_set_egress_rate(priv, p->dp->index, value);
+
+ return res;
+}
+
+static const struct file_operations port_egress_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = port_egress_rate_read,
+ .write = port_egress_rate_write,
+};
+
+static const struct debugfs_reg32 port_ctrl_regs[] = {
+ { .name = "port_isolation", .offset = RTL838X_PORT_ISO_CTRL(0), },
+ { .name = "mac_force_mode", .offset = RTL838X_MAC_FORCE_MODE_CTRL, },
+};
+
+void rtl838x_dbgfs_cleanup(struct rtl838x_switch_priv *priv)
+{
+ debugfs_remove_recursive(priv->dbgfs_dir);
+
+// kfree(priv->dbgfs_entries);
+}
+
+static int rtl838x_dbgfs_port_init(struct dentry *parent, struct rtl838x_switch_priv *priv,
+ int port)
+{
+ struct dentry *port_dir;
+ struct debugfs_regset32 *port_ctrl_regset;
+
+ port_dir = debugfs_create_dir(priv->ports[port].dp->name, parent);
+
+ if (priv->family_id == RTL8380_FAMILY_ID) {
+ debugfs_create_x32("storm_rate_uc", 0644, port_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL838X_STORM_CTRL_PORT_UC(port)));
+
+ debugfs_create_x32("storm_rate_mc", 0644, port_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL838X_STORM_CTRL_PORT_MC(port)));
+
+ debugfs_create_x32("storm_rate_bc", 0644, port_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL838X_STORM_CTRL_PORT_BC(port)));
+ } else {
+ debugfs_create_x32("storm_rate_uc", 0644, port_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL839X_STORM_CTRL_PORT_UC_0(port)));
+
+ debugfs_create_x32("storm_rate_mc", 0644, port_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL839X_STORM_CTRL_PORT_MC_0(port)));
+
+ debugfs_create_x32("storm_rate_bc", 0644, port_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL839X_STORM_CTRL_PORT_BC_0(port)));
+ }
+
+ debugfs_create_u32("id", 0444, port_dir, (u32 *)&priv->ports[port].dp->index);
+
+ port_ctrl_regset = devm_kzalloc(priv->dev, sizeof(*port_ctrl_regset), GFP_KERNEL);
+ if (!port_ctrl_regset)
+ return -ENOMEM;
+
+ port_ctrl_regset->regs = port_ctrl_regs;
+ port_ctrl_regset->nregs = ARRAY_SIZE(port_ctrl_regs);
+ port_ctrl_regset->base = (void *)(RTL838X_SW_BASE + (port << 2));
+ debugfs_create_regset32("port_ctrl", 0400, port_dir, port_ctrl_regset);
+
+ debugfs_create_file("stp_state", 0600, port_dir, &priv->ports[port], &stp_state_fops);
+ debugfs_create_file("age_out", 0600, port_dir, &priv->ports[port], &age_out_fops);
+ debugfs_create_file("port_egress_rate", 0600, port_dir, &priv->ports[port],
+ &port_egress_fops);
+ return 0;
+}
+
+static int rtl838x_dbgfs_leds(struct dentry *parent, struct rtl838x_switch_priv *priv)
+{
+ struct dentry *led_dir;
+ int p;
+ char led_sw_p_ctrl_name[20];
+ char port_led_name[20];
+
+ led_dir = debugfs_create_dir("led", parent);
+
+ if (priv->family_id == RTL8380_FAMILY_ID) {
+ debugfs_create_x32("led_glb_ctrl", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8380_LED_GLB_CTRL));
+ debugfs_create_x32("led_mode_sel", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8380_LED_MODE_SEL));
+ debugfs_create_x32("led_mode_ctrl", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8380_LED_MODE_CTRL));
+ debugfs_create_x32("led_p_en_ctrl", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8380_LED_P_EN_CTRL));
+ debugfs_create_x32("led_sw_ctrl", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8380_LED_SW_CTRL));
+ debugfs_create_x32("led0_sw_p_en_ctrl", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8380_LED0_SW_P_EN_CTRL));
+ debugfs_create_x32("led1_sw_p_en_ctrl", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8380_LED1_SW_P_EN_CTRL));
+ debugfs_create_x32("led2_sw_p_en_ctrl", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8380_LED2_SW_P_EN_CTRL));
+ for (p = 0; p < 28; p++) {
+ snprintf(led_sw_p_ctrl_name, sizeof(led_sw_p_ctrl_name),
+ "led_sw_p_ctrl.%02d", p);
+ debugfs_create_x32(led_sw_p_ctrl_name, 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8380_LED_SW_P_CTRL(p)));
+ }
+ } else if (priv->family_id == RTL8390_FAMILY_ID) {
+ debugfs_create_x32("led_glb_ctrl", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8390_LED_GLB_CTRL));
+ debugfs_create_x32("led_set_2_3", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8390_LED_SET_2_3_CTRL));
+ debugfs_create_x32("led_set_0_1", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8390_LED_SET_0_1_CTRL));
+ for (p = 0; p < 4; p++) {
+ snprintf(port_led_name, sizeof(port_led_name), "led_copr_set_sel.%1d", p);
+ debugfs_create_x32(port_led_name, 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8390_LED_COPR_SET_SEL_CTRL(p << 4)));
+ snprintf(port_led_name, sizeof(port_led_name), "led_fib_set_sel.%1d", p);
+ debugfs_create_x32(port_led_name, 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8390_LED_FIB_SET_SEL_CTRL(p << 4)));
+ }
+ debugfs_create_x32("led_copr_pmask_ctrl_0", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8390_LED_COPR_PMASK_CTRL(0)));
+ debugfs_create_x32("led_copr_pmask_ctrl_1", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8390_LED_COPR_PMASK_CTRL(32)));
+ debugfs_create_x32("led_fib_pmask_ctrl_0", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8390_LED_FIB_PMASK_CTRL(0)));
+ debugfs_create_x32("led_fib_pmask_ctrl_1", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8390_LED_FIB_PMASK_CTRL(32)));
+ debugfs_create_x32("led_combo_ctrl_0", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8390_LED_COMBO_CTRL(0)));
+ debugfs_create_x32("led_combo_ctrl_1", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8390_LED_COMBO_CTRL(32)));
+ debugfs_create_x32("led_sw_ctrl", 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8390_LED_SW_CTRL));
+ for (p = 0; p < 5; p++) {
+ snprintf(port_led_name, sizeof(port_led_name), "led_sw_p_en_ctrl.%1d", p);
+ debugfs_create_x32(port_led_name, 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8390_LED_SW_P_EN_CTRL(p * 10)));
+ }
+ for (p = 0; p < 28; p++) {
+ snprintf(port_led_name, sizeof(port_led_name), "led_sw_p_ctrl.%02d", p);
+ debugfs_create_x32(port_led_name, 0644, led_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL8390_LED_SW_P_CTRL(p)));
+ }
+ }
+ return 0;
+}
+
+void rtl838x_dbgfs_init(struct rtl838x_switch_priv *priv)
+{
+ struct dentry *rtl838x_dir;
+ struct dentry *port_dir;
+ struct dentry *mirror_dir;
+ struct debugfs_regset32 *port_ctrl_regset;
+ int ret, i;
+ char lag_name[10];
+ char mirror_name[10];
+
+ pr_info("%s called\n", __func__);
+ rtl838x_dir = debugfs_lookup(RTL838X_DRIVER_NAME, NULL);
+ if (!rtl838x_dir)
+ rtl838x_dir = debugfs_create_dir(RTL838X_DRIVER_NAME, NULL);
+
+ priv->dbgfs_dir = rtl838x_dir;
+
+ debugfs_create_u32("soc", 0444, rtl838x_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL838X_MODEL_NAME_INFO));
+
+ /* Create one directory per port */
+ for (i = 0; i < priv->cpu_port; i++) {
+ if (priv->ports[i].phy) {
+ ret = rtl838x_dbgfs_port_init(rtl838x_dir, priv, i);
+ if (ret)
+ goto err;
+ }
+ }
+
+ /* Create directory for CPU-port */
+ port_dir = debugfs_create_dir("cpu_port", rtl838x_dir);
+ port_ctrl_regset = devm_kzalloc(priv->dev, sizeof(*port_ctrl_regset), GFP_KERNEL);
+ if (!port_ctrl_regset) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ port_ctrl_regset->regs = port_ctrl_regs;
+ port_ctrl_regset->nregs = ARRAY_SIZE(port_ctrl_regs);
+ port_ctrl_regset->base = (void *)(RTL838X_SW_BASE + (priv->cpu_port << 2));
+ debugfs_create_regset32("port_ctrl", 0400, port_dir, port_ctrl_regset);
+ debugfs_create_u8("id", 0444, port_dir, &priv->cpu_port);
+
+ /* Create entries for LAGs */
+ for (i = 0; i < priv->n_lags; i++) {
+ snprintf(lag_name, sizeof(lag_name), "lag.%02d", i);
+ if (priv->family_id == RTL8380_FAMILY_ID)
+ debugfs_create_x32(lag_name, 0644, rtl838x_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->trk_mbr_ctr(i)));
+ else
+ debugfs_create_x64(lag_name, 0644, rtl838x_dir,
+ (u64 *)(RTL838X_SW_BASE + priv->r->trk_mbr_ctr(i)));
+ }
+
+ /* Create directories for mirror groups */
+ for (i = 0; i < 4; i++) {
+ snprintf(mirror_name, sizeof(mirror_name), "mirror.%1d", i);
+ mirror_dir = debugfs_create_dir(mirror_name, rtl838x_dir);
+ if (priv->family_id == RTL8380_FAMILY_ID) {
+ debugfs_create_x32("ctrl", 0644, mirror_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL838X_MIR_CTRL + i * 4));
+ debugfs_create_x32("ingress_pm", 0644, mirror_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->mir_spm + i * 4));
+ debugfs_create_x32("egress_pm", 0644, mirror_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->mir_dpm + i * 4));
+ debugfs_create_x32("qid", 0644, mirror_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL838X_MIR_QID_CTRL(i)));
+ debugfs_create_x32("rspan_vlan", 0644, mirror_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL838X_MIR_RSPAN_VLAN_CTRL(i)));
+ debugfs_create_x32("rspan_vlan_mac", 0644, mirror_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL838X_MIR_RSPAN_VLAN_CTRL_MAC(i)));
+ debugfs_create_x32("rspan_tx", 0644, mirror_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL838X_MIR_RSPAN_TX_CTRL));
+ debugfs_create_x32("rspan_tx_tag_rm", 0644, mirror_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL838X_MIR_RSPAN_TX_TAG_RM_CTRL));
+ debugfs_create_x32("rspan_tx_tag_en", 0644, mirror_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL838X_MIR_RSPAN_TX_TAG_EN_CTRL));
+ } else {
+ debugfs_create_x32("ctrl", 0644, mirror_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL839X_MIR_CTRL + i * 4));
+ debugfs_create_x64("ingress_pm", 0644, mirror_dir,
+ (u64 *)(RTL838X_SW_BASE + priv->r->mir_spm + i * 8));
+ debugfs_create_x64("egress_pm", 0644, mirror_dir,
+ (u64 *)(RTL838X_SW_BASE + priv->r->mir_dpm + i * 8));
+ debugfs_create_x32("rspan_vlan", 0644, mirror_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL839X_MIR_RSPAN_VLAN_CTRL(i)));
+ debugfs_create_x32("rspan_tx", 0644, mirror_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL839X_MIR_RSPAN_TX_CTRL));
+ debugfs_create_x32("rspan_tx_tag_rm", 0644, mirror_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL839X_MIR_RSPAN_TX_TAG_RM_CTRL));
+ debugfs_create_x32("rspan_tx_tag_en", 0644, mirror_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL839X_MIR_RSPAN_TX_TAG_EN_CTRL));
+ debugfs_create_x64("sample_rate", 0644, mirror_dir,
+ (u64 *)(RTL838X_SW_BASE + RTL839X_MIR_SAMPLE_RATE_CTRL));
+ }
+ }
+
+ if (priv->family_id == RTL8380_FAMILY_ID)
+ debugfs_create_x32("bpdu_flood_mask", 0644, rtl838x_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->rma_bpdu_fld_pmask));
+ else
+ debugfs_create_x64("bpdu_flood_mask", 0644, rtl838x_dir,
+ (u64 *)(RTL838X_SW_BASE + priv->r->rma_bpdu_fld_pmask));
+
+ if (priv->family_id == RTL8380_FAMILY_ID)
+ debugfs_create_x32("vlan_ctrl", 0644, rtl838x_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL838X_VLAN_CTRL));
+ else
+ debugfs_create_x32("vlan_ctrl", 0644, rtl838x_dir,
+ (u32 *)(RTL838X_SW_BASE + RTL839X_VLAN_CTRL));
+
+ ret = rtl838x_dbgfs_leds(rtl838x_dir, priv);
+ if (ret)
+ goto err;
+
+ debugfs_create_file("drop_counters", 0400, rtl838x_dir, priv, &drop_counter_fops);
+
+ debugfs_create_file("l2_table", 0400, rtl838x_dir, priv, &l2_table_fops);
+
+ return;
+err:
+ rtl838x_dbgfs_cleanup(priv);
+}
+
+void rtl930x_dbgfs_init(struct rtl838x_switch_priv *priv)
+{
+ struct dentry *dbg_dir;
+
+ pr_info("%s called\n", __func__);
+ dbg_dir = debugfs_lookup(RTL838X_DRIVER_NAME, NULL);
+ if (!dbg_dir)
+ dbg_dir = debugfs_create_dir(RTL838X_DRIVER_NAME, NULL);
+
+ priv->dbgfs_dir = dbg_dir;
+
+ debugfs_create_file("drop_counters", 0400, dbg_dir, priv, &drop_counter_fops);
+
+ debugfs_create_file("l2_table", 0400, dbg_dir, priv, &l2_table_fops);
+}
diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/dsa.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/dsa.c
new file mode 100644
index 0000000000..6eea0dc936
--- /dev/null
+++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/dsa.c
@@ -0,0 +1,2259 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <net/dsa.h>
+#include <linux/if_bridge.h>
+
+#include <asm/mach-rtl838x/mach-rtl83xx.h>
+#include "rtl83xx.h"
+
+extern struct rtl83xx_soc_info soc_info;
+
+static void rtl83xx_init_stats(struct rtl838x_switch_priv *priv)
+{
+ mutex_lock(&priv->reg_mutex);
+
+ /* Enable statistics module: all counters plus debug.
+ * On RTL839x all counters are enabled by default
+ */
+ if (priv->family_id == RTL8380_FAMILY_ID)
+ sw_w32_mask(0, 3, RTL838X_STAT_CTRL);
+
+ /* Reset statistics counters */
+ sw_w32_mask(0, 1, priv->r->stat_rst);
+
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static void rtl83xx_enable_phy_polling(struct rtl838x_switch_priv *priv)
+{
+ int i;
+ u64 v = 0;
+
+ msleep(1000);
+ /* Enable all ports with a PHY, including the SFP-ports */
+ for (i = 0; i < priv->cpu_port; i++) {
+ if (priv->ports[i].phy)
+ v |= BIT_ULL(i);
+ }
+
+ pr_info("%s: %16llx\n", __func__, v);
+ priv->r->set_port_reg_le(v, priv->r->smi_poll_ctrl);
+
+ /* PHY update complete, there is no global PHY polling enable bit on the 9300 */
+ if (priv->family_id == RTL8390_FAMILY_ID)
+ sw_w32_mask(0, BIT(7), RTL839X_SMI_GLB_CTRL);
+ else if (priv->family_id == RTL9300_FAMILY_ID)
+ sw_w32_mask(0, 0x8000, RTL838X_SMI_GLB_CTRL);
+}
+
+const struct rtl83xx_mib_desc rtl83xx_mib[] = {
+ MIB_DESC(2, 0xf8, "ifInOctets"),
+ MIB_DESC(2, 0xf0, "ifOutOctets"),
+ MIB_DESC(1, 0xec, "dot1dTpPortInDiscards"),
+ MIB_DESC(1, 0xe8, "ifInUcastPkts"),
+ MIB_DESC(1, 0xe4, "ifInMulticastPkts"),
+ MIB_DESC(1, 0xe0, "ifInBroadcastPkts"),
+ MIB_DESC(1, 0xdc, "ifOutUcastPkts"),
+ MIB_DESC(1, 0xd8, "ifOutMulticastPkts"),
+ MIB_DESC(1, 0xd4, "ifOutBroadcastPkts"),
+ MIB_DESC(1, 0xd0, "ifOutDiscards"),
+ MIB_DESC(1, 0xcc, ".3SingleCollisionFrames"),
+ MIB_DESC(1, 0xc8, ".3MultipleCollisionFrames"),
+ MIB_DESC(1, 0xc4, ".3DeferredTransmissions"),
+ MIB_DESC(1, 0xc0, ".3LateCollisions"),
+ MIB_DESC(1, 0xbc, ".3ExcessiveCollisions"),
+ MIB_DESC(1, 0xb8, ".3SymbolErrors"),
+ MIB_DESC(1, 0xb4, ".3ControlInUnknownOpcodes"),
+ MIB_DESC(1, 0xb0, ".3InPauseFrames"),
+ MIB_DESC(1, 0xac, ".3OutPauseFrames"),
+ MIB_DESC(1, 0xa8, "DropEvents"),
+ MIB_DESC(1, 0xa4, "tx_BroadcastPkts"),
+ MIB_DESC(1, 0xa0, "tx_MulticastPkts"),
+ MIB_DESC(1, 0x9c, "CRCAlignErrors"),
+ MIB_DESC(1, 0x98, "tx_UndersizePkts"),
+ MIB_DESC(1, 0x94, "rx_UndersizePkts"),
+ MIB_DESC(1, 0x90, "rx_UndersizedropPkts"),
+ MIB_DESC(1, 0x8c, "tx_OversizePkts"),
+ MIB_DESC(1, 0x88, "rx_OversizePkts"),
+ MIB_DESC(1, 0x84, "Fragments"),
+ MIB_DESC(1, 0x80, "Jabbers"),
+ MIB_DESC(1, 0x7c, "Collisions"),
+ MIB_DESC(1, 0x78, "tx_Pkts64Octets"),
+ MIB_DESC(1, 0x74, "rx_Pkts64Octets"),
+ MIB_DESC(1, 0x70, "tx_Pkts65to127Octets"),
+ MIB_DESC(1, 0x6c, "rx_Pkts65to127Octets"),
+ MIB_DESC(1, 0x68, "tx_Pkts128to255Octets"),
+ MIB_DESC(1, 0x64, "rx_Pkts128to255Octets"),
+ MIB_DESC(1, 0x60, "tx_Pkts256to511Octets"),
+ MIB_DESC(1, 0x5c, "rx_Pkts256to511Octets"),
+ MIB_DESC(1, 0x58, "tx_Pkts512to1023Octets"),
+ MIB_DESC(1, 0x54, "rx_Pkts512to1023Octets"),
+ MIB_DESC(1, 0x50, "tx_Pkts1024to1518Octets"),
+ MIB_DESC(1, 0x4c, "rx_StatsPkts1024to1518Octets"),
+ MIB_DESC(1, 0x48, "tx_Pkts1519toMaxOctets"),
+ MIB_DESC(1, 0x44, "rx_Pkts1519toMaxOctets"),
+ MIB_DESC(1, 0x40, "rxMacDiscards")
+};
+
+/* DSA callbacks */
+
+static enum dsa_tag_protocol rtl83xx_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mprot)
+{
+ /* The switch does not add a tag to the frames; instead, the internal
+ * header structure of each packet carries the tagging information.
+ */
+ return DSA_TAG_PROTO_TRAILER;
+}
+
+/* Initialize all VLANs */
+static void rtl83xx_vlan_setup(struct rtl838x_switch_priv *priv)
+{
+ struct rtl838x_vlan_info info;
+ int i;
+
+ pr_info("In %s\n", __func__);
+
+ priv->r->vlan_profile_setup(0);
+ priv->r->vlan_profile_setup(1);
+ pr_info("UNKNOWN_MC_PMASK: %016llx\n", priv->r->read_mcast_pmask(UNKNOWN_MC_PMASK));
+ priv->r->vlan_profile_dump(0);
+
+ info.fid = 0; // Default Forwarding ID / MSTI
+ info.hash_uc_fid = false; // Do not build the L2 lookup hash with FID, but VID
+ info.hash_mc_fid = false; // Do the same for Multicast packets
+ info.profile_id = 0; // Use default Vlan Profile 0
+ info.tagged_ports = 0; // Initially no port members
+ if (priv->family_id == RTL9310_FAMILY_ID) {
+ info.if_id = 0;
+ info.multicast_grp_mask = 0;
+ info.l2_tunnel_list_id = -1;
+ }
+
+ // Initialize all vlans 0-4095
+ for (i = 0; i < MAX_VLANS; i++)
+ priv->r->vlan_set_tagged(i, &info);
+
+ // reset PVIDs; defaults to 1 on reset
+ for (i = 0; i <= priv->ds->num_ports; i++) {
+ priv->r->vlan_port_pvid_set(i, PBVLAN_TYPE_INNER, 0);
+ priv->r->vlan_port_pvid_set(i, PBVLAN_TYPE_OUTER, 0);
+ priv->r->vlan_port_pvidmode_set(i, PBVLAN_TYPE_INNER, PBVLAN_MODE_UNTAG_AND_PRITAG);
+ priv->r->vlan_port_pvidmode_set(i, PBVLAN_TYPE_OUTER, PBVLAN_MODE_UNTAG_AND_PRITAG);
+ }
+
+ // Set forwarding action based on inner VLAN tag
+ for (i = 0; i < priv->cpu_port; i++)
+ priv->r->vlan_fwd_on_inner(i, true);
+}
+
+static void rtl83xx_setup_bpdu_traps(struct rtl838x_switch_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->cpu_port; i++)
+ priv->r->set_receive_management_action(i, BPDU, COPY2CPU);
+}
+
+static void rtl83xx_port_set_salrn(struct rtl838x_switch_priv *priv,
+ int port, bool enable)
+{
+ int shift = SALRN_PORT_SHIFT(port);
+ int val = enable ? SALRN_MODE_HARDWARE : SALRN_MODE_DISABLED;
+
+ sw_w32_mask(SALRN_MODE_MASK << shift, val << shift,
+ priv->r->l2_port_new_salrn(port));
+}
+
+static int rtl83xx_setup(struct dsa_switch *ds)
+{
+ int i;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u64 port_bitmap = BIT_ULL(priv->cpu_port);
+
+ pr_debug("%s called\n", __func__);
+
+ /* Disable MAC polling the PHY so that we can start configuration */
+ priv->r->set_port_reg_le(0ULL, priv->r->smi_poll_ctrl);
+
+ for (i = 0; i < ds->num_ports; i++)
+ priv->ports[i].enable = false;
+ priv->ports[priv->cpu_port].enable = true;
+
+ /* Isolate ports from each other: traffic only CPU <-> port */
+ /* Setting bit j in register RTL838X_PORT_ISO_CTRL(i) allows
+ * traffic from source port i to destination port j
+ */
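+ /* For illustration: a port i with a PHY gets BIT_ULL(cpu_port) | BIT_ULL(i)
+ * written to its isolation register below, so it can initially exchange
+ * traffic only with the CPU port until it joins a bridge.
+ */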
+ for (i = 0; i < priv->cpu_port; i++) {
+ if (priv->ports[i].phy) {
+ priv->r->set_port_reg_be(BIT_ULL(priv->cpu_port) | BIT_ULL(i),
+ priv->r->port_iso_ctrl(i));
+ port_bitmap |= BIT_ULL(i);
+ }
+ }
+ priv->r->set_port_reg_be(port_bitmap, priv->r->port_iso_ctrl(priv->cpu_port));
+
+ if (priv->family_id == RTL8380_FAMILY_ID)
+ rtl838x_print_matrix();
+ else
+ rtl839x_print_matrix();
+
+ rtl83xx_init_stats(priv);
+
+ rtl83xx_vlan_setup(priv);
+
+ rtl83xx_setup_bpdu_traps(priv);
+
+ ds->configure_vlan_while_not_filtering = true;
+
+ priv->r->l2_learning_setup();
+
+ rtl83xx_port_set_salrn(priv, priv->cpu_port, false);
+ ds->assisted_learning_on_cpu_port = true;
+
+ /*
+ * Make sure all frames sent to the switch's MAC are trapped to the CPU-port
+ * 0: FWD, 1: DROP, 2: TRAP2CPU
+ */
+ if (priv->family_id == RTL8380_FAMILY_ID)
+ sw_w32(0x2, RTL838X_SPCL_TRAP_SWITCH_MAC_CTRL);
+ else
+ sw_w32(0x2, RTL839X_SPCL_TRAP_SWITCH_MAC_CTRL);
+
+ /* Enable MAC Polling PHY again */
+ rtl83xx_enable_phy_polling(priv);
+ pr_debug("Please wait until PHY is settled\n");
+ msleep(1000);
+ priv->r->pie_init(priv);
+
+ return 0;
+}
+
+static int rtl93xx_setup(struct dsa_switch *ds)
+{
+ int i;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u32 port_bitmap = BIT(priv->cpu_port);
+
+ pr_info("%s called\n", __func__);
+
+ /* Disable MAC polling the PHY so that we can start configuration */
+ if (priv->family_id == RTL9300_FAMILY_ID)
+ sw_w32(0, RTL930X_SMI_POLL_CTRL);
+
+ if (priv->family_id == RTL9310_FAMILY_ID) {
+ sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL);
+ sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL + 4);
+ }
+
+ // Disable all ports except CPU port
+ for (i = 0; i < ds->num_ports; i++)
+ priv->ports[i].enable = false;
+ priv->ports[priv->cpu_port].enable = true;
+
+ for (i = 0; i < priv->cpu_port; i++) {
+ if (priv->ports[i].phy) {
+ priv->r->traffic_set(i, BIT_ULL(priv->cpu_port) | BIT_ULL(i));
+ port_bitmap |= BIT_ULL(i);
+ }
+ }
+ priv->r->traffic_set(priv->cpu_port, port_bitmap);
+
+ rtl930x_print_matrix();
+
+ // TODO: Initialize statistics
+
+ rtl83xx_vlan_setup(priv);
+
+ ds->configure_vlan_while_not_filtering = true;
+
+ priv->r->l2_learning_setup();
+
+ rtl83xx_port_set_salrn(priv, priv->cpu_port, false);
+ ds->assisted_learning_on_cpu_port = true;
+
+ rtl83xx_enable_phy_polling(priv);
+
+ priv->r->pie_init(priv);
+
+ priv->r->led_init(priv);
+
+ return 0;
+}
+
+static int rtl93xx_get_sds(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct device_node *dn;
+ u32 sds_num;
+
+ if (!dev)
+ return -1;
+ if (dev->of_node) {
+ dn = dev->of_node;
+ if (of_property_read_u32(dn, "sds", &sds_num))
+ sds_num = -1;
+ } else {
+ dev_err(dev, "No DT node.\n");
+ return -1;
+ }
+
+ return sds_num;
+}
+
+static void rtl83xx_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ pr_debug("In %s port %d, state is %d", __func__, port, state->interface);
+
+ if (!phy_interface_mode_is_rgmii(state->interface) &&
+ state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_1000BASEX &&
+ state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_REVMII &&
+ state->interface != PHY_INTERFACE_MODE_GMII &&
+ state->interface != PHY_INTERFACE_MODE_QSGMII &&
+ state->interface != PHY_INTERFACE_MODE_INTERNAL &&
+ state->interface != PHY_INTERFACE_MODE_SGMII) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev,
+ "Unsupported interface: %d for port %d\n",
+ state->interface, port);
+ return;
+ }
+
+ /* Allow all the expected bits */
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ /* With the exclusion of MII and Reverse MII, we support Gigabit,
+ * including Half duplex
+ */
+ if (state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_REVMII) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ }
+
+ /* On both the 8380 and 8382, ports 24-27 are SFP ports */
+ if (port >= 24 && port <= 27 && priv->family_id == RTL8380_FAMILY_ID)
+ phylink_set(mask, 1000baseX_Full);
+
+ /* On the RTL839x family of SoCs, ports 48 to 51 are SFP ports */
+ if (port >= 48 && port <= 51 && priv->family_id == RTL8390_FAMILY_ID)
+ phylink_set(mask, 1000baseX_Full);
+
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void rtl93xx_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ pr_debug("In %s port %d, state is %d (%s)", __func__, port, state->interface,
+ phy_modes(state->interface));
+
+ if (!phy_interface_mode_is_rgmii(state->interface) &&
+ state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_1000BASEX &&
+ state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_REVMII &&
+ state->interface != PHY_INTERFACE_MODE_GMII &&
+ state->interface != PHY_INTERFACE_MODE_QSGMII &&
+ state->interface != PHY_INTERFACE_MODE_XGMII &&
+ state->interface != PHY_INTERFACE_MODE_HSGMII &&
+ state->interface != PHY_INTERFACE_MODE_10GBASER &&
+ state->interface != PHY_INTERFACE_MODE_10GKR &&
+ state->interface != PHY_INTERFACE_MODE_USXGMII &&
+ state->interface != PHY_INTERFACE_MODE_INTERNAL &&
+ state->interface != PHY_INTERFACE_MODE_SGMII) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev,
+ "Unsupported interface: %d for port %d\n",
+ state->interface, port);
+ return;
+ }
+
+ /* Allow all the expected bits */
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ /* With the exclusion of MII and Reverse MII, we support Gigabit,
+ * including Half duplex
+ */
+ if (state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_REVMII) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ }
+
+ // Internal phys of the RTL93xx family provide 10G
+ if (priv->ports[port].phy_is_integrated
+ && state->interface == PHY_INTERFACE_MODE_1000BASEX) {
+ phylink_set(mask, 1000baseX_Full);
+ } else if (priv->ports[port].phy_is_integrated) {
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 10000baseKR_Full);
+ phylink_set(mask, 10000baseSR_Full);
+ phylink_set(mask, 10000baseCR_Full);
+ }
+ if (state->interface == PHY_INTERFACE_MODE_INTERNAL) {
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 10000baseKR_Full);
+ phylink_set(mask, 10000baseT_Full);
+ phylink_set(mask, 10000baseSR_Full);
+ phylink_set(mask, 10000baseCR_Full);
+ }
+
+ if (state->interface == PHY_INTERFACE_MODE_USXGMII)
+ phylink_set(mask, 10000baseT_Full);
+
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ pr_debug("%s leaving supported: %*pb", __func__, __ETHTOOL_LINK_MODE_MASK_NBITS, supported);
+}
+
+static int rtl83xx_phylink_mac_link_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u64 speed;
+ u64 link;
+
+ if (port < 0 || port > priv->cpu_port)
+ return -EINVAL;
+
+ state->link = 0;
+ link = priv->r->get_port_reg_le(priv->r->mac_link_sts);
+ if (link & BIT_ULL(port))
+ state->link = 1;
+ pr_debug("%s: link state port %d: %llx\n", __func__, port, link & BIT_ULL(port));
+
+ state->duplex = 0;
+ if (priv->r->get_port_reg_le(priv->r->mac_link_dup_sts) & BIT_ULL(port))
+ state->duplex = 1;
+
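+ /* The link-speed status registers hold 2 bits per port, 16 ports per
+ * 32-bit register; e.g. for port 21 the value is shifted right by
+ * (21 % 16) * 2 = 10 bits before masking with 0x3.
+ */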
+ speed = priv->r->get_port_reg_le(priv->r->mac_link_spd_sts(port));
+ speed >>= (port % 16) << 1;
+ switch (speed & 0x3) {
+ case 0:
+ state->speed = SPEED_10;
+ break;
+ case 1:
+ state->speed = SPEED_100;
+ break;
+ case 2:
+ state->speed = SPEED_1000;
+ break;
+ case 3:
+ if (priv->family_id == RTL9300_FAMILY_ID
+ && (port == 24 || port == 26)) /* Internal serdes */
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_100; /* Is in fact 500Mbit */
+ }
+
+ state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
+ if (priv->r->get_port_reg_le(priv->r->mac_rx_pause_sts) & BIT_ULL(port))
+ state->pause |= MLO_PAUSE_RX;
+ if (priv->r->get_port_reg_le(priv->r->mac_tx_pause_sts) & BIT_ULL(port))
+ state->pause |= MLO_PAUSE_TX;
+ return 1;
+}
+
+static int rtl93xx_phylink_mac_link_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u64 speed;
+ u64 link;
+ u64 media = 0;
+
+ if (port < 0 || port > priv->cpu_port)
+ return -EINVAL;
+
+ /*
+ * On the RTL9300 for at least the RTL8226B PHY, the MAC-side link
+ * state needs to be read twice in order to read a correct result.
+ * This would not be necessary for ports connected e.g. to RTL8218D
+ * PHYs.
+ */
+ state->link = 0;
+ link = priv->r->get_port_reg_le(priv->r->mac_link_sts);
+ link = priv->r->get_port_reg_le(priv->r->mac_link_sts);
+ if (link & BIT_ULL(port))
+ state->link = 1;
+
+ if (priv->family_id == RTL9310_FAMILY_ID)
+ media = priv->r->get_port_reg_le(RTL931X_MAC_LINK_MEDIA_STS);
+
+ if (priv->family_id == RTL9300_FAMILY_ID)
+ media = sw_r32(RTL930X_MAC_LINK_MEDIA_STS);
+
+ if (media & BIT_ULL(port))
+ state->link = 1;
+
+ pr_debug("%s: link state port %d: %llx, media %llx\n", __func__, port,
+ link & BIT_ULL(port), media);
+
+ state->duplex = 0;
+ if (priv->r->get_port_reg_le(priv->r->mac_link_dup_sts) & BIT_ULL(port))
+ state->duplex = 1;
+
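+ /* Here the speed field is 4 bits wide, 8 ports per 32-bit register;
+ * e.g. for port 5 the value is shifted right by (5 % 8) * 4 = 20 bits
+ * before masking with 0xf.
+ */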
+ speed = priv->r->get_port_reg_le(priv->r->mac_link_spd_sts(port));
+ speed >>= (port % 8) << 2;
+ switch (speed & 0xf) {
+ case 0:
+ state->speed = SPEED_10;
+ break;
+ case 1:
+ state->speed = SPEED_100;
+ break;
+ case 2:
+ case 7:
+ state->speed = SPEED_1000;
+ break;
+ case 4:
+ state->speed = SPEED_10000;
+ break;
+ case 5:
+ case 8:
+ state->speed = SPEED_2500;
+ break;
+ case 6:
+ state->speed = SPEED_5000;
+ break;
+ default:
+ pr_err("%s: unknown speed: %d\n", __func__, (u32)speed & 0xf);
+ }
+
+ if (priv->family_id == RTL9310_FAMILY_ID
+ && (port >= 52 && port <= 55)) { /* Internal serdes */
+ state->speed = SPEED_10000;
+ state->link = 1;
+ state->duplex = 1;
+ }
+
+ pr_debug("%s: speed is: %d %d\n", __func__, (u32)speed & 0xf, state->speed);
+ state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
+ if (priv->r->get_port_reg_le(priv->r->mac_rx_pause_sts) & BIT_ULL(port))
+ state->pause |= MLO_PAUSE_RX;
+ if (priv->r->get_port_reg_le(priv->r->mac_tx_pause_sts) & BIT_ULL(port))
+ state->pause |= MLO_PAUSE_TX;
+ return 1;
+}
+
+static void rtl83xx_config_interface(int port, phy_interface_t interface)
+{
+ u32 old, int_shift, sds_shift;
+
+ switch (port) {
+ case 24:
+ int_shift = 0;
+ sds_shift = 5;
+ break;
+ case 26:
+ int_shift = 3;
+ sds_shift = 0;
+ break;
+ default:
+ return;
+ }
+
+ old = sw_r32(RTL838X_SDS_MODE_SEL);
+ switch (interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ if ((old >> sds_shift & 0x1f) == 4)
+ return;
+ sw_w32_mask(0x7 << int_shift, 1 << int_shift, RTL838X_INT_MODE_CTRL);
+ sw_w32_mask(0x1f << sds_shift, 4 << sds_shift, RTL838X_SDS_MODE_SEL);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ if ((old >> sds_shift & 0x1f) == 2)
+ return;
+ sw_w32_mask(0x7 << int_shift, 2 << int_shift, RTL838X_INT_MODE_CTRL);
+ sw_w32_mask(0x1f << sds_shift, 2 << sds_shift, RTL838X_SDS_MODE_SEL);
+ break;
+ default:
+ return;
+ }
+ pr_debug("configured port %d for interface %s\n", port, phy_modes(interface));
+}
+
+static void rtl83xx_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u32 reg;
+ int speed_bit = priv->family_id == RTL8380_FAMILY_ID ? 4 : 3;
+
+ pr_debug("%s port %d, mode %x\n", __func__, port, mode);
+
+ if (port == priv->cpu_port) {
+ /* Set Speed, duplex, flow control
+ * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL
+ * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN
+ * | MEDIA_SEL
+ */
+ if (priv->family_id == RTL8380_FAMILY_ID) {
+ sw_w32(0x6192F, priv->r->mac_force_mode_ctrl(priv->cpu_port));
+ /* allow CRC errors on CPU-port */
+ sw_w32_mask(0, 0x8, RTL838X_MAC_PORT_CTRL(priv->cpu_port));
+ } else {
+ sw_w32_mask(0, 3, priv->r->mac_force_mode_ctrl(priv->cpu_port));
+ }
+ return;
+ }
+
+ reg = sw_r32(priv->r->mac_force_mode_ctrl(port));
+ /* Auto-Negotiation does not work for MAC in RTL8390 */
+ if (priv->family_id == RTL8380_FAMILY_ID) {
+ if (mode == MLO_AN_PHY || phylink_autoneg_inband(mode)) {
+ pr_debug("PHY autonegotiates\n");
+ reg |= RTL838X_NWAY_EN;
+ sw_w32(reg, priv->r->mac_force_mode_ctrl(port));
+ rtl83xx_config_interface(port, state->interface);
+ return;
+ }
+ }
+
+ if (mode == MLO_AN_FIXED)
+ pr_debug("Fixed state.\n");
+
+ /* Clear id_mode_dis bit, and the existing port mode, let
+ * RGMII_MODE_EN be set by mac_link_{up,down} */
+ if (priv->family_id == RTL8380_FAMILY_ID) {
+ reg &= ~(RTL838X_RX_PAUSE_EN | RTL838X_TX_PAUSE_EN);
+ if (state->pause & MLO_PAUSE_TXRX_MASK) {
+ if (state->pause & MLO_PAUSE_TX)
+ reg |= RTL838X_TX_PAUSE_EN;
+ reg |= RTL838X_RX_PAUSE_EN;
+ }
+ } else if (priv->family_id == RTL8390_FAMILY_ID) {
+ reg &= ~(RTL839X_RX_PAUSE_EN | RTL839X_TX_PAUSE_EN);
+ if (state->pause & MLO_PAUSE_TXRX_MASK) {
+ if (state->pause & MLO_PAUSE_TX)
+ reg |= RTL839X_TX_PAUSE_EN;
+ reg |= RTL839X_RX_PAUSE_EN;
+ }
+ }
+
+ reg &= ~(3 << speed_bit);
+ switch (state->speed) {
+ case SPEED_1000:
+ reg |= 2 << speed_bit;
+ break;
+ case SPEED_100:
+ reg |= 1 << speed_bit;
+ break;
+ default:
+ break; // Ignore, including 10MBit which has a speed value of 0
+ }
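+ /* Worked example: with speed_bit = 4 on the RTL838x, SPEED_1000 sets
+ * the SPD_SEL field (bits 5:4) to 0b10; on the RTL839x the same field
+ * sits at bits 4:3.
+ */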
+
+ if (priv->family_id == RTL8380_FAMILY_ID) {
+ reg &= ~(RTL838X_DUPLEX_MODE | RTL838X_FORCE_LINK_EN);
+ if (state->link)
+ reg |= RTL838X_FORCE_LINK_EN;
+ if (state->duplex == DUPLEX_FULL)
+ reg |= RTL838X_DUPLEX_MODE;
+ } else if (priv->family_id == RTL8390_FAMILY_ID) {
+ reg &= ~(RTL839X_DUPLEX_MODE | RTL839X_FORCE_LINK_EN);
+ if (state->link)
+ reg |= RTL839X_FORCE_LINK_EN;
+ if (state->duplex == DUPLEX_FULL)
+ reg |= RTL839X_DUPLEX_MODE;
+ }
+
+ // LAG members must use DUPLEX and we need to enable the link
+ if (priv->lagmembers & BIT_ULL(port)) {
+ switch (priv->family_id) {
+ case RTL8380_FAMILY_ID:
+ reg |= (RTL838X_DUPLEX_MODE | RTL838X_FORCE_LINK_EN);
+ break;
+ case RTL8390_FAMILY_ID:
+ reg |= (RTL839X_DUPLEX_MODE | RTL839X_FORCE_LINK_EN);
+ break;
+ }
+ }
+
+ // Disable AN
+ if (priv->family_id == RTL8380_FAMILY_ID)
+ reg &= ~RTL838X_NWAY_EN;
+ sw_w32(reg, priv->r->mac_force_mode_ctrl(port));
+}
+
+static void rtl931x_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ int sds_num;
+ u32 reg, band;
+
+ sds_num = priv->ports[port].sds_num;
+ pr_info("%s: speed %d sds_num %d\n", __func__, state->speed, sds_num);
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_HSGMII:
+ pr_info("%s setting mode PHY_INTERFACE_MODE_HSGMII\n", __func__);
+ band = rtl931x_sds_cmu_band_get(sds_num, PHY_INTERFACE_MODE_HSGMII);
+ rtl931x_sds_init(sds_num, PHY_INTERFACE_MODE_HSGMII);
+ band = rtl931x_sds_cmu_band_set(sds_num, true, 62, PHY_INTERFACE_MODE_HSGMII);
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ band = rtl931x_sds_cmu_band_get(sds_num, PHY_INTERFACE_MODE_1000BASEX);
+ rtl931x_sds_init(sds_num, PHY_INTERFACE_MODE_1000BASEX);
+ break;
+ case PHY_INTERFACE_MODE_XGMII:
+ band = rtl931x_sds_cmu_band_get(sds_num, PHY_INTERFACE_MODE_XGMII);
+ rtl931x_sds_init(sds_num, PHY_INTERFACE_MODE_XGMII);
+ break;
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_10GKR:
+ band = rtl931x_sds_cmu_band_get(sds_num, PHY_INTERFACE_MODE_10GBASER);
+ rtl931x_sds_init(sds_num, PHY_INTERFACE_MODE_10GBASER);
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ // Translates to MII_USXGMII_10GSXGMII
+ band = rtl931x_sds_cmu_band_get(sds_num, PHY_INTERFACE_MODE_USXGMII);
+ rtl931x_sds_init(sds_num, PHY_INTERFACE_MODE_USXGMII);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ pr_info("%s setting mode PHY_INTERFACE_MODE_SGMII\n", __func__);
+ band = rtl931x_sds_cmu_band_get(sds_num, PHY_INTERFACE_MODE_SGMII);
+ rtl931x_sds_init(sds_num, PHY_INTERFACE_MODE_SGMII);
+ band = rtl931x_sds_cmu_band_set(sds_num, true, 62, PHY_INTERFACE_MODE_SGMII);
+ break;
+ case PHY_INTERFACE_MODE_QSGMII:
+ band = rtl931x_sds_cmu_band_get(sds_num, PHY_INTERFACE_MODE_QSGMII);
+ rtl931x_sds_init(sds_num, PHY_INTERFACE_MODE_QSGMII);
+ break;
+ default:
+ pr_err("%s: unknown serdes mode: %s\n",
+ __func__, phy_modes(state->interface));
+ return;
+ }
+
+ reg = sw_r32(priv->r->mac_force_mode_ctrl(port));
+ pr_info("%s reading FORCE_MODE_CTRL: %08x\n", __func__, reg);
+
+ reg &= ~(RTL931X_DUPLEX_MODE | RTL931X_FORCE_EN | RTL931X_FORCE_LINK_EN);
+
+ reg &= ~(0xf << 12);
+ reg |= 0x2 << 12; // Set SMI speed to 0x2
+
+ reg |= RTL931X_TX_PAUSE_EN | RTL931X_RX_PAUSE_EN;
+
+ if (priv->lagmembers & BIT_ULL(port))
+ reg |= RTL931X_DUPLEX_MODE;
+
+ if (state->duplex == DUPLEX_FULL)
+ reg |= RTL931X_DUPLEX_MODE;
+
+ sw_w32(reg, priv->r->mac_force_mode_ctrl(port));
+}
+
+static void rtl93xx_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ int sds_num, sds_mode;
+ u32 reg;
+
+ pr_info("%s port %d, mode %x, phy-mode: %s, speed %d, link %d\n", __func__,
+ port, mode, phy_modes(state->interface), state->speed, state->link);
+
+ // Nothing to be done for the CPU-port
+ if (port == priv->cpu_port)
+ return;
+
+ if (priv->family_id == RTL9310_FAMILY_ID)
+ return rtl931x_phylink_mac_config(ds, port, mode, state);
+
+ sds_num = priv->ports[port].sds_num;
+ pr_info("%s SDS is %d\n", __func__, sds_num);
+ if (sds_num >= 0) {
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_HSGMII:
+ sds_mode = 0x12;
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ sds_mode = 0x04;
+ break;
+ case PHY_INTERFACE_MODE_XGMII:
+ sds_mode = 0x10;
+ break;
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_10GKR:
+ sds_mode = 0x1b; // 10G 1000X Auto
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ sds_mode = 0x0d;
+ break;
+ default:
+ pr_err("%s: unknown serdes mode: %s\n",
+ __func__, phy_modes(state->interface));
+ return;
+ }
+ if (state->interface == PHY_INTERFACE_MODE_10GBASER)
+ rtl9300_serdes_setup(sds_num, state->interface);
+ }
+
+ reg = sw_r32(priv->r->mac_force_mode_ctrl(port));
+ reg &= ~(0xf << 3);
+
+ switch (state->speed) {
+ case SPEED_10000:
+ reg |= 4 << 3;
+ break;
+ case SPEED_5000:
+ reg |= 6 << 3;
+ break;
+ case SPEED_2500:
+ reg |= 5 << 3;
+ break;
+ case SPEED_1000:
+ reg |= 2 << 3;
+ break;
+ default:
+ reg |= 2 << 3;
+ break;
+ }
+
+ if (state->link)
+ reg |= RTL930X_FORCE_LINK_EN;
+
+ if (priv->lagmembers & BIT_ULL(port))
+ reg |= RTL930X_DUPLEX_MODE | RTL930X_FORCE_LINK_EN;
+
+ if (state->duplex == DUPLEX_FULL)
+ reg |= RTL930X_DUPLEX_MODE;
+
+ if (priv->ports[port].phy_is_integrated)
+ reg &= ~RTL930X_FORCE_EN; // Clear MAC_FORCE_EN to allow SDS-MAC link
+ else
+ reg |= RTL930X_FORCE_EN;
+
+ sw_w32(reg, priv->r->mac_force_mode_ctrl(port));
+}
+
+static void rtl83xx_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ /* Stop TX/RX to port */
+ sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(port));
+
+ // No longer force link
+ sw_w32_mask(0x3, 0, priv->r->mac_force_mode_ctrl(port));
+}
+
+static void rtl93xx_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u32 v = 0;
+
+ /* Stop TX/RX to port */
+ sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(port));
+
+ // No longer force link
+ if (priv->family_id == RTL9300_FAMILY_ID)
+ v = RTL930X_FORCE_EN | RTL930X_FORCE_LINK_EN;
+ else if (priv->family_id == RTL9310_FAMILY_ID)
+ v = RTL931X_FORCE_EN | RTL931X_FORCE_LINK_EN;
+ sw_w32_mask(v, 0, priv->r->mac_force_mode_ctrl(port));
+}
+
+static void rtl83xx_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ /* Restart TX/RX to port */
+ sw_w32_mask(0, 0x3, priv->r->mac_port_ctrl(port));
+ // TODO: Set speed/duplex/pauses
+}
+
+static void rtl93xx_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ /* Restart TX/RX to port */
+ sw_w32_mask(0, 0x3, priv->r->mac_port_ctrl(port));
+ // TODO: Set speed/duplex/pauses
+}
+
+static void rtl83xx_get_strings(struct dsa_switch *ds,
+ int port, u32 stringset, u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(rtl83xx_mib); i++)
+ strncpy(data + i * ETH_GSTRING_LEN, rtl83xx_mib[i].name,
+ ETH_GSTRING_LEN);
+}
+
+static void rtl83xx_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ const struct rtl83xx_mib_desc *mib;
+ int i;
+ u64 h;
+
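+ /* Each port has a 256-byte MIB block; a counter with descriptor offset
+ * "off" is read from stat_port_std_mib + port * 256 + (252 - off), with
+ * the upper 32 bits of 64-bit counters at (248 - off). E.g. ifInOctets
+ * (off 0xf8) uses bytes 4 and 0 of the block.
+ */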
+ for (i = 0; i < ARRAY_SIZE(rtl83xx_mib); i++) {
+ mib = &rtl83xx_mib[i];
+
+ data[i] = sw_r32(priv->r->stat_port_std_mib + (port << 8) + 252 - mib->offset);
+ if (mib->size == 2) {
+ h = sw_r32(priv->r->stat_port_std_mib + (port << 8) + 248 - mib->offset);
+ data[i] |= h << 32;
+ }
+ }
+}
+
+static int rtl83xx_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(rtl83xx_mib);
+}
+
+static int rtl83xx_mc_group_alloc(struct rtl838x_switch_priv *priv, int port)
+{
+ int mc_group = find_first_zero_bit(priv->mc_group_bm, MAX_MC_GROUPS - 1);
+ u64 portmask;
+
+ if (mc_group >= MAX_MC_GROUPS - 1)
+ return -1;
+
+ if (priv->is_lagmember[port]) {
+ pr_info("%s: %d is lag slave. ignore\n", __func__, port);
+ return 0;
+ }
+
+ set_bit(mc_group, priv->mc_group_bm);
+ mc_group++; // We cannot use group 0, as this is used for lookup miss flooding
+ portmask = BIT_ULL(port) | BIT_ULL(priv->cpu_port);
+ priv->r->write_mcast_pmask(mc_group, portmask);
+
+ return mc_group;
+}
+
+static u64 rtl83xx_mc_group_add_port(struct rtl838x_switch_priv *priv, int mc_group, int port)
+{
+ u64 portmask = priv->r->read_mcast_pmask(mc_group);
+
+ pr_debug("%s: %d\n", __func__, port);
+ if (priv->is_lagmember[port]) {
+ pr_info("%s: %d is lag slave. ignore\n", __func__, port);
+ return portmask;
+ }
+ portmask |= BIT_ULL(port);
+ priv->r->write_mcast_pmask(mc_group, portmask);
+
+ return portmask;
+}
+
+static u64 rtl83xx_mc_group_del_port(struct rtl838x_switch_priv *priv, int mc_group, int port)
+{
+ u64 portmask = priv->r->read_mcast_pmask(mc_group);
+
+ pr_debug("%s: %d\n", __func__, port);
+ if (priv->is_lagmember[port]) {
+ pr_info("%s: %d is lag slave. ignore\n", __func__, port);
+ return portmask;
+ }
+ portmask &= ~BIT_ULL(port);
+ priv->r->write_mcast_pmask(mc_group, portmask);
+ if (portmask == BIT_ULL(priv->cpu_port)) {
+ portmask &= ~BIT_ULL(priv->cpu_port);
+ priv->r->write_mcast_pmask(mc_group, portmask);
+ clear_bit(mc_group, priv->mc_group_bm);
+ }
+
+ return portmask;
+}
+
+static void store_mcgroups(struct rtl838x_switch_priv *priv, int port)
+{
+ int mc_group;
+
+ for (mc_group = 0; mc_group < MAX_MC_GROUPS; mc_group++) {
+ u64 portmask = priv->r->read_mcast_pmask(mc_group);
+ if (portmask & BIT_ULL(port)) {
+ priv->mc_group_saves[mc_group] = port;
+ rtl83xx_mc_group_del_port(priv, mc_group, port);
+ }
+ }
+}
+
+static void load_mcgroups(struct rtl838x_switch_priv *priv, int port)
+{
+ int mc_group;
+
+ for (mc_group = 0; mc_group < MAX_MC_GROUPS; mc_group++) {
+ if (priv->mc_group_saves[mc_group] == port) {
+ rtl83xx_mc_group_add_port(priv, mc_group, port);
+ priv->mc_group_saves[mc_group] = -1;
+ }
+ }
+}
+
+static int rtl83xx_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u64 v;
+
+ pr_debug("%s: %x %d", __func__, (u32) priv, port);
+ priv->ports[port].enable = true;
+
+ /* enable inner tagging on egress, do not keep any tags */
+ priv->r->vlan_port_keep_tag_set(port, 0, 1);
+
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ /* add port to switch mask of CPU_PORT */
+ priv->r->traffic_enable(priv->cpu_port, port);
+
+ load_mcgroups(priv, port);
+
+ if (priv->is_lagmember[port]) {
+ pr_debug("%s: %d is lag slave. ignore\n", __func__, port);
+ return 0;
+ }
+
+ /* add all other ports in the same bridge to switch mask of port */
+ v = priv->r->traffic_get(port);
+ v |= priv->ports[port].pm;
+ priv->r->traffic_set(port, v);
+
+ // TODO: Figure out if this is necessary
+ if (priv->family_id == RTL9300_FAMILY_ID) {
+ sw_w32_mask(0, BIT(port), RTL930X_L2_PORT_SABLK_CTRL);
+ sw_w32_mask(0, BIT(port), RTL930X_L2_PORT_DABLK_CTRL);
+ }
+
+ if (priv->ports[port].sds_num < 0)
+ priv->ports[port].sds_num = rtl93xx_get_sds(phydev);
+
+ return 0;
+}
+
+static void rtl83xx_port_disable(struct dsa_switch *ds, int port)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u64 v;
+
+ pr_debug("%s %x: %d", __func__, (u32)priv, port);
+ /* you can only disable user ports */
+ if (!dsa_is_user_port(ds, port))
+ return;
+
+ // BUG: This does not work on RTL931X
+ /* remove port from switch mask of CPU_PORT */
+ priv->r->traffic_disable(priv->cpu_port, port);
+ store_mcgroups(priv, port);
+
+ /* remove all other ports in the same bridge from switch mask of port */
+ v = priv->r->traffic_get(port);
+ v &= ~priv->ports[port].pm;
+ priv->r->traffic_set(port, v);
+
+ priv->ports[port].enable = false;
+}
+
+static int rtl83xx_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ if (e->eee_enabled && !priv->eee_enabled) {
+ pr_info("Globally enabling EEE\n");
+ priv->r->init_eee(priv, true);
+ }
+
+ priv->r->port_eee_set(priv, port, e->eee_enabled);
+
+ if (e->eee_enabled)
+ pr_info("Enabled EEE for port %d\n", port);
+ else
+ pr_info("Disabled EEE for port %d\n", port);
+ return 0;
+}
+
+static int rtl83xx_get_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ e->supported = SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full;
+
+ priv->r->eee_port_ability(priv, e, port);
+
+ e->eee_enabled = priv->ports[port].eee_enabled;
+
+ e->eee_active = !!(e->advertised & e->lp_advertised);
+
+ return 0;
+}
+
+static int rtl93xx_get_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ e->supported = SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full
+ | SUPPORTED_2500baseX_Full;
+
+ priv->r->eee_port_ability(priv, e, port);
+
+ e->eee_enabled = priv->ports[port].eee_enabled;
+
+ e->eee_active = !!(e->advertised & e->lp_advertised);
+
+ return 0;
+}
+
+static int rtl83xx_set_ageing_time(struct dsa_switch *ds, unsigned int msec)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ priv->r->set_ageing_time(msec);
+ return 0;
+}
+
+static int rtl83xx_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u64 port_bitmap = BIT_ULL(priv->cpu_port), v;
+ int i;
+
+ pr_debug("%s %x: %d %llx", __func__, (u32)priv, port, port_bitmap);
+
+ if (priv->is_lagmember[port]) {
+ pr_debug("%s: %d is lag slave. ignore\n", __func__, port);
+ return 0;
+ }
+
+ mutex_lock(&priv->reg_mutex);
+ for (i = 0; i < ds->num_ports; i++) {
+ /* Add this port to the port matrix of the other ports in the
+ * same bridge. If the port is disabled, port matrix is kept
+ * and not being setup until the port becomes enabled.
+ */
+ if (dsa_is_user_port(ds, i) && !priv->is_lagmember[i] && i != port) {
+ if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ continue;
+ if (priv->ports[i].enable)
+ priv->r->traffic_enable(i, port);
+
+ priv->ports[i].pm |= BIT_ULL(port);
+ port_bitmap |= BIT_ULL(i);
+ }
+ }
+ load_mcgroups(priv, port);
+
+ /* Add all other ports to this port matrix. */
+ if (priv->ports[port].enable) {
+ priv->r->traffic_enable(priv->cpu_port, port);
+ v = priv->r->traffic_get(port);
+ v |= port_bitmap;
+ priv->r->traffic_set(port, v);
+ }
+ priv->ports[port].pm |= port_bitmap;
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+static void rtl83xx_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u64 port_bitmap = BIT_ULL(priv->cpu_port), v;
+ int i;
+
+ pr_debug("%s %x: %d", __func__, (u32)priv, port);
+ mutex_lock(&priv->reg_mutex);
+ for (i = 0; i < ds->num_ports; i++) {
+ /* Remove this port from the port matrix of the other ports
+ * in the same bridge. If the port is disabled, port matrix
+ * is kept and not being setup until the port becomes enabled.
+ * And the other port's port matrix cannot be broken when the
+ * other port is still a VLAN-aware port.
+ */
+ if (dsa_is_user_port(ds, i) && i != port) {
+ if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ continue;
+ if (priv->ports[i].enable)
+ priv->r->traffic_disable(i, port);
+
+ priv->ports[i].pm &= ~BIT_ULL(port);
+ port_bitmap &= ~BIT_ULL(i);
+ }
+ }
+ store_mcgroups(priv, port);
+
+ /* Add all other ports to this port matrix. */
+ if (priv->ports[port].enable) {
+ v = priv->r->traffic_get(port);
+ v |= port_bitmap;
+ priv->r->traffic_set(port, v);
+ }
+ priv->ports[port].pm &= ~port_bitmap;
+
+ mutex_unlock(&priv->reg_mutex);
+}
+
+void rtl83xx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ u32 msti = 0;
+ u32 port_state[4];
+ int index, bit;
+ int pos = port;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ int n = priv->port_width << 1;
+
+ /* Ports above or equal CPU port can never be configured */
+ if (port >= priv->cpu_port)
+ return;
+
+ mutex_lock(&priv->reg_mutex);
+
+ /* For the RTL839x and later the bits are left-aligned; the 838x and 930x
+ * have 64-bit fields, the 839x and 931x have 128-bit fields.
+ */
+ if (priv->family_id == RTL8390_FAMILY_ID)
+ pos += 12;
+ if (priv->family_id == RTL9300_FAMILY_ID)
+ pos += 3;
+ if (priv->family_id == RTL9310_FAMILY_ID)
+ pos += 8;
+
+ index = n - (pos >> 4) - 1;
+ bit = (pos << 1) % 32;
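+ /* Example (RTL8390, assuming port_width is 2 so n = 4): port 5 gives
+ * pos = 17, index = 4 - 1 - 1 = 2 and bit = 34 % 32 = 2, i.e. the
+ * 2-bit STP state lands in bits 3:2 of port_state[2].
+ */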
+
+ priv->r->stp_get(priv, msti, port_state);
+
+ pr_debug("Current state, port %d: %d\n", port, (port_state[index] >> bit) & 3);
+ port_state[index] &= ~(3 << bit);
+
+ switch (state) {
+ case BR_STATE_DISABLED: /* 0 */
+ port_state[index] |= (0 << bit);
+ break;
+ case BR_STATE_BLOCKING: /* 4 */
+ case BR_STATE_LISTENING: /* 1 */
+ port_state[index] |= (1 << bit);
+ break;
+ case BR_STATE_LEARNING: /* 2 */
+ port_state[index] |= (2 << bit);
+ break;
+ case BR_STATE_FORWARDING: /* 3 */
+ port_state[index] |= (3 << bit);
+ break;
+ default:
+ break;
+ }
+
+ priv->r->stp_set(priv, msti, port_state);
+
+ mutex_unlock(&priv->reg_mutex);
+}
+
+void rtl83xx_fast_age(struct dsa_switch *ds, int port)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ int s = priv->family_id == RTL8390_FAMILY_ID ? 2 : 0;
+
+ pr_debug("FAST AGE port %d\n", port);
+ mutex_lock(&priv->reg_mutex);
+ /* RTL838X_L2_TBL_FLUSH_CTRL register bits (on the 839x the port
+ * fields are one bit wider):
+ * 0-4: Replacing port
+ * 5-9: Flushed/replaced port
+ * 10-21: FVID
+ * 22: Entry types: 1: dynamic, 0: also static
+ * 23: Match flush port
+ * 24: Match FVID
+ * 25: Flush (0) or replace (1) L2 entries
+ * 26: Status of action (1: Start, 0: Done)
+ */
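+ /* Illustration (not from the original source): on RTL838x (s = 0), flushing
+ * the dynamic entries learned on port 5 writes
+ * BIT(26) | BIT(23) | (5 << 5) = 0x048000a0, while on RTL839x (s = 2) the
+ * same flush becomes BIT(28) | BIT(25) | (5 << 6) = 0x12000140.
+ */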
+ sw_w32(1 << (26 + s) | 1 << (23 + s) | port << (5 + (s / 2)), priv->r->l2_tbl_flush_ctrl);
+
+ do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & BIT(26 + s));
+
+ mutex_unlock(&priv->reg_mutex);
+}
+
+void rtl931x_fast_age(struct dsa_switch *ds, int port)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ pr_info("%s port %d\n", __func__, port);
+ mutex_lock(&priv->reg_mutex);
+ sw_w32(port << 11, RTL931X_L2_TBL_FLUSH_CTRL + 4);
+
+ sw_w32(BIT(24) | BIT(28), RTL931X_L2_TBL_FLUSH_CTRL);
+
+ do { } while (sw_r32(RTL931X_L2_TBL_FLUSH_CTRL) & BIT(28));
+
+ mutex_unlock(&priv->reg_mutex);
+}
+
+void rtl930x_fast_age(struct dsa_switch *ds, int port)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ if (priv->family_id == RTL9310_FAMILY_ID)
+ return rtl931x_fast_age(ds, port);
+
+ pr_debug("FAST AGE port %d\n", port);
+ mutex_lock(&priv->reg_mutex);
+ sw_w32(port << 11, RTL930X_L2_TBL_FLUSH_CTRL + 4);
+
+ sw_w32(BIT(26) | BIT(30), RTL930X_L2_TBL_FLUSH_CTRL);
+
+ do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & BIT(30));
+
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int rtl83xx_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct switchdev_trans *trans)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ pr_debug("%s: port %d\n", __func__, port);
+ mutex_lock(&priv->reg_mutex);
+
+ if (vlan_filtering) {
+ /* Enable ingress and egress filtering
+ * The VLAN_PORT_IGR_FILTER register uses 2 bits for each port to define
+ * the filter action:
+ * 0: Always Forward
+ * 1: Drop packet
+ * 2: Trap packet to CPU port
+ * The egress filter uses 1 bit per port (0: DISABLED, 1: ENABLED)
+ */
+ if (port != priv->cpu_port)
+ priv->r->set_vlan_igr_filter(port, IGR_DROP);
+
+ priv->r->set_vlan_egr_filter(port, EGR_ENABLE);
+ } else {
+ /* Disable ingress and egress filtering */
+ if (port != priv->cpu_port)
+ priv->r->set_vlan_igr_filter(port, IGR_FORWARD);
+
+ priv->r->set_vlan_egr_filter(port, EGR_DISABLE);
+ }
+
+ /* Do we need to do something to the CPU-Port, too? */
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+static int rtl83xx_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct rtl838x_vlan_info info;
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ priv->r->vlan_tables_read(0, &info);
+
+ pr_debug("VLAN 0: Tagged ports %llx, untag %llx, profile %d, MC# %d, UC# %d, FID %x\n",
+ info.tagged_ports, info.untagged_ports, info.profile_id,
+ info.hash_mc_fid, info.hash_uc_fid, info.fid);
+
+ priv->r->vlan_tables_read(1, &info);
+ pr_debug("VLAN 1: Tagged ports %llx, untag %llx, profile %d, MC# %d, UC# %d, FID %x\n",
+ info.tagged_ports, info.untagged_ports, info.profile_id,
+ info.hash_mc_fid, info.hash_uc_fid, info.fid);
+ priv->r->vlan_set_untagged(1, info.untagged_ports);
+ pr_debug("SET: Untagged ports, VLAN %d: %llx\n", 1, info.untagged_ports);
+
+ priv->r->vlan_set_tagged(1, &info);
+ pr_debug("SET: Tagged ports, VLAN %d: %llx\n", 1, info.tagged_ports);
+
+ return 0;
+}
+
+static void rtl83xx_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct rtl838x_vlan_info info;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ int v;
+
+ pr_debug("%s port %d, vid_begin %d, vid_end %d, flags %x\n", __func__,
+ port, vlan->vid_begin, vlan->vid_end, vlan->flags);
+
+ if (vlan->vid_begin > 4095 || vlan->vid_end > 4095) {
+ dev_err(priv->dev, "VLAN out of range: %d - %d",
+ vlan->vid_begin, vlan->vid_end);
+ return;
+ }
+
+ mutex_lock(&priv->reg_mutex);
+
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
+ for (v = vlan->vid_begin; v <= vlan->vid_end; v++) {
+ if (!v)
+ continue;
+ /* Set both inner and outer PVID of the port */
+ priv->r->vlan_port_pvid_set(port, PBVLAN_TYPE_INNER, v);
+ priv->r->vlan_port_pvid_set(port, PBVLAN_TYPE_OUTER, v);
+ priv->r->vlan_port_pvidmode_set(port, PBVLAN_TYPE_INNER,
+ PBVLAN_MODE_UNTAG_AND_PRITAG);
+ priv->r->vlan_port_pvidmode_set(port, PBVLAN_TYPE_OUTER,
+ PBVLAN_MODE_UNTAG_AND_PRITAG);
+
+ priv->ports[port].pvid = vlan->vid_end;
+ }
+ }
+
+ for (v = vlan->vid_begin; v <= vlan->vid_end; v++) {
+ /* Get port memberships of this vlan */
+ priv->r->vlan_tables_read(v, &info);
+
+ /* new VLAN? */
+ if (!info.tagged_ports) {
+ info.fid = 0;
+ info.hash_mc_fid = false;
+ info.hash_uc_fid = false;
+ info.profile_id = 0;
+ }
+
+ /* sanitize untagged_ports - must be a subset */
+ if (info.untagged_ports & ~info.tagged_ports)
+ info.untagged_ports = 0;
+
+ info.tagged_ports |= BIT_ULL(port);
+ if (vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)
+ info.untagged_ports |= BIT_ULL(port);
+
+ priv->r->vlan_set_untagged(v, info.untagged_ports);
+ pr_debug("Untagged ports, VLAN %d: %llx\n", v, info.untagged_ports);
+
+ priv->r->vlan_set_tagged(v, &info);
+ pr_debug("Tagged ports, VLAN %d: %llx\n", v, info.tagged_ports);
+ }
+
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int rtl83xx_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct rtl838x_vlan_info info;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ int v;
+ u16 pvid;
+
+ pr_debug("%s: port %d, vid_begin %d, vid_end %d, flags %x\n", __func__,
+ port, vlan->vid_begin, vlan->vid_end, vlan->flags);
+
+ if (vlan->vid_begin > 4095 || vlan->vid_end > 4095) {
+ dev_err(priv->dev, "VLAN out of range: %d - %d",
+ vlan->vid_begin, vlan->vid_end);
+ return -ENOTSUPP;
+ }
+
+ mutex_lock(&priv->reg_mutex);
+ pvid = priv->ports[port].pvid;
+
+ for (v = vlan->vid_begin; v <= vlan->vid_end; v++) {
+ /* Reset to default if removing the current PVID */
+ if (v == pvid) {
+ priv->r->vlan_port_pvid_set(port, PBVLAN_TYPE_INNER, 0);
+ priv->r->vlan_port_pvid_set(port, PBVLAN_TYPE_OUTER, 0);
+ priv->r->vlan_port_pvidmode_set(port, PBVLAN_TYPE_INNER,
+ PBVLAN_MODE_UNTAG_AND_PRITAG);
+ priv->r->vlan_port_pvidmode_set(port, PBVLAN_TYPE_OUTER,
+ PBVLAN_MODE_UNTAG_AND_PRITAG);
+ }
+ /* Get port memberships of this vlan */
+ priv->r->vlan_tables_read(v, &info);
+
+ /* remove port from both tables */
+ info.untagged_ports &= (~BIT_ULL(port));
+ info.tagged_ports &= (~BIT_ULL(port));
+
+ priv->r->vlan_set_untagged(v, info.untagged_ports);
+ pr_debug("Untagged ports, VLAN %d: %llx\n", v, info.untagged_ports);
+
+ priv->r->vlan_set_tagged(v, &info);
+ pr_debug("Tagged ports, VLAN %d: %llx\n", v, info.tagged_ports);
+ }
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+static void rtl83xx_setup_l2_uc_entry(struct rtl838x_l2_entry *e, int port, int vid, u64 mac)
+{
+ memset(e, 0, sizeof(*e));
+
+ e->type = L2_UNICAST;
+ e->valid = true;
+
+ e->age = 3;
+ e->is_static = true;
+
+ e->port = port;
+
+ e->rvid = e->vid = vid;
+ e->is_ip_mc = e->is_ipv6_mc = false;
+ u64_to_ether_addr(mac, e->mac);
+}
+
+static void rtl83xx_setup_l2_mc_entry(struct rtl838x_l2_entry *e, int vid, u64 mac, int mc_group)
+{
+ memset(e, 0, sizeof(*e));
+
+ e->type = L2_MULTICAST;
+ e->valid = true;
+
+ e->mc_portmask_index = mc_group;
+
+ e->rvid = e->vid = vid;
+ e->is_ip_mc = e->is_ipv6_mc = false;
+ u64_to_ether_addr(mac, e->mac);
+}
+
+/*
+ * Uses the seed to derive a hash key identifying a bucket in the L2 table, then loops
+ * over the entries in that bucket until either a matching entry or an empty slot is found.
+ * Returns the filled-in rtl838x_l2_entry and the index of the entry (hash key and bucket
+ * position combined) when an entry was found.
+ * When an empty slot was found and must_exist is false, the index of that slot is returned.
+ * Returns -1 when no slot is available.
+ */
+static int rtl83xx_find_l2_hash_entry(struct rtl838x_switch_priv *priv, u64 seed,
+ bool must_exist, struct rtl838x_l2_entry *e)
+{
+ int i, idx = -1;
+ u32 key = priv->r->l2_hash_key(priv, seed);
+ u64 entry;
+
+ pr_debug("%s: using key %x, for seed %016llx\n", __func__, key, seed);
+ // Loop over all entries in the hash-bucket and over the second block on 93xx SoCs
+ for (i = 0; i < priv->l2_bucket_size; i++) {
+ entry = priv->r->read_l2_entry_using_hash(key, i, e);
+ pr_debug("valid %d, mac %016llx\n", e->valid, ether_addr_to_u64(&e->mac[0]));
+ if (must_exist && !e->valid)
+ continue;
+ if (!e->valid || ((entry & 0x0fffffffffffffffULL) == seed)) {
+ idx = i > 3 ? ((key >> 14) & 0xffff) | i >> 1 : ((key << 2) | i) & 0xffff;
+ break;
+ }
+ }
+
+ return idx;
+}
+
+/*
+ * Uses the seed to identify an entry in the CAM by looping over all of its entries.
+ * Returns the filled-in rtl838x_l2_entry and the index in the CAM when an entry was found.
+ * When an empty slot was found, the index of that slot is returned.
+ * Returns -1 when no slot is available.
+ */
+static int rtl83xx_find_l2_cam_entry(struct rtl838x_switch_priv *priv, u64 seed,
+ bool must_exist, struct rtl838x_l2_entry *e)
+{
+ int i, idx = -1;
+ u64 entry;
+
+ for (i = 0; i < 64; i++) {
+ entry = priv->r->read_cam(i, e);
+ if (!must_exist && !e->valid) {
+ if (idx < 0) /* First empty entry? */
+ idx = i;
+ break;
+ } else if ((entry & 0x0fffffffffffffffULL) == seed) {
+ pr_debug("Found entry in CAM\n");
+ idx = i;
+ break;
+ }
+ }
+ return idx;
+}
+
+static int rtl83xx_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u64 mac = ether_addr_to_u64(addr);
+ struct rtl838x_l2_entry e;
+ int err = 0, idx;
+ u64 seed = priv->r->l2_hash_seed(mac, vid);
+
+ if (priv->is_lagmember[port]) {
+ pr_debug("%s: %d is lag slave. ignore\n", __func__, port);
+ return 0;
+ }
+
+ mutex_lock(&priv->reg_mutex);
+
+ idx = rtl83xx_find_l2_hash_entry(priv, seed, false, &e);
+
+ // Found an existing or empty entry
+ if (idx >= 0) {
+ rtl83xx_setup_l2_uc_entry(&e, port, vid, mac);
+ priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
+ goto out;
+ }
+
+ // Hash buckets full, try CAM
+ idx = rtl83xx_find_l2_cam_entry(priv, seed, false, &e);
+
+ if (idx >= 0) {
+ rtl83xx_setup_l2_uc_entry(&e, port, vid, mac);
+ priv->r->write_cam(idx, &e);
+ goto out;
+ }
+
+ err = -ENOTSUPP;
+out:
+ mutex_unlock(&priv->reg_mutex);
+ return err;
+}
+
+static int rtl83xx_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u64 mac = ether_addr_to_u64(addr);
+ struct rtl838x_l2_entry e;
+ int err = 0, idx;
+ u64 seed = priv->r->l2_hash_seed(mac, vid);
+
+ pr_debug("In %s, mac %llx, vid: %d\n", __func__, mac, vid);
+ mutex_lock(&priv->reg_mutex);
+
+ idx = rtl83xx_find_l2_hash_entry(priv, seed, true, &e);
+
+ if (idx >= 0) {
+ pr_debug("Found entry index %d, key %d and bucket %d\n", idx, idx >> 2, idx & 3);
+ e.valid = false;
+ priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
+ goto out;
+ }
+
+ /* Check CAM for spillover from hash buckets */
+ idx = rtl83xx_find_l2_cam_entry(priv, seed, true, &e);
+
+ if (idx >= 0) {
+ e.valid = false;
+ priv->r->write_cam(idx, &e);
+ goto out;
+ }
+ err = -ENOENT;
+out:
+ mutex_unlock(&priv->reg_mutex);
+ return err;
+}
+
+static int rtl83xx_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct rtl838x_l2_entry e;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ int i;
+
+ mutex_lock(&priv->reg_mutex);
+
+ for (i = 0; i < priv->fib_entries; i++) {
+ priv->r->read_l2_entry_using_hash(i >> 2, i & 0x3, &e);
+
+ if (!e.valid)
+ continue;
+
+ if (e.port == port || e.port == RTL930X_PORT_IGNORE)
+ cb(e.mac, e.vid, e.is_static, data);
+ }
+
+ for (i = 0; i < 64; i++) {
+ priv->r->read_cam(i, &e);
+
+ if (!e.valid)
+ continue;
+
+ if (e.port == port)
+ cb(e.mac, e.vid, e.is_static, data);
+ }
+
+ mutex_unlock(&priv->reg_mutex);
+ return 0;
+}
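+
+/* Note (added for clarity): this op backs FDB dumps from user space,
+ * e.g. "bridge fdb show dev lan1" on a hypothetical port named lan1.
+ */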
+
+static int rtl83xx_port_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ if (priv->id >= 0x9300)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static void rtl83xx_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u64 mac = ether_addr_to_u64(mdb->addr);
+ struct rtl838x_l2_entry e;
+ int err = 0, idx;
+ int vid = mdb->vid;
+ u64 seed = priv->r->l2_hash_seed(mac, vid);
+ int mc_group;
+
+ pr_debug("In %s port %d, mac %llx, vid: %d\n", __func__, port, mac, vid);
+
+ if (priv->is_lagmember[port]) {
+ pr_debug("%s: %d is lag slave. ignore\n", __func__, port);
+ return;
+ }
+
+ mutex_lock(&priv->reg_mutex);
+
+ idx = rtl83xx_find_l2_hash_entry(priv, seed, false, &e);
+
+ // Found an existing or empty entry
+ if (idx >= 0) {
+ if (e.valid) {
+ pr_debug("Found an existing entry %016llx, mc_group %d\n",
+ ether_addr_to_u64(e.mac), e.mc_portmask_index);
+ rtl83xx_mc_group_add_port(priv, e.mc_portmask_index, port);
+ } else {
+ pr_debug("New entry for seed %016llx\n", seed);
+ mc_group = rtl83xx_mc_group_alloc(priv, port);
+ if (mc_group < 0) {
+ err = -ENOTSUPP;
+ goto out;
+ }
+ rtl83xx_setup_l2_mc_entry(&e, vid, mac, mc_group);
+ priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
+ }
+ goto out;
+ }
+
+ // Hash buckets full, try CAM
+ idx = rtl83xx_find_l2_cam_entry(priv, seed, false, &e);
+
+ if (idx >= 0) {
+ if (e.valid) {
+ pr_debug("Found existing CAM entry %016llx, mc_group %d\n",
+ ether_addr_to_u64(e.mac), e.mc_portmask_index);
+ rtl83xx_mc_group_add_port(priv, e.mc_portmask_index, port);
+ } else {
+ pr_debug("New entry\n");
+ mc_group = rtl83xx_mc_group_alloc(priv, port);
+ if (mc_group < 0) {
+ err = -ENOTSUPP;
+ goto out;
+ }
+ rtl83xx_setup_l2_mc_entry(&e, vid, mac, mc_group);
+ priv->r->write_cam(idx, &e);
+ }
+ goto out;
+ }
+
+ err = -ENOTSUPP;
+out:
+ mutex_unlock(&priv->reg_mutex);
+ if (err)
+ dev_err(ds->dev, "failed to add MDB entry\n");
+}
+
+int rtl83xx_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u64 mac = ether_addr_to_u64(mdb->addr);
+ struct rtl838x_l2_entry e;
+ int err = 0, idx;
+ int vid = mdb->vid;
+ u64 seed = priv->r->l2_hash_seed(mac, vid);
+ u64 portmask;
+
+ pr_debug("In %s, port %d, mac %llx, vid: %d\n", __func__, port, mac, vid);
+
+ if (priv->is_lagmember[port]) {
+ pr_info("%s: %d is lag slave. ignore\n", __func__, port);
+ return 0;
+ }
+
+ mutex_lock(&priv->reg_mutex);
+
+ idx = rtl83xx_find_l2_hash_entry(priv, seed, true, &e);
+
+ if (idx >= 0) {
+ pr_debug("Found entry index %d, key %d and bucket %d\n", idx, idx >> 2, idx & 3);
+ portmask = rtl83xx_mc_group_del_port(priv, e.mc_portmask_index, port);
+ if (!portmask) {
+ e.valid = false;
+ priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
+ }
+ goto out;
+ }
+
+ /* Check CAM for spillover from hash buckets */
+ idx = rtl83xx_find_l2_cam_entry(priv, seed, true, &e);
+
+ if (idx >= 0) {
+ portmask = rtl83xx_mc_group_del_port(priv, e.mc_portmask_index, port);
+ if (!portmask) {
+ e.valid = false;
+ priv->r->write_cam(idx, &e);
+ }
+ goto out;
+ }
+ // TODO: Re-enable with a newer kernel: err = -ENOENT;
+out:
+ mutex_unlock(&priv->reg_mutex);
+ return err;
+}
+
+static int rtl83xx_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
+{
+ /* We support 4 mirror groups, one destination port per group */
+ int group;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ int ctrl_reg, dpm_reg, spm_reg;
+
+ pr_debug("In %s\n", __func__);
+
+ for (group = 0; group < 4; group++) {
+ if (priv->mirror_group_ports[group] == mirror->to_local_port)
+ break;
+ }
+ if (group >= 4) {
+ for (group = 0; group < 4; group++) {
+ if (priv->mirror_group_ports[group] < 0)
+ break;
+ }
+ }
+
+ if (group >= 4)
+ return -ENOSPC;
+
+ ctrl_reg = priv->r->mir_ctrl + group * 4;
+ dpm_reg = priv->r->mir_dpm + group * 4 * priv->port_width;
+ spm_reg = priv->r->mir_spm + group * 4 * priv->port_width;
+
+ pr_debug("Using group %d\n", group);
+ mutex_lock(&priv->reg_mutex);
+
+ if (priv->family_id == RTL8380_FAMILY_ID) {
+ /* Enable mirroring to port across VLANs (bit 11) */
+ sw_w32(1 << 11 | (mirror->to_local_port << 4) | 1, ctrl_reg);
+ } else {
+ /* Enable mirroring to destination port */
+ sw_w32((mirror->to_local_port << 4) | 1, ctrl_reg);
+ }
+
+ if (ingress && (priv->r->get_port_reg_be(spm_reg) & (1ULL << port))) {
+ mutex_unlock(&priv->reg_mutex);
+ return -EEXIST;
+ }
+ if ((!ingress) && (priv->r->get_port_reg_be(dpm_reg) & (1ULL << port))) {
+ mutex_unlock(&priv->reg_mutex);
+ return -EEXIST;
+ }
+
+ if (ingress)
+ priv->r->mask_port_reg_be(0, 1ULL << port, spm_reg);
+ else
+ priv->r->mask_port_reg_be(0, 1ULL << port, dpm_reg);
+
+ priv->mirror_group_ports[group] = mirror->to_local_port;
+ mutex_unlock(&priv->reg_mutex);
+ return 0;
+}
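+
+/* Usage sketch (illustrative, interface names are made up): this op is typically
+ * reached through tc matchall/mirred hardware offload, e.g.
+ * tc qdisc add dev lan1 clsact
+ * tc filter add dev lan1 ingress matchall skip_sw action mirred egress mirror dev lan2
+ */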
+
+static void rtl83xx_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ int group = 0;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ int ctrl_reg, dpm_reg, spm_reg;
+
+ pr_debug("In %s\n", __func__);
+ for (group = 0; group < 4; group++) {
+ if (priv->mirror_group_ports[group] == mirror->to_local_port)
+ break;
+ }
+ if (group >= 4)
+ return;
+
+ ctrl_reg = priv->r->mir_ctrl + group * 4;
+ dpm_reg = priv->r->mir_dpm + group * 4 * priv->port_width;
+ spm_reg = priv->r->mir_spm + group * 4 * priv->port_width;
+
+ mutex_lock(&priv->reg_mutex);
+ if (mirror->ingress) {
+ /* Ingress, clear source port matrix */
+ priv->r->mask_port_reg_be(1ULL << port, 0, spm_reg);
+ } else {
+ /* Egress, clear destination port matrix */
+ priv->r->mask_port_reg_be(1ULL << port, 0, dpm_reg);
+ }
+
+ if (!(sw_r32(spm_reg) || sw_r32(dpm_reg))) {
+ priv->mirror_group_ports[group] = -1;
+ sw_w32(0, ctrl_reg);
+ }
+
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int rtl83xx_port_pre_bridge_flags(struct dsa_switch *ds, int port, unsigned long flags, struct netlink_ext_ack *extack)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ unsigned long features = 0;
+ pr_debug("%s: %d %lX\n", __func__, port, flags);
+ if (priv->r->enable_learning)
+ features |= BR_LEARNING;
+ if (priv->r->enable_flood)
+ features |= BR_FLOOD;
+ if (priv->r->enable_mcast_flood)
+ features |= BR_MCAST_FLOOD;
+ if (priv->r->enable_bcast_flood)
+ features |= BR_BCAST_FLOOD;
+ if (flags & ~(features))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int rtl83xx_port_bridge_flags(struct dsa_switch *ds, int port, unsigned long flags, struct netlink_ext_ack *extack)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ pr_debug("%s: %d %lX\n", __func__, port, flags);
+ if (priv->r->enable_learning)
+ priv->r->enable_learning(port, !!(flags & BR_LEARNING));
+
+ if (priv->r->enable_flood)
+ priv->r->enable_flood(port, !!(flags & BR_FLOOD));
+
+ if (priv->r->enable_mcast_flood)
+ priv->r->enable_mcast_flood(port, !!(flags & BR_MCAST_FLOOD));
+
+ if (priv->r->enable_bcast_flood)
+ priv->r->enable_bcast_flood(port, !!(flags & BR_BCAST_FLOOD));
+
+ return 0;
+}
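+
+/* Usage sketch (illustrative, interface name is made up): these flags are toggled
+ * from user space via the bridge tool, e.g.
+ * bridge link set dev lan1 learning on flood off mcast_flood off
+ */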
+
+static bool rtl83xx_lag_can_offload(struct dsa_switch *ds,
+ struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ int id;
+
+ id = dsa_lag_id(ds->dst, lag);
+ if (id < 0 || id >= ds->num_lag_ids)
+ return false;
+
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ return false;
+ }
+ if (info->hash_type != NETDEV_LAG_HASH_L2 && info->hash_type != NETDEV_LAG_HASH_L23)
+ return false;
+
+ return true;
+}
+
+static int rtl83xx_port_lag_change(struct dsa_switch *ds, int port)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ pr_debug("%s: %d\n", __func__, port);
+ // Nothing to be done...
+
+ return 0;
+}
+
+static int rtl83xx_port_lag_join(struct dsa_switch *ds, int port,
+ struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ int i, err = 0;
+
+ if (!rtl83xx_lag_can_offload(ds, lag, info))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->reg_mutex);
+
+ for (i = 0; i < priv->n_lags; i++) {
+ if ((!priv->lag_devs[i]) || (priv->lag_devs[i] == lag))
+ break;
+ }
+ if (port >= priv->cpu_port) {
+ err = -EINVAL;
+ goto out;
+ }
+ pr_info("port_lag_join: group %d, port %d\n",i, port);
+ if (!priv->lag_devs[i])
+ priv->lag_devs[i] = lag;
+
+ if (priv->lag_primary[i] == -1)
+ priv->lag_primary[i] = port;
+ else
+ priv->is_lagmember[port] = 1;
+
+ priv->lagmembers |= (1ULL << port);
+
+ pr_debug("lag_members = %llX\n", priv->lagmembers);
+ err = rtl83xx_lag_add(priv->ds, i, port, info);
+ if (err) {
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+ return err;
+
+}
+
+static int rtl83xx_port_lag_leave(struct dsa_switch *ds, int port,
+ struct net_device *lag)
+{
+ int i, group = -1, err;
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+ for (i = 0; i < priv->n_lags; i++) {
+ if (priv->lags_port_members[i] & BIT_ULL(port)) {
+ group = i;
+ break;
+ }
+ }
+
+ if (group == -1) {
+ pr_info("port_lag_leave: port %d is not a member\n", port);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (port >= priv->cpu_port) {
+ err = -EINVAL;
+ goto out;
+ }
+ pr_info("port_lag_del: group %d, port %d\n",group, port);
+ priv->lagmembers &=~ (1ULL << port);
+ priv->lag_primary[i] = -1;
+ priv->is_lagmember[port] = 0;
+ pr_debug("lag_members = %llX\n", priv->lagmembers);
+ err = rtl83xx_lag_del(priv->ds, group, port);
+ if (err) {
+ err = -EINVAL;
+ goto out;
+ }
+ if (!priv->lags_port_members[i])
+ priv->lag_devs[i] = NULL;
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+ return err;
+}
+
+int dsa_phy_read(struct dsa_switch *ds, int phy_addr, int phy_reg)
+{
+ u32 val;
+ u32 offset = 0;
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ if (phy_addr >= 24 && phy_addr <= 27
+ && priv->ports[24].phy == PHY_RTL838X_SDS) {
+ if (phy_addr == 26)
+ offset = 0x100;
+ val = sw_r32(RTL838X_SDS4_FIB_REG0 + offset + (phy_reg << 2)) & 0xffff;
+ return val;
+ }
+
+ read_phy(phy_addr, 0, phy_reg, &val);
+ return val;
+}
+
+int dsa_phy_write(struct dsa_switch *ds, int phy_addr, int phy_reg, u16 val)
+{
+ u32 offset = 0;
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ if (phy_addr >= 24 && phy_addr <= 27
+ && priv->ports[24].phy == PHY_RTL838X_SDS) {
+ if (phy_addr == 26)
+ offset = 0x100;
+ sw_w32(val, RTL838X_SDS4_FIB_REG0 + offset + (phy_reg << 2));
+ return 0;
+ }
+ return write_phy(phy_addr, 0, phy_reg, val);
+}
+
+const struct dsa_switch_ops rtl83xx_switch_ops = {
+ .get_tag_protocol = rtl83xx_get_tag_protocol,
+ .setup = rtl83xx_setup,
+
+ .phy_read = dsa_phy_read,
+ .phy_write = dsa_phy_write,
+
+ .phylink_validate = rtl83xx_phylink_validate,
+ .phylink_mac_link_state = rtl83xx_phylink_mac_link_state,
+ .phylink_mac_config = rtl83xx_phylink_mac_config,
+ .phylink_mac_link_down = rtl83xx_phylink_mac_link_down,
+ .phylink_mac_link_up = rtl83xx_phylink_mac_link_up,
+
+ .get_strings = rtl83xx_get_strings,
+ .get_ethtool_stats = rtl83xx_get_ethtool_stats,
+ .get_sset_count = rtl83xx_get_sset_count,
+
+ .port_enable = rtl83xx_port_enable,
+ .port_disable = rtl83xx_port_disable,
+
+ .get_mac_eee = rtl83xx_get_mac_eee,
+ .set_mac_eee = rtl83xx_set_mac_eee,
+
+ .set_ageing_time = rtl83xx_set_ageing_time,
+ .port_bridge_join = rtl83xx_port_bridge_join,
+ .port_bridge_leave = rtl83xx_port_bridge_leave,
+ .port_stp_state_set = rtl83xx_port_stp_state_set,
+ .port_fast_age = rtl83xx_fast_age,
+
+ .port_vlan_filtering = rtl83xx_vlan_filtering,
+ .port_vlan_prepare = rtl83xx_vlan_prepare,
+ .port_vlan_add = rtl83xx_vlan_add,
+ .port_vlan_del = rtl83xx_vlan_del,
+
+ .port_fdb_add = rtl83xx_port_fdb_add,
+ .port_fdb_del = rtl83xx_port_fdb_del,
+ .port_fdb_dump = rtl83xx_port_fdb_dump,
+
+ .port_mdb_prepare = rtl83xx_port_mdb_prepare,
+ .port_mdb_add = rtl83xx_port_mdb_add,
+ .port_mdb_del = rtl83xx_port_mdb_del,
+
+ .port_mirror_add = rtl83xx_port_mirror_add,
+ .port_mirror_del = rtl83xx_port_mirror_del,
+
+ .port_lag_change = rtl83xx_port_lag_change,
+ .port_lag_join = rtl83xx_port_lag_join,
+ .port_lag_leave = rtl83xx_port_lag_leave,
+
+ .port_pre_bridge_flags = rtl83xx_port_pre_bridge_flags,
+ .port_bridge_flags = rtl83xx_port_bridge_flags,
+};
+
+const struct dsa_switch_ops rtl930x_switch_ops = {
+ .get_tag_protocol = rtl83xx_get_tag_protocol,
+ .setup = rtl93xx_setup,
+
+ .phy_read = dsa_phy_read,
+ .phy_write = dsa_phy_write,
+
+ .phylink_validate = rtl93xx_phylink_validate,
+ .phylink_mac_link_state = rtl93xx_phylink_mac_link_state,
+ .phylink_mac_config = rtl93xx_phylink_mac_config,
+ .phylink_mac_link_down = rtl93xx_phylink_mac_link_down,
+ .phylink_mac_link_up = rtl93xx_phylink_mac_link_up,
+
+ .get_strings = rtl83xx_get_strings,
+ .get_ethtool_stats = rtl83xx_get_ethtool_stats,
+ .get_sset_count = rtl83xx_get_sset_count,
+
+ .port_enable = rtl83xx_port_enable,
+ .port_disable = rtl83xx_port_disable,
+
+ .get_mac_eee = rtl93xx_get_mac_eee,
+ .set_mac_eee = rtl83xx_set_mac_eee,
+
+ .set_ageing_time = rtl83xx_set_ageing_time,
+ .port_bridge_join = rtl83xx_port_bridge_join,
+ .port_bridge_leave = rtl83xx_port_bridge_leave,
+ .port_stp_state_set = rtl83xx_port_stp_state_set,
+ .port_fast_age = rtl930x_fast_age,
+
+ .port_vlan_filtering = rtl83xx_vlan_filtering,
+ .port_vlan_prepare = rtl83xx_vlan_prepare,
+ .port_vlan_add = rtl83xx_vlan_add,
+ .port_vlan_del = rtl83xx_vlan_del,
+
+ .port_fdb_add = rtl83xx_port_fdb_add,
+ .port_fdb_del = rtl83xx_port_fdb_del,
+ .port_fdb_dump = rtl83xx_port_fdb_dump,
+
+ .port_mdb_prepare = rtl83xx_port_mdb_prepare,
+ .port_mdb_add = rtl83xx_port_mdb_add,
+ .port_mdb_del = rtl83xx_port_mdb_del,
+
+ .port_lag_change = rtl83xx_port_lag_change,
+ .port_lag_join = rtl83xx_port_lag_join,
+ .port_lag_leave = rtl83xx_port_lag_leave,
+
+ .port_pre_bridge_flags = rtl83xx_port_pre_bridge_flags,
+ .port_bridge_flags = rtl83xx_port_bridge_flags,
+};
diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/qos.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/qos.c
new file mode 100644
index 0000000000..2fc8d37f3e
--- /dev/null
+++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/qos.c
@@ -0,0 +1,576 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <net/dsa.h>
+#include <linux/delay.h>
+
+#include <asm/mach-rtl838x/mach-rtl83xx.h>
+#include "rtl83xx.h"
+
+static struct rtl838x_switch_priv *switch_priv;
+extern struct rtl83xx_soc_info soc_info;
+
+enum scheduler_type {
+ WEIGHTED_FAIR_QUEUE = 0,
+ WEIGHTED_ROUND_ROBIN,
+};
+
+int max_available_queue[] = {0, 1, 2, 3, 4, 5, 6, 7};
+int default_queue_weights[] = {1, 1, 1, 1, 1, 1, 1, 1};
+int dot1p_priority_remapping[] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+static void rtl839x_read_scheduling_table(int port)
+{
+ u32 cmd = 1 << 9 /* Execute cmd */
+ | 0 << 8 /* Read */
+ | 0 << 6 /* Table type 0b00 */
+ | (port & 0x3f);
+ rtl839x_exec_tbl2_cmd(cmd);
+}
+
+static void rtl839x_write_scheduling_table(int port)
+{
+ u32 cmd = 1 << 9 /* Execute cmd */
+ | 1 << 8 /* Write */
+ | 0 << 6 /* Table type 0b00 */
+ | (port & 0x3f);
+ rtl839x_exec_tbl2_cmd(cmd);
+}
+
+static void rtl839x_read_out_q_table(int port)
+{
+ u32 cmd = 1 << 9 /* Execute cmd */
+ | 0 << 8 /* Read */
+ | 2 << 6 /* Table type 0b10 */
+ | (port & 0x3f);
+ rtl839x_exec_tbl2_cmd(cmd);
+}
+
+static void rtl838x_storm_enable(struct rtl838x_switch_priv *priv, int port, bool enable)
+{
+ // Enable Storm control for that port for UC, MC, and BC
+ if (enable)
+ sw_w32(0x7, RTL838X_STORM_CTRL_LB_CTRL(port));
+ else
+ sw_w32(0x0, RTL838X_STORM_CTRL_LB_CTRL(port));
+}
+
+u32 rtl838x_get_egress_rate(struct rtl838x_switch_priv *priv, int port)
+{
+ u32 rate;
+
+ if (port > priv->cpu_port)
+ return 0;
+ rate = sw_r32(RTL838X_SCHED_P_EGR_RATE_CTRL(port)) & 0x3fff;
+ return rate;
+}
+
+/* Sets the rate limit, 10MBit/s is equal to a rate value of 625 */
+int rtl838x_set_egress_rate(struct rtl838x_switch_priv *priv, int port, u32 rate)
+{
+ u32 old_rate;
+
+ if (port > priv->cpu_port)
+ return -1;
+
+ old_rate = sw_r32(RTL838X_SCHED_P_EGR_RATE_CTRL(port));
+ sw_w32(rate, RTL838X_SCHED_P_EGR_RATE_CTRL(port));
+
+ return old_rate;
+}
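+
+/* Usage sketch (illustrative): since a rate value of 625 corresponds to
+ * 10 MBit/s, limiting a port to 100 MBit/s would be
+ * rtl838x_set_egress_rate(priv, port, 6250).
+ */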
+
+/* Set the rate limit for a particular queue in bits/s;
+ * the unit of the rate value is 16 kbit/s
+ */
+void rtl838x_egress_rate_queue_limit(struct rtl838x_switch_priv *priv, int port,
+ int queue, u32 rate)
+{
+ if (port > priv->cpu_port)
+ return;
+ if (queue > 7)
+ return;
+ sw_w32(rate, RTL838X_SCHED_Q_EGR_RATE_CTRL(port, queue));
+}
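+
+/* Illustration: with 16 kbit/s units, a rate value of 625 corresponds to
+ * 10 MBit/s and 62500 to 1 GBit/s.
+ */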
+
+static void rtl838x_rate_control_init(struct rtl838x_switch_priv *priv)
+{
+ int i;
+
+ pr_info("Enabling Storm control\n");
+ // TICK_PERIOD_PPS
+ if (priv->id == 0x8380)
+ sw_w32_mask(0x3ff << 20, 434 << 20, RTL838X_SCHED_LB_TICK_TKN_CTRL_0);
+
+ // Set burst rate
+ sw_w32(0x00008000, RTL838X_STORM_CTRL_BURST_0); // UC
+ sw_w32(0x80008000, RTL838X_STORM_CTRL_BURST_1); // MC and BC
+
+ // Set burst Packets per Second to 32
+ sw_w32(0x00000020, RTL838X_STORM_CTRL_BURST_PPS_0); // UC
+ sw_w32(0x00200020, RTL838X_STORM_CTRL_BURST_PPS_1); // MC and BC
+
+ // Include IFG in storm control, rate based on bytes/s (0 = packets)
+ sw_w32_mask(0, 1 << 6 | 1 << 5, RTL838X_STORM_CTRL);
+ // Bandwidth control includes preamble and IFG (10 Bytes)
+ sw_w32_mask(0, 1, RTL838X_SCHED_CTRL);
+
+ // On SoCs except RTL8382M, set burst size of port egress
+ if (priv->id != 0x8382)
+ sw_w32_mask(0xffff, 0x800, RTL838X_SCHED_LB_THR);
+
+ /* Enable storm control on all ports with a PHY and limit rates,
+ * for UC and MC for both known and unknown addresses */
+ for (i = 0; i < priv->cpu_port; i++) {
+ if (priv->ports[i].phy) {
+ sw_w32((1 << 18) | 0x8000, RTL838X_STORM_CTRL_PORT_UC(i));
+ sw_w32((1 << 18) | 0x8000, RTL838X_STORM_CTRL_PORT_MC(i));
+ sw_w32(0x8000, RTL838X_STORM_CTRL_PORT_BC(i));
+ rtl838x_storm_enable(priv, i, true);
+ }
+ }
+
+ // Attack prevention, enable all attack prevention measures
+ //sw_w32(0x1ffff, RTL838X_ATK_PRVNT_CTRL);
+ /* Attack prevention, drop (bit = 0) problematic packets on all ports.
+ * Setting bit = 1 means: trap to CPU
+ */
+ //sw_w32(0, RTL838X_ATK_PRVNT_ACT);
+ // Enable attack prevention on all ports
+ //sw_w32(0x0fffffff, RTL838X_ATK_PRVNT_PORT_EN);
+}
+
+/* Gets the current egress rate limit; a rate value of 625 corresponds to 10 MBit/s */
+u32 rtl839x_get_egress_rate(struct rtl838x_switch_priv *priv, int port)
+{
+ u32 rate;
+
+ pr_debug("%s: Getting egress rate on port %d\n", __func__, port);
+ if (port >= priv->cpu_port)
+ return 0;
+
+ mutex_lock(&priv->reg_mutex);
+
+ rtl839x_read_scheduling_table(port);
+
+ rate = sw_r32(RTL839X_TBL_ACCESS_DATA_2(7));
+ rate <<= 12;
+ rate |= sw_r32(RTL839X_TBL_ACCESS_DATA_2(8)) >> 20;
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return rate;
+}
+
+/* Sets the rate limit, 10MBit/s is equal to a rate value of 625, returns previous rate */
+int rtl839x_set_egress_rate(struct rtl838x_switch_priv *priv, int port, u32 rate)
+{
+ u32 old_rate;
+
+ pr_debug("%s: Setting egress rate on port %d to %d\n", __func__, port, rate);
+ if (port >= priv->cpu_port)
+ return -1;
+
+ mutex_lock(&priv->reg_mutex);
+
+ rtl839x_read_scheduling_table(port);
+
+ old_rate = sw_r32(RTL839X_TBL_ACCESS_DATA_2(7)) & 0xff;
+ old_rate <<= 12;
+ old_rate |= sw_r32(RTL839X_TBL_ACCESS_DATA_2(8)) >> 20;
+ sw_w32_mask(0xff, (rate >> 12) & 0xff, RTL839X_TBL_ACCESS_DATA_2(7));
+ sw_w32_mask(0xfff << 20, rate << 20, RTL839X_TBL_ACCESS_DATA_2(8));
+
+ rtl839x_write_scheduling_table(port);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return old_rate;
+}
+
+/* Set the rate limit for a particular queue in bits/s;
+ * the unit of the rate value is 16 kbit/s
+ */
+void rtl839x_egress_rate_queue_limit(struct rtl838x_switch_priv *priv, int port,
+ int queue, u32 rate)
+{
+ int lsb = 128 + queue * 20;
+ int low_byte = 8 - (lsb >> 5);
+ int start_bit = lsb - (low_byte << 5);
+ u32 high_mask = 0xfffff >> (32 - start_bit);
+
+ pr_debug("%s: Setting egress rate on port %d, queue %d to %d\n",
+ __func__, port, queue, rate);
+ if (port >= priv->cpu_port)
+ return;
+ if (queue > 7)
+ return;
+
+ mutex_lock(&priv->reg_mutex);
+
+ rtl839x_read_scheduling_table(port);
+
+ sw_w32_mask(0xfffff << start_bit, (rate & 0xfffff) << start_bit,
+ RTL839X_TBL_ACCESS_DATA_2(low_byte));
+ if (high_mask)
+ sw_w32_mask(high_mask, (rate & 0xfffff) >> (32 - start_bit),
+ RTL839X_TBL_ACCESS_DATA_2(low_byte - 1));
+
+ rtl839x_write_scheduling_table(port);
+
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static void rtl839x_rate_control_init(struct rtl838x_switch_priv *priv)
+{
+ int p, q;
+
+ pr_info("%s: enabling rate control\n", __func__);
+ /* Tick length and token size settings for SoC with 250MHz,
+ * RTL8350 family would use 50MHz
+ */
+ // Set the special tick period
+ sw_w32(976563, RTL839X_STORM_CTRL_SPCL_LB_TICK_TKN_CTRL);
+ // Ingress tick period and token length 10G
+ sw_w32(18 << 11 | 151, RTL839X_IGR_BWCTRL_LB_TICK_TKN_CTRL_0);
+ // Ingress tick period and token length 1G
+ sw_w32(245 << 11 | 129, RTL839X_IGR_BWCTRL_LB_TICK_TKN_CTRL_1);
+ // Egress tick period 10G, bytes/token 10G and tick period 1G, bytes/token 1G
+ sw_w32(18 << 24 | 151 << 16 | 185 << 8 | 97, RTL839X_SCHED_LB_TICK_TKN_CTRL);
+ // Set the tick period of the CPU and the Token Len
+ sw_w32(3815 << 8 | 1, RTL839X_SCHED_LB_TICK_TKN_PPS_CTRL);
+
+ // Set the Weighted Fair Queueing burst size
+ sw_w32_mask(0xffff, 4500, RTL839X_SCHED_LB_THR);
+
+ // Storm-rate calculation is based on bytes/sec (bit 5), include IFG (bit 6)
+ sw_w32_mask(0, 1 << 5 | 1 << 6, RTL839X_STORM_CTRL);
+
+ /* Based on the rate control mode being bytes/s
+ * set tick period and token length for 10G
+ */
+ sw_w32(18 << 10 | 151, RTL839X_STORM_CTRL_LB_TICK_TKN_CTRL_0);
+ /* and for 1G ports */
+ sw_w32(246 << 10 | 129, RTL839X_STORM_CTRL_LB_TICK_TKN_CTRL_1);
+
+ /* Set default burst rates on all ports (the same for 1G / 10G) with a PHY
+ * for UC, MC and BC
+ * For 1G port, the minimum burst rate is 1700, maximum 65535,
+ * For 10G ports it is 2650 and 1048575 respectively */
+ for (p = 0; p < priv->cpu_port; p++) {
+ if (priv->ports[p].phy && !priv->ports[p].is10G) {
+ sw_w32_mask(0xffff, 0x8000, RTL839X_STORM_CTRL_PORT_UC_1(p));
+ sw_w32_mask(0xffff, 0x8000, RTL839X_STORM_CTRL_PORT_MC_1(p));
+ sw_w32_mask(0xffff, 0x8000, RTL839X_STORM_CTRL_PORT_BC_1(p));
+ }
+ }
+
+ /* Setup ingress/egress per-port rate control */
+ for (p = 0; p < priv->cpu_port; p++) {
+ if (!priv->ports[p].phy)
+ continue;
+
+ if (priv->ports[p].is10G)
+ rtl839x_set_egress_rate(priv, p, 625000); // 10GB/s
+ else
+ rtl839x_set_egress_rate(priv, p, 62500); // 1GB/s
+
+ // Setup queues: all RTL83XX SoCs have 8 queues, maximum rate
+ for (q = 0; q < 8; q++)
+ rtl839x_egress_rate_queue_limit(priv, p, q, 0xfffff);
+
+ if (priv->ports[p].is10G) {
+ // Set high threshold to maximum
+ sw_w32_mask(0xffff, 0xffff, RTL839X_IGR_BWCTRL_PORT_CTRL_10G_0(p));
+ } else {
+ // Set high threshold to maximum
+ sw_w32_mask(0xffff, 0xffff, RTL839X_IGR_BWCTRL_PORT_CTRL_1(p));
+ }
+ }
+
+ // Set global ingress low watermark rate
+ sw_w32(65532, RTL839X_IGR_BWCTRL_CTRL_LB_THR);
+}
+
+void rtl838x_setup_prio2queue_matrix(int *min_queues)
+{
+ int i;
+ u32 v = 0;
+
+ pr_info("Current Intprio2queue setting: %08x\n", sw_r32(RTL838X_QM_INTPRI2QID_CTRL));
+ for (i = 0; i < MAX_PRIOS; i++)
+ v |= i << (min_queues[i] * 3);
+ sw_w32(v, RTL838X_QM_INTPRI2QID_CTRL);
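+ /* Illustration (assuming MAX_PRIOS is 8): with the identity mapping in
+ * max_available_queue, priority i ends up in bits 3*i..3*i+2, and the
+ * value written above works out to 0x00fac688.
+ */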
+}
+
+void rtl839x_setup_prio2queue_matrix(int *min_queues)
+{
+ int i, q;
+
+ pr_info("Current Intprio2queue setting: %08x\n", sw_r32(RTL839X_QM_INTPRI2QID_CTRL(0)));
+ for (i = 0; i < MAX_PRIOS; i++) {
+ q = min_queues[i];
+ sw_w32(i << (q * 3), RTL839X_QM_INTPRI2QID_CTRL(q));
+ }
+}
+
+/* Sets the CPU queue depending on the internal priority of a packet */
+void rtl83xx_setup_prio2queue_cpu_matrix(int *max_queues)
+{
+ int reg = soc_info.family == RTL8380_FAMILY_ID ? RTL838X_QM_PKT2CPU_INTPRI_MAP
+ : RTL839X_QM_PKT2CPU_INTPRI_MAP;
+ int i;
+ u32 v = 0;
+
+ pr_info("QM_PKT2CPU_INTPRI_MAP: %08x\n", sw_r32(reg));
+ for (i = 0; i < MAX_PRIOS; i++)
+ v |= max_queues[i] << (i * 3);
+ sw_w32(v, reg);
+}
+
+void rtl83xx_setup_default_prio2queue(void)
+{
+ if (soc_info.family == RTL8380_FAMILY_ID) {
+ rtl838x_setup_prio2queue_matrix(max_available_queue);
+ } else {
+ rtl839x_setup_prio2queue_matrix(max_available_queue);
+ }
+ rtl83xx_setup_prio2queue_cpu_matrix(max_available_queue);
+}
+
+/* Sets the output queue assigned to a port, the port can be the CPU-port */
+void rtl839x_set_egress_queue(int port, int queue)
+{
+ sw_w32(queue << ((port % 10) * 3), RTL839X_QM_PORT_QNUM(port));
+}
+
+/* Sets the priority assigned to an ingress port; the port can be the CPU port */
+void rtl83xx_set_ingress_priority(int port, int priority)
+{
+ if (soc_info.family == RTL8380_FAMILY_ID)
+ sw_w32(priority << ((port % 10) * 3), RTL838X_PRI_SEL_PORT_PRI(port));
+ else
+ sw_w32(priority << ((port % 10) * 3), RTL839X_PRI_SEL_PORT_PRI(port));
+
+}
+
+int rtl839x_get_scheduling_algorithm(struct rtl838x_switch_priv *priv, int port)
+{
+ u32 v;
+
+ mutex_lock(&priv->reg_mutex);
+
+ rtl839x_read_scheduling_table(port);
+ v = sw_r32(RTL839X_TBL_ACCESS_DATA_2(8));
+
+ mutex_unlock(&priv->reg_mutex);
+
+ if (v & BIT(19))
+ return WEIGHTED_ROUND_ROBIN;
+ return WEIGHTED_FAIR_QUEUE;
+}
+
+void rtl839x_set_scheduling_algorithm(struct rtl838x_switch_priv *priv, int port,
+ enum scheduler_type sched)
+{
+ enum scheduler_type t = rtl839x_get_scheduling_algorithm(priv, port);
+ u32 v, oam_state, oam_port_state;
+ u32 count;
+ int i, egress_rate;
+
+ mutex_lock(&priv->reg_mutex);
+ /* Check whether we need to empty the egress queue of that port due to Errata E0014503 */
+ if (sched == WEIGHTED_FAIR_QUEUE && t == WEIGHTED_ROUND_ROBIN && port != priv->cpu_port) {
+ // Read the Operations, Administration and Management (OAM) control register
+ oam_state = sw_r32(RTL839X_OAM_CTRL);
+
+ // Get current OAM state
+ oam_port_state = sw_r32(RTL839X_OAM_PORT_ACT_CTRL(port));
+
+ // Disable OAM to block traffic
+ v = sw_r32(RTL839X_OAM_CTRL);
+ sw_w32_mask(0, 1, RTL839X_OAM_CTRL);
+ v = sw_r32(RTL839X_OAM_CTRL);
+
+ // Set to trap action OAM forward (bits 1, 2) and OAM Mux Action Drop (bit 0)
+ sw_w32(0x2, RTL839X_OAM_PORT_ACT_CTRL(port));
+
+ // Set port egress rate to unlimited
+ egress_rate = rtl839x_set_egress_rate(priv, port, 0xFFFFF);
+
+ // Wait until the egress used page count of that port is 0
+ i = 0;
+ do {
+ usleep_range(100, 200);
+ rtl839x_read_out_q_table(port);
+ count = sw_r32(RTL839X_TBL_ACCESS_DATA_2(6));
+ count >>= 20;
+ i++;
+ } while (i < 3500 && count > 0);
+ }
+
+ // Actually set the scheduling algorithm
+ rtl839x_read_scheduling_table(port);
+ sw_w32_mask(BIT(19), sched ? BIT(19) : 0, RTL839X_TBL_ACCESS_DATA_2(8));
+ rtl839x_write_scheduling_table(port);
+
+ if (sched == WEIGHTED_FAIR_QUEUE && t == WEIGHTED_ROUND_ROBIN && port != priv->cpu_port) {
+ // Restore OAM state to control register
+ sw_w32(oam_state, RTL839X_OAM_CTRL);
+
+ // Restore trap action state
+ sw_w32(oam_port_state, RTL839X_OAM_PORT_ACT_CTRL(port));
+
+ // Restore port egress rate
+ rtl839x_set_egress_rate(priv, port, egress_rate);
+ }
+
+ mutex_unlock(&priv->reg_mutex);
+}
+
+void rtl839x_set_scheduling_queue_weights(struct rtl838x_switch_priv *priv, int port,
+ int *queue_weights)
+{
+ int i, lsb, low_byte, start_bit, high_mask;
+
+ mutex_lock(&priv->reg_mutex);
+
+ rtl839x_read_scheduling_table(port);
+
+ for (i = 0; i < 8; i++) {
+ lsb = 48 + i * 8;
+ low_byte = 8 - (lsb >> 5);
+ start_bit = lsb - (low_byte << 5);
+ high_mask = 0x3ff >> (32 - start_bit);
+ sw_w32_mask(0x3ff << start_bit, (queue_weights[i] & 0x3ff) << start_bit,
+ RTL839X_TBL_ACCESS_DATA_2(low_byte));
+ if (high_mask)
+ sw_w32_mask(high_mask, (queue_weights[i] & 0x3ff) >> (32 - start_bit),
+ RTL839X_TBL_ACCESS_DATA_2(low_byte - 1));
+ }
+
+ rtl839x_write_scheduling_table(port);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+void rtl838x_config_qos(void)
+{
+ int i, p;
+ u32 v;
+
+ pr_info("Setting up RTL838X QoS\n");
+ pr_info("RTL838X_PRI_SEL_TBL_CTRL(i): %08x\n", sw_r32(RTL838X_PRI_SEL_TBL_CTRL(0)));
+ rtl83xx_setup_default_prio2queue();
+
+ // Enable inner (bit 12) and outer (bit 13) priority remapping from DSCP
+ sw_w32_mask(0, BIT(12) | BIT(13), RTL838X_PRI_DSCP_INVLD_CTRL0);
+
+ /* Set default weight for calculating internal priority, in prio selection group 0
+ * Port based (prio 3), Port outer-tag (4), DSCP (5), Inner Tag (6), Outer Tag (7)
+ */
+ v = 3 | (4 << 3) | (5 << 6) | (6 << 9) | (7 << 12);
+ sw_w32(v, RTL838X_PRI_SEL_TBL_CTRL(0));
+
+ // Set the inner and outer priority one-to-one to re-marked outer dot1p priority
+ v = 0;
+ for (p = 0; p < 8; p++)
+ v |= p << (3 * p);
+ sw_w32(v, RTL838X_RMK_OPRI_CTRL);
+ sw_w32(v, RTL838X_RMK_IPRI_CTRL);
+
+ v = 0;
+ for (p = 0; p < 8; p++)
+ v |= (dot1p_priority_remapping[p] & 0x7) << (p * 3);
+ sw_w32(v, RTL838X_PRI_SEL_IPRI_REMAP);
+
+ // On all ports set scheduler type to WFQ
+ for (i = 0; i <= soc_info.cpu_port; i++)
+ sw_w32(0, RTL838X_SCHED_P_TYPE_CTRL(i));
+
+ // Enable egress scheduler for CPU-Port
+ sw_w32_mask(0, BIT(8), RTL838X_SCHED_LB_CTRL(soc_info.cpu_port));
+
+ // Enable egress drop (always on)
+ sw_w32_mask(0, BIT(11), RTL838X_FC_P_EGR_DROP_CTRL(soc_info.cpu_port));
+
+ // Give special trap frames priority 7 (BPDUs) and routing exceptions:
+ sw_w32_mask(0, 7 << 3 | 7, RTL838X_QM_PKT2CPU_INTPRI_2);
+ // Give RMA frames priority 7:
+ sw_w32_mask(0, 7, RTL838X_QM_PKT2CPU_INTPRI_1);
+}
+
+void rtl839x_config_qos(void)
+{
+ int port, p, q;
+ u32 v;
+ struct rtl838x_switch_priv *priv = switch_priv;
+
+ pr_info("Setting up RTL839X QoS\n");
+ pr_info("RTL839X_PRI_SEL_TBL_CTRL(i): %08x\n", sw_r32(RTL839X_PRI_SEL_TBL_CTRL(0)));
+ rtl83xx_setup_default_prio2queue();
+
+ for (port = 0; port < soc_info.cpu_port; port++)
+ sw_w32(7, RTL839X_QM_PORT_QNUM(port));
+
+ // CPU-port gets queue number 7
+ sw_w32(7, RTL839X_QM_PORT_QNUM(soc_info.cpu_port));
+
+ for (port = 0; port <= soc_info.cpu_port; port++) {
+ rtl83xx_set_ingress_priority(port, 0);
+ rtl839x_set_scheduling_algorithm(priv, port, WEIGHTED_FAIR_QUEUE);
+ rtl839x_set_scheduling_queue_weights(priv, port, default_queue_weights);
+ // Do re-marking based on outer tag
+ sw_w32_mask(0, BIT(port % 32), RTL839X_RMK_PORT_DEI_TAG_CTRL(port));
+ }
+
+ // Remap dot1p priorities to internal priorities; for this the outer tag needs to be re-marked
+ v = 0;
+ for (p = 0; p < 8; p++)
+ v |= (dot1p_priority_remapping[p] & 0x7) << (p * 3);
+ sw_w32(v, RTL839X_PRI_SEL_IPRI_REMAP);
+
+ /* Configure Drop Precedence for Drop Eligible Indicator (DEI)
+ * Index 0: 0
+ * Index 1: 2
+ * Each indicator is 2 bits long
+ */
+ sw_w32(2 << 2, RTL839X_PRI_SEL_DEI2DP_REMAP);
+
+ // Re-mark DEI: 4 bit-fields of 2 bits each, field 0 is bits 0-1, ...
+ sw_w32((0x1 << 2) | (0x1 << 4), RTL839X_RMK_DEI_CTRL);
+
+ /* Set Congestion avoidance drop probability to 0 for drop precedences 0-2 (bits 24-31)
+ * low threshold (bits 0-11) to 4095 and high threshold (bits 12-23) to 4095
+ * Weighted Random Early Detection (WRED) is used
+ */
+ sw_w32(4095 << 12 | 4095, RTL839X_WRED_PORT_THR_CTRL(0));
+ sw_w32(4095 << 12 | 4095, RTL839X_WRED_PORT_THR_CTRL(1));
+ sw_w32(4095 << 12 | 4095, RTL839X_WRED_PORT_THR_CTRL(2));
+
+ /* Set queue-based congestion avoidance properties; the register fields are
+ * laid out as for RTL839X_WRED_PORT_THR_CTRL above
+ */
+ for (q = 0; q < 8; q++) {
+ sw_w32(255 << 24 | 78 << 12 | 68, RTL839X_WRED_QUEUE_THR_CTRL(q, 0));
+ sw_w32(255 << 24 | 74 << 12 | 64, RTL839X_WRED_QUEUE_THR_CTRL(q, 1));
+ sw_w32(255 << 24 | 70 << 12 | 60, RTL839X_WRED_QUEUE_THR_CTRL(q, 2));
+ }
+}
+
+void __init rtl83xx_setup_qos(struct rtl838x_switch_priv *priv)
+{
+ switch_priv = priv;
+
+ pr_info("In %s\n", __func__);
+
+ if (priv->family_id == RTL8380_FAMILY_ID)
+ return rtl838x_config_qos();
+ else if (priv->family_id == RTL8390_FAMILY_ID)
+ return rtl839x_config_qos();
+
+ if (priv->family_id == RTL8380_FAMILY_ID)
+ rtl838x_rate_control_init(priv);
+ else if (priv->family_id == RTL8390_FAMILY_ID)
+ rtl839x_rate_control_init(priv);
+
+}
diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl838x.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl838x.c
new file mode 100644
index 0000000000..9ce5098979
--- /dev/null
+++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl838x.c
@@ -0,0 +1,2054 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <asm/mach-rtl838x/mach-rtl83xx.h>
+#include <linux/iopoll.h>
+#include <net/nexthop.h>
+
+#include "rtl83xx.h"
+
+#define RTL838X_VLAN_PORT_TAG_STS_UNTAG 0x0
+#define RTL838X_VLAN_PORT_TAG_STS_TAGGED 0x1
+#define RTL838X_VLAN_PORT_TAG_STS_PRIORITY_TAGGED 0x2
+
+#define RTL838X_VLAN_PORT_TAG_STS_CTRL_BASE 0xA530
+/* port 0-28 */
+#define RTL838X_VLAN_PORT_TAG_STS_CTRL(port) \
+ (RTL838X_VLAN_PORT_TAG_STS_CTRL_BASE + ((port) << 2))
+
+#define RTL838X_VLAN_PORT_TAG_STS_CTRL_EGR_P_OTAG_KEEP_MASK GENMASK(11, 10)
+#define RTL838X_VLAN_PORT_TAG_STS_CTRL_EGR_P_ITAG_KEEP_MASK GENMASK(9, 8)
+#define RTL838X_VLAN_PORT_TAG_STS_CTRL_IGR_P_OTAG_KEEP_MASK GENMASK(7, 6)
+#define RTL838X_VLAN_PORT_TAG_STS_CTRL_IGR_P_ITAG_KEEP_MASK GENMASK(5, 4)
+#define RTL838X_VLAN_PORT_TAG_STS_CTRL_OTAG_STS_MASK GENMASK(3, 2)
+#define RTL838X_VLAN_PORT_TAG_STS_CTRL_ITAG_STS_MASK GENMASK(1, 0)
+
+extern struct mutex smi_lock;
+
+// see dal_maple_acl_log2PhyTmplteField and src/app/diag_v2/src/diag_acl.c
+/* Definition of the RTL838X-specific template field IDs as used in the PIE */
+enum template_field_id {
+ TEMPLATE_FIELD_SPMMASK = 0,
+ TEMPLATE_FIELD_SPM0 = 1, // Source portmask ports 0-15
+ TEMPLATE_FIELD_SPM1 = 2, // Source portmask ports 16-28
+ TEMPLATE_FIELD_RANGE_CHK = 3,
+ TEMPLATE_FIELD_DMAC0 = 4, // Destination MAC [15:0]
+ TEMPLATE_FIELD_DMAC1 = 5, // Destination MAC [31:16]
+ TEMPLATE_FIELD_DMAC2 = 6, // Destination MAC [47:32]
+ TEMPLATE_FIELD_SMAC0 = 7, // Source MAC [15:0]
+ TEMPLATE_FIELD_SMAC1 = 8, // Source MAC [31:16]
+ TEMPLATE_FIELD_SMAC2 = 9, // Source MAC [47:32]
+ TEMPLATE_FIELD_ETHERTYPE = 10, // Ethernet type
+ TEMPLATE_FIELD_OTAG = 11, // Outer VLAN tag
+ TEMPLATE_FIELD_ITAG = 12, // Inner VLAN tag
+ TEMPLATE_FIELD_SIP0 = 13, // IPv4 or IPv6 source IP[15:0] or ARP/RARP
+ // source protocol address in header
+ TEMPLATE_FIELD_SIP1 = 14, // IPv4 or IPv6 source IP[31:16] or ARP/RARP
+ TEMPLATE_FIELD_DIP0 = 15, // IPv4 or IPv6 destination IP[15:0]
+ TEMPLATE_FIELD_DIP1 = 16, // IPv4 or IPv6 destination IP[31:16]
+ TEMPLATE_FIELD_IP_TOS_PROTO = 17, // IPv4 TOS/IPv6 traffic class and
+ // IPv4 proto/IPv6 next header fields
+ TEMPLATE_FIELD_L34_HEADER = 18, // packet with extra tag and IPv6 with auth, dest,
+ // frag, route, hop-by-hop option header,
+ // IGMP type, TCP flag
+ TEMPLATE_FIELD_L4_SPORT = 19, // TCP/UDP source port
+ TEMPLATE_FIELD_L4_DPORT = 20, // TCP/UDP destination port
+ TEMPLATE_FIELD_ICMP_IGMP = 21,
+ TEMPLATE_FIELD_IP_RANGE = 22,
+ TEMPLATE_FIELD_FIELD_SELECTOR_VALID = 23, // Field selector mask
+ TEMPLATE_FIELD_FIELD_SELECTOR_0 = 24,
+ TEMPLATE_FIELD_FIELD_SELECTOR_1 = 25,
+ TEMPLATE_FIELD_FIELD_SELECTOR_2 = 26,
+ TEMPLATE_FIELD_FIELD_SELECTOR_3 = 27,
+ TEMPLATE_FIELD_SIP2 = 28, // IPv6 source IP[47:32]
+ TEMPLATE_FIELD_SIP3 = 29, // IPv6 source IP[63:48]
+ TEMPLATE_FIELD_SIP4 = 30, // IPv6 source IP[79:64]
+ TEMPLATE_FIELD_SIP5 = 31, // IPv6 source IP[95:80]
+ TEMPLATE_FIELD_SIP6 = 32, // IPv6 source IP[111:96]
+ TEMPLATE_FIELD_SIP7 = 33, // IPv6 source IP[127:112]
+ TEMPLATE_FIELD_DIP2 = 34, // IPv6 destination IP[47:32]
+ TEMPLATE_FIELD_DIP3 = 35, // IPv6 destination IP[63:48]
+ TEMPLATE_FIELD_DIP4 = 36, // IPv6 destination IP[79:64]
+ TEMPLATE_FIELD_DIP5 = 37, // IPv6 destination IP[95:80]
+ TEMPLATE_FIELD_DIP6 = 38, // IPv6 destination IP[111:96]
+ TEMPLATE_FIELD_DIP7 = 39, // IPv6 destination IP[127:112]
+ TEMPLATE_FIELD_FWD_VID = 40, // Forwarding VLAN-ID
+ TEMPLATE_FIELD_FLOW_LABEL = 41,
+};
+
+/*
+ * The RTL838X SoCs use 5 fixed templates with definitions for which data fields are to
+ * be copied from the Ethernet Frame header into the 12 User-definable fields of the Packet
+ * Inspection Engine's buffer. The following defines the field contents for each of the fixed
+ * templates. Additionally, 3 user-definable templates can be set up via the definitions
+ * in RTL838X_ACL_TMPLTE_CTRL control registers.
+ * TODO: See all src/app/diag_v2/src/diag_pie.c
+ */
+#define N_FIXED_TEMPLATES 5
+static enum template_field_id fixed_templates[N_FIXED_TEMPLATES][N_FIXED_FIELDS] =
+{
+ {
+ TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1, TEMPLATE_FIELD_OTAG,
+ TEMPLATE_FIELD_SMAC0, TEMPLATE_FIELD_SMAC1, TEMPLATE_FIELD_SMAC2,
+ TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2,
+ TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_ITAG, TEMPLATE_FIELD_RANGE_CHK
+ }, {
+ TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0,
+ TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_IP_TOS_PROTO, TEMPLATE_FIELD_L4_SPORT,
+ TEMPLATE_FIELD_L4_DPORT, TEMPLATE_FIELD_ICMP_IGMP, TEMPLATE_FIELD_ITAG,
+ TEMPLATE_FIELD_RANGE_CHK, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1
+ }, {
+ TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2,
+ TEMPLATE_FIELD_ITAG, TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_IP_TOS_PROTO,
+ TEMPLATE_FIELD_L4_DPORT, TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_SIP0,
+ TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0, TEMPLATE_FIELD_DIP1
+ }, {
+ TEMPLATE_FIELD_DIP0, TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_DIP2,
+ TEMPLATE_FIELD_DIP3, TEMPLATE_FIELD_DIP4, TEMPLATE_FIELD_DIP5,
+ TEMPLATE_FIELD_DIP6, TEMPLATE_FIELD_DIP7, TEMPLATE_FIELD_L4_DPORT,
+ TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_ICMP_IGMP, TEMPLATE_FIELD_IP_TOS_PROTO
+ }, {
+ TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_SIP2,
+ TEMPLATE_FIELD_SIP3, TEMPLATE_FIELD_SIP4, TEMPLATE_FIELD_SIP5,
+ TEMPLATE_FIELD_SIP6, TEMPLATE_FIELD_SIP7, TEMPLATE_FIELD_ITAG,
+ TEMPLATE_FIELD_RANGE_CHK, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1
+ },
+};
+
+void rtl838x_print_matrix(void)
+{
+ unsigned volatile int *ptr8;
+ int i;
+
+ ptr8 = RTL838X_SW_BASE + RTL838X_PORT_ISO_CTRL(0);
+ for (i = 0; i < 28; i += 8)
+ pr_debug("> %8x %8x %8x %8x %8x %8x %8x %8x\n",
+ ptr8[i + 0], ptr8[i + 1], ptr8[i + 2], ptr8[i + 3],
+ ptr8[i + 4], ptr8[i + 5], ptr8[i + 6], ptr8[i + 7]);
+ pr_debug("CPU_PORT> %8x\n", ptr8[28]);
+}
+
+static inline int rtl838x_port_iso_ctrl(int p)
+{
+ return RTL838X_PORT_ISO_CTRL(p);
+}
+
+static inline void rtl838x_exec_tbl0_cmd(u32 cmd)
+{
+ sw_w32(cmd, RTL838X_TBL_ACCESS_CTRL_0);
+ do { } while (sw_r32(RTL838X_TBL_ACCESS_CTRL_0) & BIT(15));
+}
+
+static inline void rtl838x_exec_tbl1_cmd(u32 cmd)
+{
+ sw_w32(cmd, RTL838X_TBL_ACCESS_CTRL_1);
+ do { } while (sw_r32(RTL838X_TBL_ACCESS_CTRL_1) & BIT(15));
+}
+
+static inline int rtl838x_tbl_access_data_0(int i)
+{
+ return RTL838X_TBL_ACCESS_DATA_0(i);
+}
+
+static void rtl838x_vlan_tables_read(u32 vlan, struct rtl838x_vlan_info *info)
+{
+ u32 v;
+ // Read VLAN table (0) via register 0
+ struct table_reg *r = rtl_table_get(RTL8380_TBL_0, 0);
+
+ rtl_table_read(r, vlan);
+ info->tagged_ports = sw_r32(rtl_table_data(r, 0));
+ v = sw_r32(rtl_table_data(r, 1));
+ pr_debug("VLAN_READ %d: %016llx %08x\n", vlan, info->tagged_ports, v);
+ rtl_table_release(r);
+
+ info->profile_id = v & 0x7;
+ info->hash_mc_fid = !!(v & 0x8);
+ info->hash_uc_fid = !!(v & 0x10);
+ info->fid = (v >> 5) & 0x3f;
+
+ // Read UNTAG table (0) via table register 1
+ r = rtl_table_get(RTL8380_TBL_1, 0);
+ rtl_table_read(r, vlan);
+ info->untagged_ports = sw_r32(rtl_table_data(r, 0));
+ rtl_table_release(r);
+}
+
+static void rtl838x_vlan_set_tagged(u32 vlan, struct rtl838x_vlan_info *info)
+{
+ u32 v;
+ // Access VLAN table (0) via register 0
+ struct table_reg *r = rtl_table_get(RTL8380_TBL_0, 0);
+
+ sw_w32(info->tagged_ports, rtl_table_data(r, 0));
+
+ v = info->profile_id;
+ v |= info->hash_mc_fid ? 0x8 : 0;
+ v |= info->hash_uc_fid ? 0x10 : 0;
+ v |= ((u32)info->fid) << 5;
+ sw_w32(v, rtl_table_data(r, 1));
+
+ rtl_table_write(r, vlan);
+ rtl_table_release(r);
+}
+
+static void rtl838x_vlan_set_untagged(u32 vlan, u64 portmask)
+{
+ // Access UNTAG table (0) via register 1
+ struct table_reg *r = rtl_table_get(RTL8380_TBL_1, 0);
+
+ sw_w32(portmask & 0x1fffffff, rtl_table_data(r, 0));
+ rtl_table_write(r, vlan);
+ rtl_table_release(r);
+}
+
+/* Sets the L2 forwarding to be based on either the inner VLAN tag or the outer
+ */
+static void rtl838x_vlan_fwd_on_inner(int port, bool is_set)
+{
+ if (is_set)
+ sw_w32_mask(BIT(port), 0, RTL838X_VLAN_PORT_FWD);
+ else
+ sw_w32_mask(0, BIT(port), RTL838X_VLAN_PORT_FWD);
+}
+
+static u64 rtl838x_l2_hash_seed(u64 mac, u32 vid)
+{
+ return mac << 12 | vid;
+}
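+
+/* Illustration: for MAC 00:11:22:33:44:55 in VLAN 1 the seed is
+ * (0x001122334455 << 12) | 1 = 0x0001122334455001. The seed occupies at most
+ * 60 bits (48-bit MAC plus 12-bit VID), which matches the 0x0fffffffffffffff
+ * mask used when comparing entries against it in common.c.
+ */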
+
+/*
+ * Applies the same hash algorithm as the one used currently by the ASIC to the seed
+ * and returns a key into the L2 hash table
+ */
+static u32 rtl838x_l2_hash_key(struct rtl838x_switch_priv *priv, u64 seed)
+{
+ u32 h1, h2, h3, h;
+
+ if (sw_r32(priv->r->l2_ctrl_0) & 1) {
+ h1 = (seed >> 11) & 0x7ff;
+ h1 = ((h1 & 0x1f) << 6) | ((h1 >> 5) & 0x3f);
+
+ h2 = (seed >> 33) & 0x7ff;
+ h2 = ((h2 & 0x3f) << 5) | ((h2 >> 6) & 0x1f);
+
+ h3 = (seed >> 44) & 0x7ff;
+ h3 = ((h3 & 0x7f) << 4) | ((h3 >> 7) & 0xf);
+
+ h = h1 ^ h2 ^ h3 ^ ((seed >> 55) & 0x1ff);
+ h ^= ((seed >> 22) & 0x7ff) ^ (seed & 0x7ff);
+ } else {
+ h = ((seed >> 55) & 0x1ff) ^ ((seed >> 44) & 0x7ff)
+ ^ ((seed >> 33) & 0x7ff) ^ ((seed >> 22) & 0x7ff)
+ ^ ((seed >> 11) & 0x7ff) ^ (seed & 0x7ff);
+ }
+
+ return h;
+}
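+
+/* Note (added for clarity): the key returned here selects the hash bucket; together
+ * with a position 0-3 inside the bucket it forms the table index used by
+ * rtl838x_read_l2_entry_using_hash(), where idx = (hash << 2) | pos.
+ */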
+
+static inline int rtl838x_mac_force_mode_ctrl(int p)
+{
+ return RTL838X_MAC_FORCE_MODE_CTRL + (p << 2);
+}
+
+static inline int rtl838x_mac_port_ctrl(int p)
+{
+ return RTL838X_MAC_PORT_CTRL(p);
+}
+
+static inline int rtl838x_l2_port_new_salrn(int p)
+{
+ return RTL838X_L2_PORT_NEW_SALRN(p);
+}
+
+static inline int rtl838x_l2_port_new_sa_fwd(int p)
+{
+ return RTL838X_L2_PORT_NEW_SA_FWD(p);
+}
+
+static inline int rtl838x_mac_link_spd_sts(int p)
+{
+ return RTL838X_MAC_LINK_SPD_STS(p);
+}
+
+static inline int rtl838x_trk_mbr_ctr(int group)
+{
+ return RTL838X_TRK_MBR_CTR + (group << 2);
+}
+
+/*
+ * Fills an L2 entry structure from the SoC registers
+ */
+static void rtl838x_fill_l2_entry(u32 r[], struct rtl838x_l2_entry *e)
+{
+ /* Table contains different entry types, we need to identify the right one:
+ * Check for MC entries, first
+ * In contrast to the RTL93xx SoCs, there is no valid bit, use heuristics to
+ * identify valid entries
+ */
+ e->is_ip_mc = !!(r[0] & BIT(22));
+ e->is_ipv6_mc = !!(r[0] & BIT(21));
+ e->type = L2_INVALID;
+
+ if (!e->is_ip_mc && !e->is_ipv6_mc) {
+ e->mac[0] = (r[1] >> 20);
+ e->mac[1] = (r[1] >> 12);
+ e->mac[2] = (r[1] >> 4);
+ e->mac[3] = (r[1] & 0xf) << 4 | (r[2] >> 28);
+ e->mac[4] = (r[2] >> 20);
+ e->mac[5] = (r[2] >> 12);
+
+ e->rvid = r[2] & 0xfff;
+ e->vid = r[0] & 0xfff;
+
+ /* Is it a unicast entry? check multicast bit */
+ if (!(e->mac[0] & 1)) {
+ e->is_static = !!((r[0] >> 19) & 1);
+ e->port = (r[0] >> 12) & 0x1f;
+ e->block_da = !!(r[1] & BIT(30));
+ e->block_sa = !!(r[1] & BIT(31));
+ e->suspended = !!(r[1] & BIT(29));
+ e->next_hop = !!(r[1] & BIT(28));
+ if (e->next_hop) {
+ pr_debug("Found next hop entry, need to read extra data\n");
+ e->nh_vlan_target = !!(r[0] & BIT(9));
+ e->nh_route_id = r[0] & 0x1ff;
+ e->vid = e->rvid;
+ }
+ e->age = (r[0] >> 17) & 0x3;
+ e->valid = true;
+
+ /* A valid entry has at least one of the multicast, aging,
+ * SA/DA-blocking, next-hop or static-entry bits set */
+ if (!(r[0] & 0x007c0000) && !(r[1] & 0xd0000000))
+ e->valid = false;
+ else
+ e->type = L2_UNICAST;
+ } else { // L2 multicast
+ pr_debug("Got L2 MC entry: %08x %08x %08x\n", r[0], r[1], r[2]);
+ e->valid = true;
+ e->type = L2_MULTICAST;
+ e->mc_portmask_index = (r[0] >> 12) & 0x1ff;
+ }
+ } else { // IPv4 and IPv6 multicast
+ e->valid = true;
+ e->mc_portmask_index = (r[0] >> 12) & 0x1ff;
+ e->mc_gip = (r[1] << 20) | (r[2] >> 12);
+ e->rvid = r[2] & 0xfff;
+ }
+ if (e->is_ip_mc)
+ e->type = IP4_MULTICAST;
+ if (e->is_ipv6_mc)
+ e->type = IP6_MULTICAST;
+}
+
+/*
+ * Fills the 3 SoC table registers r[] with the information from the rtl838x_l2_entry
+ */
+static void rtl838x_fill_l2_row(u32 r[], struct rtl838x_l2_entry *e)
+{
+ u64 mac = ether_addr_to_u64(e->mac);
+
+ if (!e->valid) {
+ r[0] = r[1] = r[2] = 0;
+ return;
+ }
+
+ r[0] = e->is_ip_mc ? BIT(22) : 0;
+ r[0] |= e->is_ipv6_mc ? BIT(21) : 0;
+
+ if (!e->is_ip_mc && !e->is_ipv6_mc) {
+ r[1] = mac >> 20;
+ r[2] = (mac & 0xfffff) << 12;
+
+ /* Is it a unicast entry? check multicast bit */
+ if (!(e->mac[0] & 1)) {
+ r[0] |= e->is_static ? BIT(19) : 0;
+ r[0] |= (e->port & 0x3f) << 12;
+ r[0] |= e->vid;
+ r[1] |= e->block_da ? BIT(30) : 0;
+ r[1] |= e->block_sa ? BIT(31) : 0;
+ r[1] |= e->suspended ? BIT(29) : 0;
+ r[2] |= e->rvid & 0xfff;
+ if (e->next_hop) {
+ r[1] |= BIT(28);
+ r[0] |= e->nh_vlan_target ? BIT(9) : 0;
+ r[0] |= e->nh_route_id & 0x1ff;
+ }
+ r[0] |= (e->age & 0x3) << 17;
+ } else { // L2 Multicast
+ r[0] |= (e->mc_portmask_index & 0x1ff) << 12;
+ r[2] |= e->rvid & 0xfff;
+ r[0] |= e->vid & 0xfff;
+ pr_debug("FILL MC: %08x %08x %08x\n", r[0], r[1], r[2]);
+ }
+ } else { // IPv4 and IPv6 multicast
+ r[0] |= (e->mc_portmask_index & 0x1ff) << 12;
+ r[1] = e->mc_gip >> 20;
+ r[2] = e->mc_gip << 12;
+ r[2] |= e->rvid;
+ }
+}
+
+/*
+ * Read an L2 UC or MC entry out of a hash bucket of the L2 forwarding table
+ * hash is the id of the bucket and pos is the position of the entry in that bucket
+ * The data read from the SoC is filled into rtl838x_l2_entry
+ */
+static u64 rtl838x_read_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e)
+{
+ u64 entry;
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 0); // Access L2 Table 0
+ u32 idx = (0 << 14) | (hash << 2) | pos; // Search SRAM, with hash and at pos in bucket
+ int i;
+
+ rtl_table_read(q, idx);
+	for (i = 0; i < 3; i++)
+ r[i] = sw_r32(rtl_table_data(q, i));
+
+ rtl_table_release(q);
+
+ rtl838x_fill_l2_entry(r, e);
+ if (!e->valid)
+ return 0;
+
+ entry = (((u64) r[1]) << 32) | (r[2]); // mac and vid concatenated as hash seed
+ return entry;
+}
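+
+/* For example, hash 0x123 and bucket position 2 give
+ *   idx = (0 << 14) | (0x123 << 2) | 2 = 0x48e
+ * i.e. each hash bucket occupies 4 consecutive rows of L2 table 0.
+ */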
+
+static void rtl838x_write_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e)
+{
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 0);
+ int i;
+
+ u32 idx = (0 << 14) | (hash << 2) | pos; // Access SRAM, with hash and at pos in bucket
+
+ rtl838x_fill_l2_row(r, e);
+
+	for (i = 0; i < 3; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+}
+
+static u64 rtl838x_read_cam(int idx, struct rtl838x_l2_entry *e)
+{
+ u64 entry;
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 1); // Access L2 Table 1
+ int i;
+
+ rtl_table_read(q, idx);
+	for (i = 0; i < 3; i++)
+ r[i] = sw_r32(rtl_table_data(q, i));
+
+ rtl_table_release(q);
+
+ rtl838x_fill_l2_entry(r, e);
+ if (!e->valid)
+ return 0;
+
+ pr_debug("Found in CAM: R1 %x R2 %x R3 %x\n", r[0], r[1], r[2]);
+
+	// Return MAC and VID concatenated as ID (same layout as the hash seed)
+ entry = (((u64) r[1]) << 32) | r[2];
+ return entry;
+}
+
+static void rtl838x_write_cam(int idx, struct rtl838x_l2_entry *e)
+{
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 1); // Access L2 Table 1
+ int i;
+
+ rtl838x_fill_l2_row(r, e);
+
+	for (i = 0; i < 3; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+}
+
+static u64 rtl838x_read_mcast_pmask(int idx)
+{
+ u32 portmask;
+ // Read MC_PMSK (2) via register RTL8380_TBL_L2
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 2);
+
+ rtl_table_read(q, idx);
+ portmask = sw_r32(rtl_table_data(q, 0));
+ rtl_table_release(q);
+
+ return portmask;
+}
+
+static void rtl838x_write_mcast_pmask(int idx, u64 portmask)
+{
+ // Access MC_PMSK (2) via register RTL8380_TBL_L2
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 2);
+
+ sw_w32(((u32)portmask) & 0x1fffffff, rtl_table_data(q, 0));
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+}
+
+static void rtl838x_vlan_profile_setup(int profile)
+{
+ u32 pmask_id = UNKNOWN_MC_PMASK;
+ // Enable L2 Learning BIT 0, portmask UNKNOWN_MC_PMASK for unknown MC traffic flooding
+ u32 p = 1 | pmask_id << 1 | pmask_id << 10 | pmask_id << 19;
+
+ sw_w32(p, RTL838X_VLAN_PROFILE(profile));
+
+	/* RTL8380 and RTL8390 use an index into the portmask table to set the
+	 * unknown multicast portmask; set up a default at a safe location.
+	 * On RTL93XX, the portmask is set directly in the profile,
+	 * see e.g. rtl9300_vlan_profile_setup
+	 */
+ rtl838x_write_mcast_pmask(UNKNOWN_MC_PMASK, 0x1fffffff);
+}
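+
+/* Example of the resulting profile word (the pmask index is only illustrative):
+ * with a portmask table index of 0x1ff the word would be
+ *   p = 1 | 0x1ff << 1 | 0x1ff << 10 | 0x1ff << 19 = 0x0fffffff
+ * bit 0 enables L2 learning, bits 1-9, 10-18 and 19-27 hold the unknown
+ * L2-MC, IPv4-MC and IPv6-MC flood portmask indices (cf. rtl838x_vlan_profile_dump).
+ */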
+
+static void rtl838x_l2_learning_setup(void)
+{
+ /* Set portmask for broadcast traffic and unknown unicast address flooding
+ * to the reserved entry in the portmask table used also for
+ * multicast flooding */
+ sw_w32(UNKNOWN_MC_PMASK << 12 | UNKNOWN_MC_PMASK, RTL838X_L2_FLD_PMSK);
+
+ /* Enable learning constraint system-wide (bit 0), per-port (bit 1)
+ * and per vlan (bit 2) */
+ sw_w32(0x7, RTL838X_L2_LRN_CONSTRT_EN);
+
+ // Limit learning to maximum: 16k entries, after that just flood (bits 0-1)
+ sw_w32((0x3fff << 2) | 0, RTL838X_L2_LRN_CONSTRT);
+
+ // Do not trap ARP packets to CPU_PORT
+ sw_w32(0, RTL838X_SPCL_TRAP_ARP_CTRL);
+}
+
+static void rtl838x_enable_learning(int port, bool enable)
+{
+ // Limit learning to maximum: 16k entries
+
+ sw_w32_mask(0x3fff << 2, enable ? (0x3fff << 2) : 0,
+ RTL838X_L2_PORT_LRN_CONSTRT + (port << 2));
+}
+
+static void rtl838x_enable_flood(int port, bool enable)
+{
+ /*
+ * 0: Forward
+ * 1: Disable
+ * 2: to CPU
+ * 3: Copy to CPU
+ */
+ sw_w32_mask(0x3, enable ? 0 : 1,
+ RTL838X_L2_PORT_LRN_CONSTRT + (port << 2));
+}
+
+static void rtl838x_enable_mcast_flood(int port, bool enable)
+{
+}
+
+static void rtl838x_enable_bcast_flood(int port, bool enable)
+{
+}
+
+static void rtl838x_stp_get(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
+{
+ int i;
+ u32 cmd = 1 << 15 /* Execute cmd */
+ | 1 << 14 /* Read */
+ | 2 << 12 /* Table type 0b10 */
+ | (msti & 0xfff);
+ priv->r->exec_tbl0_cmd(cmd);
+
+ for (i = 0; i < 2; i++)
+ port_state[i] = sw_r32(priv->r->tbl_access_data_0(i));
+}
+
+static void rtl838x_stp_set(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
+{
+ int i;
+ u32 cmd = 1 << 15 /* Execute cmd */
+ | 0 << 14 /* Write */
+ | 2 << 12 /* Table type 0b10 */
+ | (msti & 0xfff);
+
+ for (i = 0; i < 2; i++)
+ sw_w32(port_state[i], priv->r->tbl_access_data_0(i));
+ priv->r->exec_tbl0_cmd(cmd);
+}
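+
+/* The table access command words used above are, e.g. for msti 0:
+ *   read:  1 << 15 | 1 << 14 | 2 << 12 | 0 = 0xe000
+ *   write: 1 << 15 | 0 << 14 | 2 << 12 | 0 = 0xa000
+ * with the two data registers carrying the per-port STP states.
+ */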
+
+u64 rtl838x_traffic_get(int source)
+{
+ return rtl838x_get_port_reg(rtl838x_port_iso_ctrl(source));
+}
+
+void rtl838x_traffic_set(int source, u64 dest_matrix)
+{
+ rtl838x_set_port_reg(dest_matrix, rtl838x_port_iso_ctrl(source));
+}
+
+void rtl838x_traffic_enable(int source, int dest)
+{
+ rtl838x_mask_port_reg(0, BIT(dest), rtl838x_port_iso_ctrl(source));
+}
+
+void rtl838x_traffic_disable(int source, int dest)
+{
+ rtl838x_mask_port_reg(BIT(dest), 0, rtl838x_port_iso_ctrl(source));
+}
+
+/*
+ * Enables or disables the EEE/EEEP capability of a port
+ */
+static void rtl838x_port_eee_set(struct rtl838x_switch_priv *priv, int port, bool enable)
+{
+ u32 v;
+
+ // This works only for Ethernet ports, and on the RTL838X, ports from 24 are SFP
+ if (port >= 24)
+ return;
+
+ pr_debug("In %s: setting port %d to %d\n", __func__, port, enable);
+ v = enable ? 0x3 : 0x0;
+
+ // Set EEE state for 100 (bit 9) & 1000MBit (bit 10)
+ sw_w32_mask(0x3 << 9, v << 9, priv->r->mac_force_mode_ctrl(port));
+
+ // Set TX/RX EEE state
+ if (enable) {
+ sw_w32_mask(0, BIT(port), RTL838X_EEE_PORT_TX_EN);
+ sw_w32_mask(0, BIT(port), RTL838X_EEE_PORT_RX_EN);
+ } else {
+ sw_w32_mask(BIT(port), 0, RTL838X_EEE_PORT_TX_EN);
+ sw_w32_mask(BIT(port), 0, RTL838X_EEE_PORT_RX_EN);
+ }
+ priv->ports[port].eee_enabled = enable;
+}
+
+/*
+ * Get EEE own capabilities and negotiation result
+ */
+static int rtl838x_eee_port_ability(struct rtl838x_switch_priv *priv,
+ struct ethtool_eee *e, int port)
+{
+ u64 link;
+
+ if (port >= 24)
+ return 0;
+
+ link = rtl839x_get_port_reg_le(RTL838X_MAC_LINK_STS);
+ if (!(link & BIT(port)))
+ return 0;
+
+ if (sw_r32(rtl838x_mac_force_mode_ctrl(port)) & BIT(9))
+ e->advertised |= ADVERTISED_100baseT_Full;
+
+ if (sw_r32(rtl838x_mac_force_mode_ctrl(port)) & BIT(10))
+ e->advertised |= ADVERTISED_1000baseT_Full;
+
+ if (sw_r32(RTL838X_MAC_EEE_ABLTY) & BIT(port)) {
+ e->lp_advertised = ADVERTISED_100baseT_Full;
+ e->lp_advertised |= ADVERTISED_1000baseT_Full;
+ return 1;
+ }
+
+ return 0;
+}
+
+static void rtl838x_init_eee(struct rtl838x_switch_priv *priv, bool enable)
+{
+ int i;
+
+ pr_info("Setting up EEE, state: %d\n", enable);
+ sw_w32_mask(0x4, 0, RTL838X_SMI_GLB_CTRL);
+
+ /* Set timers for EEE */
+ sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL);
+ sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL);
+
+ // Enable EEE MAC support on ports
+ for (i = 0; i < priv->cpu_port; i++) {
+ if (priv->ports[i].phy)
+ rtl838x_port_eee_set(priv, i, enable);
+ }
+ priv->eee_enabled = enable;
+}
+
+static void rtl838x_pie_lookup_enable(struct rtl838x_switch_priv *priv, int index)
+{
+ int block = index / PIE_BLOCK_SIZE;
+ u32 block_state = sw_r32(RTL838X_ACL_BLK_LOOKUP_CTRL);
+
+ // Make sure rule-lookup is enabled in the block
+ if (!(block_state & BIT(block)))
+ sw_w32(block_state | BIT(block), RTL838X_ACL_BLK_LOOKUP_CTRL);
+}
+
+static void rtl838x_pie_rule_del(struct rtl838x_switch_priv *priv, int index_from, int index_to)
+{
+ int block_from = index_from / PIE_BLOCK_SIZE;
+ int block_to = index_to / PIE_BLOCK_SIZE;
+	u32 v = (index_from << 1) | (index_to << 12) | BIT(0);
+ int block;
+ u32 block_state;
+
+ pr_debug("%s: from %d to %d\n", __func__, index_from, index_to);
+ mutex_lock(&priv->reg_mutex);
+
+ // Remember currently active blocks
+ block_state = sw_r32(RTL838X_ACL_BLK_LOOKUP_CTRL);
+
+ // Make sure rule-lookup is disabled in the relevant blocks
+ for (block = block_from; block <= block_to; block++) {
+ if (block_state & BIT(block))
+ sw_w32(block_state & (~BIT(block)), RTL838X_ACL_BLK_LOOKUP_CTRL);
+ }
+
+ // Write from-to and execute bit into control register
+ sw_w32(v, RTL838X_ACL_CLR_CTRL);
+
+ // Wait until command has completed
+ do {
+ } while (sw_r32(RTL838X_ACL_CLR_CTRL) & BIT(0));
+
+ // Re-enable rule lookup
+ for (block = block_from; block <= block_to; block++) {
+ if (!(block_state & BIT(block)))
+ sw_w32(block_state | BIT(block), RTL838X_ACL_BLK_LOOKUP_CTRL);
+ }
+
+ mutex_unlock(&priv->reg_mutex);
+}
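+
+/* Example of the clear command word: deleting rules 0..127 writes
+ *   v = (0 << 1) | (127 << 12) | BIT(0) = 0x7f001
+ * after which bit 0 is polled until the hardware has finished.
+ */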
+
+/*
+ * Reads the intermediate representation of the templated match-fields of the
+ * PIE rule in the pie_rule structure and fills in the raw data fields in the
+ * raw register space r[].
+ * The register space configuration size is identical for the RTL8380/90 and RTL9300,
+ * however the RTL9310 has 2 more registers / fields and the physical field-ids
+ * are specific to every platform.
+ */
+static void rtl838x_write_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[])
+{
+ int i;
+ enum template_field_id field_type;
+ u16 data, data_m;
+
+ for (i = 0; i < N_FIXED_FIELDS; i++) {
+ field_type = t[i];
+ data = data_m = 0;
+
+ switch (field_type) {
+ case TEMPLATE_FIELD_SPM0:
+ data = pr->spm;
+ data_m = pr->spm_m;
+ break;
+ case TEMPLATE_FIELD_SPM1:
+ data = pr->spm >> 16;
+ data_m = pr->spm_m >> 16;
+ break;
+ case TEMPLATE_FIELD_OTAG:
+ data = pr->otag;
+ data_m = pr->otag_m;
+ break;
+ case TEMPLATE_FIELD_SMAC0:
+ data = pr->smac[4];
+ data = (data << 8) | pr->smac[5];
+ data_m = pr->smac_m[4];
+ data_m = (data_m << 8) | pr->smac_m[5];
+ break;
+ case TEMPLATE_FIELD_SMAC1:
+ data = pr->smac[2];
+ data = (data << 8) | pr->smac[3];
+ data_m = pr->smac_m[2];
+ data_m = (data_m << 8) | pr->smac_m[3];
+ break;
+ case TEMPLATE_FIELD_SMAC2:
+ data = pr->smac[0];
+ data = (data << 8) | pr->smac[1];
+ data_m = pr->smac_m[0];
+ data_m = (data_m << 8) | pr->smac_m[1];
+ break;
+ case TEMPLATE_FIELD_DMAC0:
+ data = pr->dmac[4];
+ data = (data << 8) | pr->dmac[5];
+ data_m = pr->dmac_m[4];
+ data_m = (data_m << 8) | pr->dmac_m[5];
+ break;
+ case TEMPLATE_FIELD_DMAC1:
+ data = pr->dmac[2];
+ data = (data << 8) | pr->dmac[3];
+ data_m = pr->dmac_m[2];
+ data_m = (data_m << 8) | pr->dmac_m[3];
+ break;
+ case TEMPLATE_FIELD_DMAC2:
+ data = pr->dmac[0];
+ data = (data << 8) | pr->dmac[1];
+ data_m = pr->dmac_m[0];
+ data_m = (data_m << 8) | pr->dmac_m[1];
+ break;
+ case TEMPLATE_FIELD_ETHERTYPE:
+ data = pr->ethertype;
+ data_m = pr->ethertype_m;
+ break;
+ case TEMPLATE_FIELD_ITAG:
+ data = pr->itag;
+ data_m = pr->itag_m;
+ break;
+ case TEMPLATE_FIELD_RANGE_CHK:
+ data = pr->field_range_check;
+ data_m = pr->field_range_check_m;
+ break;
+ case TEMPLATE_FIELD_SIP0:
+ if (pr->is_ipv6) {
+ data = pr->sip6.s6_addr16[7];
+ data_m = pr->sip6_m.s6_addr16[7];
+ } else {
+ data = pr->sip;
+ data_m = pr->sip_m;
+ }
+ break;
+ case TEMPLATE_FIELD_SIP1:
+ if (pr->is_ipv6) {
+ data = pr->sip6.s6_addr16[6];
+ data_m = pr->sip6_m.s6_addr16[6];
+ } else {
+ data = pr->sip >> 16;
+ data_m = pr->sip_m >> 16;
+ }
+ break;
+
+ case TEMPLATE_FIELD_SIP2:
+ case TEMPLATE_FIELD_SIP3:
+ case TEMPLATE_FIELD_SIP4:
+ case TEMPLATE_FIELD_SIP5:
+ case TEMPLATE_FIELD_SIP6:
+ case TEMPLATE_FIELD_SIP7:
+ data = pr->sip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)];
+ data_m = pr->sip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)];
+ break;
+
+ case TEMPLATE_FIELD_DIP0:
+ if (pr->is_ipv6) {
+ data = pr->dip6.s6_addr16[7];
+ data_m = pr->dip6_m.s6_addr16[7];
+ } else {
+ data = pr->dip;
+ data_m = pr->dip_m;
+ }
+ break;
+
+ case TEMPLATE_FIELD_DIP1:
+ if (pr->is_ipv6) {
+ data = pr->dip6.s6_addr16[6];
+ data_m = pr->dip6_m.s6_addr16[6];
+ } else {
+ data = pr->dip >> 16;
+ data_m = pr->dip_m >> 16;
+ }
+ break;
+
+ case TEMPLATE_FIELD_DIP2:
+ case TEMPLATE_FIELD_DIP3:
+ case TEMPLATE_FIELD_DIP4:
+ case TEMPLATE_FIELD_DIP5:
+ case TEMPLATE_FIELD_DIP6:
+ case TEMPLATE_FIELD_DIP7:
+ data = pr->dip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)];
+ data_m = pr->dip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)];
+ break;
+
+ case TEMPLATE_FIELD_IP_TOS_PROTO:
+ data = pr->tos_proto;
+ data_m = pr->tos_proto_m;
+ break;
+ case TEMPLATE_FIELD_L4_SPORT:
+ data = pr->sport;
+ data_m = pr->sport_m;
+ break;
+ case TEMPLATE_FIELD_L4_DPORT:
+ data = pr->dport;
+ data_m = pr->dport_m;
+ break;
+ case TEMPLATE_FIELD_ICMP_IGMP:
+ data = pr->icmp_igmp;
+ data_m = pr->icmp_igmp_m;
+ break;
+ default:
+ pr_info("%s: unknown field %d\n", __func__, field_type);
+ continue;
+ }
+ if (!(i % 2)) {
+ r[5 - i / 2] = data;
+ r[12 - i / 2] = data_m;
+ } else {
+ r[5 - i / 2] |= ((u32)data) << 16;
+ r[12 - i / 2] |= ((u32)data_m) << 16;
+ }
+ }
+}
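+
+/* The loop above packs two 16-bit fields per register: the field values go
+ * into r[5] down to r[0] and their masks into r[12] down to r[7], so template
+ * slot 0 fills the low half of r[5]/r[12], slot 1 the high half, slot 2 the
+ * low half of r[4]/r[11], and so on.
+ */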
+
+/*
+ * Creates the intermediate representation of the templated match-fields of the
+ * PIE rule in the pie_rule structure by reading the raw data fields in the
+ * raw register space r[].
+ * The register space configuration size is identical for the RTL8380/90 and RTL9300,
+ * however the RTL9310 has 2 more registers / fields and the physical field-ids
+ * are specific to every platform.
+ */
+static void rtl838x_read_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[])
+{
+ int i;
+ enum template_field_id field_type;
+ u16 data, data_m;
+
+ for (i = 0; i < N_FIXED_FIELDS; i++) {
+ field_type = t[i];
+ if (!(i % 2)) {
+ data = r[5 - i / 2];
+ data_m = r[12 - i / 2];
+ } else {
+ data = r[5 - i / 2] >> 16;
+ data_m = r[12 - i / 2] >> 16;
+ }
+
+ switch (field_type) {
+ case TEMPLATE_FIELD_SPM0:
+ pr->spm = (pr->spn << 16) | data;
+ pr->spm_m = (pr->spn << 16) | data_m;
+ break;
+ case TEMPLATE_FIELD_SPM1:
+ pr->spm = data;
+ pr->spm_m = data_m;
+ break;
+ case TEMPLATE_FIELD_OTAG:
+ pr->otag = data;
+ pr->otag_m = data_m;
+ break;
+ case TEMPLATE_FIELD_SMAC0:
+ pr->smac[4] = data >> 8;
+ pr->smac[5] = data;
+ pr->smac_m[4] = data >> 8;
+ pr->smac_m[5] = data;
+ break;
+ case TEMPLATE_FIELD_SMAC1:
+ pr->smac[2] = data >> 8;
+ pr->smac[3] = data;
+ pr->smac_m[2] = data >> 8;
+ pr->smac_m[3] = data;
+ break;
+ case TEMPLATE_FIELD_SMAC2:
+ pr->smac[0] = data >> 8;
+ pr->smac[1] = data;
+ pr->smac_m[0] = data >> 8;
+ pr->smac_m[1] = data;
+ break;
+ case TEMPLATE_FIELD_DMAC0:
+ pr->dmac[4] = data >> 8;
+ pr->dmac[5] = data;
+ pr->dmac_m[4] = data >> 8;
+ pr->dmac_m[5] = data;
+ break;
+ case TEMPLATE_FIELD_DMAC1:
+ pr->dmac[2] = data >> 8;
+ pr->dmac[3] = data;
+ pr->dmac_m[2] = data >> 8;
+ pr->dmac_m[3] = data;
+ break;
+ case TEMPLATE_FIELD_DMAC2:
+ pr->dmac[0] = data >> 8;
+ pr->dmac[1] = data;
+ pr->dmac_m[0] = data >> 8;
+ pr->dmac_m[1] = data;
+ break;
+ case TEMPLATE_FIELD_ETHERTYPE:
+ pr->ethertype = data;
+ pr->ethertype_m = data_m;
+ break;
+ case TEMPLATE_FIELD_ITAG:
+ pr->itag = data;
+ pr->itag_m = data_m;
+ break;
+ case TEMPLATE_FIELD_RANGE_CHK:
+ pr->field_range_check = data;
+ pr->field_range_check_m = data_m;
+ break;
+ case TEMPLATE_FIELD_SIP0:
+ pr->sip = data;
+ pr->sip_m = data_m;
+ break;
+ case TEMPLATE_FIELD_SIP1:
+ pr->sip = (pr->sip << 16) | data;
+			pr->sip_m = (pr->sip_m << 16) | data_m;
+ break;
+ case TEMPLATE_FIELD_SIP2:
+ pr->is_ipv6 = true;
+			// Make use of limitations on the position of the match values
+ ipv6_addr_set(&pr->sip6, pr->sip, r[5 - i / 2],
+ r[4 - i / 2], r[3 - i / 2]);
+ ipv6_addr_set(&pr->sip6_m, pr->sip_m, r[5 - i / 2],
+ r[4 - i / 2], r[3 - i / 2]);
+ case TEMPLATE_FIELD_SIP3:
+ case TEMPLATE_FIELD_SIP4:
+ case TEMPLATE_FIELD_SIP5:
+ case TEMPLATE_FIELD_SIP6:
+ case TEMPLATE_FIELD_SIP7:
+ break;
+
+ case TEMPLATE_FIELD_DIP0:
+ pr->dip = data;
+ pr->dip_m = data_m;
+ break;
+ case TEMPLATE_FIELD_DIP1:
+ pr->dip = (pr->dip << 16) | data;
+			pr->dip_m = (pr->dip_m << 16) | data_m;
+ break;
+ case TEMPLATE_FIELD_DIP2:
+ pr->is_ipv6 = true;
+ ipv6_addr_set(&pr->dip6, pr->dip, r[5 - i / 2],
+ r[4 - i / 2], r[3 - i / 2]);
+ ipv6_addr_set(&pr->dip6_m, pr->dip_m, r[5 - i / 2],
+ r[4 - i / 2], r[3 - i / 2]);
+ case TEMPLATE_FIELD_DIP3:
+ case TEMPLATE_FIELD_DIP4:
+ case TEMPLATE_FIELD_DIP5:
+ case TEMPLATE_FIELD_DIP6:
+ case TEMPLATE_FIELD_DIP7:
+ break;
+ case TEMPLATE_FIELD_IP_TOS_PROTO:
+ pr->tos_proto = data;
+ pr->tos_proto_m = data_m;
+ break;
+ case TEMPLATE_FIELD_L4_SPORT:
+ pr->sport = data;
+ pr->sport_m = data_m;
+ break;
+ case TEMPLATE_FIELD_L4_DPORT:
+ pr->dport = data;
+ pr->dport_m = data_m;
+ break;
+ case TEMPLATE_FIELD_ICMP_IGMP:
+ pr->icmp_igmp = data;
+ pr->icmp_igmp_m = data_m;
+ break;
+ default:
+ pr_info("%s: unknown field %d\n", __func__, field_type);
+ }
+ }
+}
+
+static void rtl838x_read_pie_fixed_fields(u32 r[], struct pie_rule *pr)
+{
+ pr->spmmask_fix = (r[6] >> 22) & 0x3;
+ pr->spn = (r[6] >> 16) & 0x3f;
+ pr->mgnt_vlan = (r[6] >> 15) & 1;
+ pr->dmac_hit_sw = (r[6] >> 14) & 1;
+ pr->not_first_frag = (r[6] >> 13) & 1;
+ pr->frame_type_l4 = (r[6] >> 10) & 7;
+ pr->frame_type = (r[6] >> 8) & 3;
+ pr->otag_fmt = (r[6] >> 7) & 1;
+ pr->itag_fmt = (r[6] >> 6) & 1;
+ pr->otag_exist = (r[6] >> 5) & 1;
+ pr->itag_exist = (r[6] >> 4) & 1;
+ pr->frame_type_l2 = (r[6] >> 2) & 3;
+ pr->tid = r[6] & 3;
+
+ pr->spmmask_fix_m = (r[13] >> 22) & 0x3;
+ pr->spn_m = (r[13] >> 16) & 0x3f;
+ pr->mgnt_vlan_m = (r[13] >> 15) & 1;
+ pr->dmac_hit_sw_m = (r[13] >> 14) & 1;
+ pr->not_first_frag_m = (r[13] >> 13) & 1;
+ pr->frame_type_l4_m = (r[13] >> 10) & 7;
+ pr->frame_type_m = (r[13] >> 8) & 3;
+ pr->otag_fmt_m = (r[13] >> 7) & 1;
+ pr->itag_fmt_m = (r[13] >> 6) & 1;
+ pr->otag_exist_m = (r[13] >> 5) & 1;
+ pr->itag_exist_m = (r[13] >> 4) & 1;
+ pr->frame_type_l2_m = (r[13] >> 2) & 3;
+ pr->tid_m = r[13] & 3;
+
+ pr->valid = r[14] & BIT(31);
+ pr->cond_not = r[14] & BIT(30);
+ pr->cond_and1 = r[14] & BIT(29);
+ pr->cond_and2 = r[14] & BIT(28);
+ pr->ivalid = r[14] & BIT(27);
+
+ pr->drop = (r[17] >> 14) & 3;
+ pr->fwd_sel = r[17] & BIT(13);
+ pr->ovid_sel = r[17] & BIT(12);
+ pr->ivid_sel = r[17] & BIT(11);
+ pr->flt_sel = r[17] & BIT(10);
+ pr->log_sel = r[17] & BIT(9);
+ pr->rmk_sel = r[17] & BIT(8);
+ pr->meter_sel = r[17] & BIT(7);
+ pr->tagst_sel = r[17] & BIT(6);
+ pr->mir_sel = r[17] & BIT(5);
+ pr->nopri_sel = r[17] & BIT(4);
+ pr->cpupri_sel = r[17] & BIT(3);
+ pr->otpid_sel = r[17] & BIT(2);
+ pr->itpid_sel = r[17] & BIT(1);
+ pr->shaper_sel = r[17] & BIT(0);
+}
+
+static void rtl838x_write_pie_fixed_fields(u32 r[], struct pie_rule *pr)
+{
+ r[6] = ((u32) (pr->spmmask_fix & 0x3)) << 22;
+ r[6] |= ((u32) (pr->spn & 0x3f)) << 16;
+ r[6] |= pr->mgnt_vlan ? BIT(15) : 0;
+ r[6] |= pr->dmac_hit_sw ? BIT(14) : 0;
+ r[6] |= pr->not_first_frag ? BIT(13) : 0;
+ r[6] |= ((u32) (pr->frame_type_l4 & 0x7)) << 10;
+ r[6] |= ((u32) (pr->frame_type & 0x3)) << 8;
+ r[6] |= pr->otag_fmt ? BIT(7) : 0;
+ r[6] |= pr->itag_fmt ? BIT(6) : 0;
+ r[6] |= pr->otag_exist ? BIT(5) : 0;
+ r[6] |= pr->itag_exist ? BIT(4) : 0;
+ r[6] |= ((u32) (pr->frame_type_l2 & 0x3)) << 2;
+ r[6] |= ((u32) (pr->tid & 0x3));
+
+ r[13] = ((u32) (pr->spmmask_fix_m & 0x3)) << 22;
+ r[13] |= ((u32) (pr->spn_m & 0x3f)) << 16;
+ r[13] |= pr->mgnt_vlan_m ? BIT(15) : 0;
+ r[13] |= pr->dmac_hit_sw_m ? BIT(14) : 0;
+ r[13] |= pr->not_first_frag_m ? BIT(13) : 0;
+ r[13] |= ((u32) (pr->frame_type_l4_m & 0x7)) << 10;
+ r[13] |= ((u32) (pr->frame_type_m & 0x3)) << 8;
+ r[13] |= pr->otag_fmt_m ? BIT(7) : 0;
+ r[13] |= pr->itag_fmt_m ? BIT(6) : 0;
+ r[13] |= pr->otag_exist_m ? BIT(5) : 0;
+ r[13] |= pr->itag_exist_m ? BIT(4) : 0;
+ r[13] |= ((u32) (pr->frame_type_l2_m & 0x3)) << 2;
+ r[13] |= ((u32) (pr->tid_m & 0x3));
+
+ r[14] = pr->valid ? BIT(31) : 0;
+ r[14] |= pr->cond_not ? BIT(30) : 0;
+ r[14] |= pr->cond_and1 ? BIT(29) : 0;
+ r[14] |= pr->cond_and2 ? BIT(28) : 0;
+ r[14] |= pr->ivalid ? BIT(27) : 0;
+
+ if (pr->drop)
+ r[17] = 0x1 << 14; // Standard drop action
+ else
+ r[17] = 0;
+ r[17] |= pr->fwd_sel ? BIT(13) : 0;
+ r[17] |= pr->ovid_sel ? BIT(12) : 0;
+ r[17] |= pr->ivid_sel ? BIT(11) : 0;
+ r[17] |= pr->flt_sel ? BIT(10) : 0;
+ r[17] |= pr->log_sel ? BIT(9) : 0;
+ r[17] |= pr->rmk_sel ? BIT(8) : 0;
+ r[17] |= pr->meter_sel ? BIT(7) : 0;
+ r[17] |= pr->tagst_sel ? BIT(6) : 0;
+ r[17] |= pr->mir_sel ? BIT(5) : 0;
+ r[17] |= pr->nopri_sel ? BIT(4) : 0;
+ r[17] |= pr->cpupri_sel ? BIT(3) : 0;
+ r[17] |= pr->otpid_sel ? BIT(2) : 0;
+ r[17] |= pr->itpid_sel ? BIT(1) : 0;
+ r[17] |= pr->shaper_sel ? BIT(0) : 0;
+}
+
+static int rtl838x_write_pie_action(u32 r[], struct pie_rule *pr)
+{
+ u16 *aif = (u16 *)&r[17];
+ u16 data;
+ int fields_used = 0;
+
+ aif--;
+
+ pr_debug("%s, at %08x\n", __func__, (u32)aif);
+ /* Multiple actions can be linked to a match of a PIE rule,
+ * they have different precedence depending on their type and this precedence
+ * defines which Action Information Field (0-4) in the IACL table stores
+ * the additional data of the action (like e.g. the port number a packet is
+ * forwarded to) */
+ // TODO: count bits in selectors to limit to a maximum number of actions
+ if (pr->fwd_sel) { // Forwarding action
+ data = pr->fwd_act << 13;
+ data |= pr->fwd_data;
+ data |= pr->bypass_all ? BIT(12) : 0;
+ data |= pr->bypass_ibc_sc ? BIT(11) : 0;
+ data |= pr->bypass_igr_stp ? BIT(10) : 0;
+ *aif-- = data;
+ fields_used++;
+ }
+
+ if (pr->ovid_sel) { // Outer VID action
+ data = (pr->ovid_act & 0x3) << 12;
+ data |= pr->ovid_data;
+ *aif-- = data;
+ fields_used++;
+ }
+
+ if (pr->ivid_sel) { // Inner VID action
+ data = (pr->ivid_act & 0x3) << 12;
+ data |= pr->ivid_data;
+ *aif-- = data;
+ fields_used++;
+ }
+
+ if (pr->flt_sel) { // Filter action
+ *aif-- = pr->flt_data;
+ fields_used++;
+ }
+
+ if (pr->log_sel) { // Log action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->log_data;
+ fields_used++;
+ }
+
+ if (pr->rmk_sel) { // Remark action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->rmk_data;
+ fields_used++;
+ }
+
+ if (pr->meter_sel) { // Meter action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->meter_data;
+ fields_used++;
+ }
+
+ if (pr->tagst_sel) { // Egress Tag Status action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->tagst_data;
+ fields_used++;
+ }
+
+ if (pr->mir_sel) { // Mirror action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->mir_data;
+ fields_used++;
+ }
+
+ if (pr->nopri_sel) { // Normal Priority action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->nopri_data;
+ fields_used++;
+ }
+
+ if (pr->cpupri_sel) { // CPU Priority action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->nopri_data;
+ fields_used++;
+ }
+
+ if (pr->otpid_sel) { // OTPID action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->otpid_data;
+ fields_used++;
+ }
+
+ if (pr->itpid_sel) { // ITPID action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->itpid_data;
+ fields_used++;
+ }
+
+ if (pr->shaper_sel) { // Traffic shaper action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->shaper_data;
+ fields_used++;
+ }
+
+ return 0;
+}
+
+static void rtl838x_read_pie_action(u32 r[], struct pie_rule *pr)
+{
+ u16 *aif = (u16 *)&r[17];
+
+ aif--;
+
+ pr_debug("%s, at %08x\n", __func__, (u32)aif);
+ if (pr->drop)
+ pr_debug("%s: Action Drop: %d", __func__, pr->drop);
+
+	if (pr->fwd_sel) { // Forwarding action
+ pr->fwd_act = *aif >> 13;
+ pr->fwd_data = *aif--;
+ pr->bypass_all = pr->fwd_data & BIT(12);
+ pr->bypass_ibc_sc = pr->fwd_data & BIT(11);
+ pr->bypass_igr_stp = pr->fwd_data & BIT(10);
+ if (pr->bypass_all || pr->bypass_ibc_sc || pr->bypass_igr_stp)
+ pr->bypass_sel = true;
+ }
+ if (pr->ovid_sel) // Outer VID action
+ pr->ovid_data = *aif--;
+ if (pr->ivid_sel) // Inner VID action
+ pr->ivid_data = *aif--;
+ if (pr->flt_sel) // Filter action
+ pr->flt_data = *aif--;
+ if (pr->log_sel) // Log action
+ pr->log_data = *aif--;
+ if (pr->rmk_sel) // Remark action
+ pr->rmk_data = *aif--;
+ if (pr->meter_sel) // Meter action
+ pr->meter_data = *aif--;
+ if (pr->tagst_sel) // Egress Tag Status action
+ pr->tagst_data = *aif--;
+ if (pr->mir_sel) // Mirror action
+ pr->mir_data = *aif--;
+ if (pr->nopri_sel) // Normal Priority action
+ pr->nopri_data = *aif--;
+ if (pr->cpupri_sel) // CPU Priority action
+ pr->nopri_data = *aif--;
+ if (pr->otpid_sel) // OTPID action
+ pr->otpid_data = *aif--;
+ if (pr->itpid_sel) // ITPID action
+ pr->itpid_data = *aif--;
+ if (pr->shaper_sel) // Traffic shaper action
+ pr->shaper_data = *aif--;
+}
+
+static void rtl838x_pie_rule_dump_raw(u32 r[])
+{
+ pr_info("Raw IACL table entry:\n");
+ pr_info("Match : %08x %08x %08x %08x %08x %08x\n", r[0], r[1], r[2], r[3], r[4], r[5]);
+ pr_info("Fixed : %08x\n", r[6]);
+ pr_info("Match M: %08x %08x %08x %08x %08x %08x\n", r[7], r[8], r[9], r[10], r[11], r[12]);
+ pr_info("Fixed M: %08x\n", r[13]);
+ pr_info("AIF : %08x %08x %08x\n", r[14], r[15], r[16]);
+ pr_info("Sel : %08x\n", r[17]);
+}
+
+static void rtl838x_pie_rule_dump(struct pie_rule *pr)
+{
+ pr_info("Drop: %d, fwd: %d, ovid: %d, ivid: %d, flt: %d, log: %d, rmk: %d, meter: %d tagst: %d, mir: %d, nopri: %d, cpupri: %d, otpid: %d, itpid: %d, shape: %d\n",
+		pr->drop, pr->fwd_sel, pr->ovid_sel, pr->ivid_sel, pr->flt_sel, pr->log_sel, pr->rmk_sel, pr->meter_sel, pr->tagst_sel, pr->mir_sel, pr->nopri_sel,
+ pr->cpupri_sel, pr->otpid_sel, pr->itpid_sel, pr->shaper_sel);
+ if (pr->fwd_sel)
+ pr_info("FWD: %08x\n", pr->fwd_data);
+ pr_info("TID: %x, %x\n", pr->tid, pr->tid_m);
+}
+
+static int rtl838x_pie_rule_read(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr)
+{
+ // Read IACL table (1) via register 0
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_0, 1);
+ u32 r[18];
+ int i;
+ int block = idx / PIE_BLOCK_SIZE;
+ u32 t_select = sw_r32(RTL838X_ACL_BLK_TMPLTE_CTRL(block));
+
+ memset(pr, 0, sizeof(*pr));
+ rtl_table_read(q, idx);
+ for (i = 0; i < 18; i++)
+ r[i] = sw_r32(rtl_table_data(q, i));
+
+ rtl_table_release(q);
+
+ rtl838x_read_pie_fixed_fields(r, pr);
+ if (!pr->valid)
+ return 0;
+
+ pr_info("%s: template_selectors %08x, tid: %d\n", __func__, t_select, pr->tid);
+ rtl838x_pie_rule_dump_raw(r);
+
+ rtl838x_read_pie_templated(r, pr, fixed_templates[(t_select >> (pr->tid * 3)) & 0x7]);
+
+ rtl838x_read_pie_action(r, pr);
+
+ return 0;
+}
+
+static int rtl838x_pie_rule_write(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr)
+{
+ // Access IACL table (1) via register 0
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_0, 1);
+ u32 r[18];
+ int i, err = 0;
+ int block = idx / PIE_BLOCK_SIZE;
+ u32 t_select = sw_r32(RTL838X_ACL_BLK_TMPLTE_CTRL(block));
+
+ pr_debug("%s: %d, t_select: %08x\n", __func__, idx, t_select);
+
+ for (i = 0; i < 18; i++)
+ r[i] = 0;
+
+ if (!pr->valid)
+ goto err_out;
+
+ rtl838x_write_pie_fixed_fields(r, pr);
+
+ pr_debug("%s: template %d\n", __func__, (t_select >> (pr->tid * 3)) & 0x7);
+ rtl838x_write_pie_templated(r, pr, fixed_templates[(t_select >> (pr->tid * 3)) & 0x7]);
+
+ if (rtl838x_write_pie_action(r, pr)) {
+ pr_err("Rule actions too complex\n");
+ goto err_out;
+ }
+
+// rtl838x_pie_rule_dump_raw(r);
+
+ for (i = 0; i < 18; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+err_out:
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+
+ return err;
+}
+
+static bool rtl838x_pie_templ_has(int t, enum template_field_id field_type)
+{
+ int i;
+ enum template_field_id ft;
+
+ for (i = 0; i < N_FIXED_FIELDS; i++) {
+ ft = fixed_templates[t][i];
+ if (field_type == ft)
+ return true;
+ }
+
+ return false;
+}
+
+static int rtl838x_pie_verify_template(struct rtl838x_switch_priv *priv,
+ struct pie_rule *pr, int t, int block)
+{
+ int i;
+
+ if (!pr->is_ipv6 && pr->sip_m && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_SIP0))
+ return -1;
+
+ if (!pr->is_ipv6 && pr->dip_m && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_DIP0))
+ return -1;
+
+ if (pr->is_ipv6) {
+ if ((pr->sip6_m.s6_addr32[0] || pr->sip6_m.s6_addr32[1]
+ || pr->sip6_m.s6_addr32[2] || pr->sip6_m.s6_addr32[3])
+ && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_SIP2))
+ return -1;
+ if ((pr->dip6_m.s6_addr32[0] || pr->dip6_m.s6_addr32[1]
+ || pr->dip6_m.s6_addr32[2] || pr->dip6_m.s6_addr32[3])
+ && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_DIP2))
+ return -1;
+ }
+
+ if (ether_addr_to_u64(pr->smac) && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_SMAC0))
+ return -1;
+
+ if (ether_addr_to_u64(pr->dmac) && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_DMAC0))
+ return -1;
+
+ // TODO: Check more
+
+ i = find_first_zero_bit(&priv->pie_use_bm[block * 4], PIE_BLOCK_SIZE);
+
+ if (i >= PIE_BLOCK_SIZE)
+ return -1;
+
+ return i + PIE_BLOCK_SIZE * block;
+}
+
+static int rtl838x_pie_rule_add(struct rtl838x_switch_priv *priv, struct pie_rule *pr)
+{
+ int idx, block, j, t;
+
+ pr_debug("In %s\n", __func__);
+
+ mutex_lock(&priv->pie_mutex);
+
+ for (block = 0; block < priv->n_pie_blocks; block++) {
+ for (j = 0; j < 3; j++) {
+ t = (sw_r32(RTL838X_ACL_BLK_TMPLTE_CTRL(block)) >> (j * 3)) & 0x7;
+ pr_debug("Testing block %d, template %d, template id %d\n", block, j, t);
+ idx = rtl838x_pie_verify_template(priv, pr, t, block);
+ if (idx >= 0)
+ break;
+ }
+ if (j < 3)
+ break;
+ }
+
+ if (block >= priv->n_pie_blocks) {
+ mutex_unlock(&priv->pie_mutex);
+ return -EOPNOTSUPP;
+ }
+
+ pr_debug("Using block: %d, index %d, template-id %d\n", block, idx, j);
+ set_bit(idx, priv->pie_use_bm);
+
+ pr->valid = true;
+ pr->tid = j; // Mapped to template number
+ pr->tid_m = 0x3;
+ pr->id = idx;
+
+ rtl838x_pie_lookup_enable(priv, idx);
+ rtl838x_pie_rule_write(priv, idx, pr);
+
+ mutex_unlock(&priv->pie_mutex);
+ return 0;
+}
+
+static void rtl838x_pie_rule_rm(struct rtl838x_switch_priv *priv, struct pie_rule *pr)
+{
+ int idx = pr->id;
+
+ rtl838x_pie_rule_del(priv, idx, idx);
+ clear_bit(idx, priv->pie_use_bm);
+}
+
+/*
+ * Initializes the Packet Inspection Engine:
+ * powers it up, enables default matching templates for all blocks
+ * and clears all rules possibly installed by u-boot
+ */
+static void rtl838x_pie_init(struct rtl838x_switch_priv *priv)
+{
+ int i;
+ u32 template_selectors;
+
+ mutex_init(&priv->pie_mutex);
+
+ // Enable ACL lookup on all ports, including CPU_PORT
+ for (i = 0; i <= priv->cpu_port; i++)
+ sw_w32(1, RTL838X_ACL_PORT_LOOKUP_CTRL(i));
+
+ // Power on all PIE blocks
+ for (i = 0; i < priv->n_pie_blocks; i++)
+ sw_w32_mask(0, BIT(i), RTL838X_ACL_BLK_PWR_CTRL);
+
+ // Include IPG in metering
+ sw_w32(1, RTL838X_METER_GLB_CTRL);
+
+ // Delete all present rules
+ rtl838x_pie_rule_del(priv, 0, priv->n_pie_blocks * PIE_BLOCK_SIZE - 1);
+
+ // Routing bypasses source port filter: disable write-protection, first
+ sw_w32_mask(0, 3, RTL838X_INT_RW_CTRL);
+ sw_w32_mask(0, 1, RTL838X_DMY_REG27);
+ sw_w32_mask(3, 0, RTL838X_INT_RW_CTRL);
+
+ // Enable predefined templates 0, 1 and 2 for even blocks
+ template_selectors = 0 | (1 << 3) | (2 << 6);
+ for (i = 0; i < 6; i += 2)
+ sw_w32(template_selectors, RTL838X_ACL_BLK_TMPLTE_CTRL(i));
+
+ // Enable predefined templates 0, 3 and 4 (IPv6 support) for odd blocks
+ template_selectors = 0 | (3 << 3) | (4 << 6);
+ for (i = 1; i < priv->n_pie_blocks; i += 2)
+ sw_w32(template_selectors, RTL838X_ACL_BLK_TMPLTE_CTRL(i));
+
+ // Group each pair of physical blocks together to a logical block
+ sw_w32(0b10101010101, RTL838X_ACL_BLK_GROUP_CTRL);
+}
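+
+/* Each selector register packs one 3-bit template id per sub-block, so the
+ * values written above work out to
+ *   even blocks: 0 | (1 << 3) | (2 << 6) = 0x088  (templates 0, 1, 2)
+ *   odd blocks:  0 | (3 << 3) | (4 << 6) = 0x118  (templates 0, 3, 4)
+ */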
+
+static u32 rtl838x_packet_cntr_read(int counter)
+{
+ u32 v;
+
+ // Read LOG table (3) via register RTL8380_TBL_0
+ struct table_reg *r = rtl_table_get(RTL8380_TBL_0, 3);
+
+ pr_debug("In %s, id %d\n", __func__, counter);
+ rtl_table_read(r, counter / 2);
+
+ pr_debug("Registers: %08x %08x\n",
+ sw_r32(rtl_table_data(r, 0)), sw_r32(rtl_table_data(r, 1)));
+ // The table has a size of 2 registers
+ if (counter % 2)
+ v = sw_r32(rtl_table_data(r, 0));
+ else
+ v = sw_r32(rtl_table_data(r, 1));
+
+ rtl_table_release(r);
+
+ return v;
+}
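+
+/* Example: counter 5 lives in table row 5 / 2 = 2 and, being odd, is read
+ * from data register 0 of that row; counter 4 would come from register 1.
+ */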
+
+static void rtl838x_packet_cntr_clear(int counter)
+{
+ // Access LOG table (3) via register RTL8380_TBL_0
+ struct table_reg *r = rtl_table_get(RTL8380_TBL_0, 3);
+
+ pr_debug("In %s, id %d\n", __func__, counter);
+ // The table has a size of 2 registers
+ if (counter % 2)
+ sw_w32(0, rtl_table_data(r, 0));
+ else
+ sw_w32(0, rtl_table_data(r, 1));
+
+ rtl_table_write(r, counter / 2);
+
+ rtl_table_release(r);
+}
+
+static void rtl838x_route_read(int idx, struct rtl83xx_route *rt)
+{
+ // Read ROUTING table (2) via register RTL8380_TBL_1
+ struct table_reg *r = rtl_table_get(RTL8380_TBL_1, 2);
+
+ pr_debug("In %s, id %d\n", __func__, idx);
+ rtl_table_read(r, idx);
+
+ // The table has a size of 2 registers
+ rt->nh.gw = sw_r32(rtl_table_data(r, 0));
+ rt->nh.gw <<= 32;
+ rt->nh.gw |= sw_r32(rtl_table_data(r, 1));
+
+ rtl_table_release(r);
+}
+
+static void rtl838x_route_write(int idx, struct rtl83xx_route *rt)
+{
+ // Access ROUTING table (2) via register RTL8380_TBL_1
+ struct table_reg *r = rtl_table_get(RTL8380_TBL_1, 2);
+
+ pr_debug("In %s, id %d, gw: %016llx\n", __func__, idx, rt->nh.gw);
+ sw_w32(rt->nh.gw >> 32, rtl_table_data(r, 0));
+ sw_w32(rt->nh.gw, rtl_table_data(r, 1));
+ rtl_table_write(r, idx);
+
+ rtl_table_release(r);
+}
+
+static int rtl838x_l3_setup(struct rtl838x_switch_priv *priv)
+{
+ // Nothing to be done
+ return 0;
+}
+
+void rtl838x_vlan_port_keep_tag_set(int port, bool keep_outer, bool keep_inner)
+{
+ sw_w32(FIELD_PREP(RTL838X_VLAN_PORT_TAG_STS_CTRL_OTAG_STS_MASK,
+ keep_outer ? RTL838X_VLAN_PORT_TAG_STS_TAGGED : RTL838X_VLAN_PORT_TAG_STS_UNTAG) |
+ FIELD_PREP(RTL838X_VLAN_PORT_TAG_STS_CTRL_ITAG_STS_MASK,
+ keep_inner ? RTL838X_VLAN_PORT_TAG_STS_TAGGED : RTL838X_VLAN_PORT_TAG_STS_UNTAG),
+ RTL838X_VLAN_PORT_TAG_STS_CTRL(port));
+}
+
+void rtl838x_vlan_port_pvidmode_set(int port, enum pbvlan_type type, enum pbvlan_mode mode)
+{
+ if (type == PBVLAN_TYPE_INNER)
+ sw_w32_mask(0x3, mode, RTL838X_VLAN_PORT_PB_VLAN + (port << 2));
+ else
+ sw_w32_mask(0x3 << 14, mode << 14, RTL838X_VLAN_PORT_PB_VLAN + (port << 2));
+}
+
+void rtl838x_vlan_port_pvid_set(int port, enum pbvlan_type type, int pvid)
+{
+ if (type == PBVLAN_TYPE_INNER)
+ sw_w32_mask(0xfff << 2, pvid << 2, RTL838X_VLAN_PORT_PB_VLAN + (port << 2));
+ else
+ sw_w32_mask(0xfff << 16, pvid << 16, RTL838X_VLAN_PORT_PB_VLAN + (port << 2));
+}
+
+static int rtl838x_set_ageing_time(unsigned long msec)
+{
+ int t = sw_r32(RTL838X_L2_CTRL_1);
+
+ t &= 0x7FFFFF;
+ t = t * 128 / 625; /* Aging time in seconds. 0: L2 aging disabled */
+ pr_debug("L2 AGING time: %d sec\n", t);
+
+ t = (msec * 625 + 127000) / 128000;
+ t = t > 0x7FFFFF ? 0x7FFFFF : t;
+ sw_w32_mask(0x7FFFFF, t, RTL838X_L2_CTRL_1);
+ pr_debug("Dynamic aging for ports: %x\n", sw_r32(RTL838X_L2_PORT_AGING_OUT));
+
+ return 0;
+}
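+
+/* Worked example for the conversion above: msec = 300000 (5 minutes) gives
+ *   t = (300000 * 625 + 127000) / 128000 = 1465
+ * and reading the register back, 1465 * 128 / 625 = 300 seconds.
+ */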
+
+static void rtl838x_set_igr_filter(int port, enum igr_filter state)
+{
+ sw_w32_mask(0x3 << ((port & 0xf)<<1), state << ((port & 0xf)<<1),
+ RTL838X_VLAN_PORT_IGR_FLTR + (((port >> 4) << 2)));
+}
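+
+/* Each 32-bit ingress filter register holds a 2-bit state for 16 ports.
+ * Example: port 18 uses bits 4-5 of the second register
+ * ((18 >> 4) << 2 = offset 4, (18 & 0xf) << 1 = bit 4).
+ */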
+
+static void rtl838x_set_egr_filter(int port, enum egr_filter state)
+{
+ sw_w32_mask(0x1 << (port % 0x1d), state << (port % 0x1d),
+ RTL838X_VLAN_PORT_EGR_FLTR + (((port / 29) << 2)));
+}
+
+void rtl838x_set_distribution_algorithm(int group, int algoidx, u32 algomsk)
+{
+ algoidx &= 1; // RTL838X only supports 2 concurrent algorithms
+ sw_w32_mask(1 << (group % 8), algoidx << (group % 8),
+ RTL838X_TRK_HASH_IDX_CTRL + ((group >> 3) << 2));
+ sw_w32(algomsk, RTL838X_TRK_HASH_CTRL + (algoidx << 2));
+}
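+
+/* Example: group 10 with algoidx 1 sets bit 10 % 8 = 2 of the second
+ * TRK_HASH_IDX_CTRL register ((10 >> 3) << 2 = offset 4). algomsk is
+ * presumably built from the TRUNK_DISTRIBUTION_ALGO_* bits in rtl838x.h,
+ * e.g. TRUNK_DISTRIBUTION_ALGO_SMAC_BIT | TRUNK_DISTRIBUTION_ALGO_DMAC_BIT = 0x06.
+ */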
+
+void rtl838x_set_receive_management_action(int port, rma_ctrl_t type, action_type_t action)
+{
+	switch (type) {
+ case BPDU:
+ sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1),
+ RTL838X_RMA_BPDU_CTRL + ((port >> 4) << 2));
+ break;
+ case PTP:
+ sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1),
+ RTL838X_RMA_PTP_CTRL + ((port >> 4) << 2));
+ break;
+ case LLTP:
+ sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1),
+ RTL838X_RMA_LLTP_CTRL + ((port >> 4) << 2));
+ break;
+ default:
+ break;
+ }
+}
+
+const struct rtl838x_reg rtl838x_reg = {
+ .mask_port_reg_be = rtl838x_mask_port_reg,
+ .set_port_reg_be = rtl838x_set_port_reg,
+ .get_port_reg_be = rtl838x_get_port_reg,
+ .mask_port_reg_le = rtl838x_mask_port_reg,
+ .set_port_reg_le = rtl838x_set_port_reg,
+ .get_port_reg_le = rtl838x_get_port_reg,
+ .stat_port_rst = RTL838X_STAT_PORT_RST,
+ .stat_rst = RTL838X_STAT_RST,
+ .stat_port_std_mib = RTL838X_STAT_PORT_STD_MIB,
+ .port_iso_ctrl = rtl838x_port_iso_ctrl,
+ .traffic_enable = rtl838x_traffic_enable,
+ .traffic_disable = rtl838x_traffic_disable,
+ .traffic_get = rtl838x_traffic_get,
+ .traffic_set = rtl838x_traffic_set,
+ .l2_ctrl_0 = RTL838X_L2_CTRL_0,
+ .l2_ctrl_1 = RTL838X_L2_CTRL_1,
+ .l2_port_aging_out = RTL838X_L2_PORT_AGING_OUT,
+ .set_ageing_time = rtl838x_set_ageing_time,
+ .smi_poll_ctrl = RTL838X_SMI_POLL_CTRL,
+ .l2_tbl_flush_ctrl = RTL838X_L2_TBL_FLUSH_CTRL,
+ .exec_tbl0_cmd = rtl838x_exec_tbl0_cmd,
+ .exec_tbl1_cmd = rtl838x_exec_tbl1_cmd,
+ .tbl_access_data_0 = rtl838x_tbl_access_data_0,
+ .isr_glb_src = RTL838X_ISR_GLB_SRC,
+ .isr_port_link_sts_chg = RTL838X_ISR_PORT_LINK_STS_CHG,
+ .imr_port_link_sts_chg = RTL838X_IMR_PORT_LINK_STS_CHG,
+ .imr_glb = RTL838X_IMR_GLB,
+ .vlan_tables_read = rtl838x_vlan_tables_read,
+ .vlan_set_tagged = rtl838x_vlan_set_tagged,
+ .vlan_set_untagged = rtl838x_vlan_set_untagged,
+ .mac_force_mode_ctrl = rtl838x_mac_force_mode_ctrl,
+ .vlan_profile_dump = rtl838x_vlan_profile_dump,
+ .vlan_profile_setup = rtl838x_vlan_profile_setup,
+ .vlan_fwd_on_inner = rtl838x_vlan_fwd_on_inner,
+ .set_vlan_igr_filter = rtl838x_set_igr_filter,
+ .set_vlan_egr_filter = rtl838x_set_egr_filter,
+ .enable_learning = rtl838x_enable_learning,
+ .enable_flood = rtl838x_enable_flood,
+ .enable_mcast_flood = rtl838x_enable_mcast_flood,
+ .enable_bcast_flood = rtl838x_enable_bcast_flood,
+ .stp_get = rtl838x_stp_get,
+ .stp_set = rtl838x_stp_set,
+ .mac_port_ctrl = rtl838x_mac_port_ctrl,
+ .l2_port_new_salrn = rtl838x_l2_port_new_salrn,
+ .l2_port_new_sa_fwd = rtl838x_l2_port_new_sa_fwd,
+ .mir_ctrl = RTL838X_MIR_CTRL,
+ .mir_dpm = RTL838X_MIR_DPM_CTRL,
+ .mir_spm = RTL838X_MIR_SPM_CTRL,
+ .mac_link_sts = RTL838X_MAC_LINK_STS,
+ .mac_link_dup_sts = RTL838X_MAC_LINK_DUP_STS,
+ .mac_link_spd_sts = rtl838x_mac_link_spd_sts,
+ .mac_rx_pause_sts = RTL838X_MAC_RX_PAUSE_STS,
+ .mac_tx_pause_sts = RTL838X_MAC_TX_PAUSE_STS,
+ .read_l2_entry_using_hash = rtl838x_read_l2_entry_using_hash,
+ .write_l2_entry_using_hash = rtl838x_write_l2_entry_using_hash,
+ .read_cam = rtl838x_read_cam,
+ .write_cam = rtl838x_write_cam,
+ .vlan_port_keep_tag_set = rtl838x_vlan_port_keep_tag_set,
+ .vlan_port_pvidmode_set = rtl838x_vlan_port_pvidmode_set,
+ .vlan_port_pvid_set = rtl838x_vlan_port_pvid_set,
+ .trk_mbr_ctr = rtl838x_trk_mbr_ctr,
+ .rma_bpdu_fld_pmask = RTL838X_RMA_BPDU_FLD_PMSK,
+ .spcl_trap_eapol_ctrl = RTL838X_SPCL_TRAP_EAPOL_CTRL,
+ .init_eee = rtl838x_init_eee,
+ .port_eee_set = rtl838x_port_eee_set,
+ .eee_port_ability = rtl838x_eee_port_ability,
+ .l2_hash_seed = rtl838x_l2_hash_seed,
+ .l2_hash_key = rtl838x_l2_hash_key,
+ .read_mcast_pmask = rtl838x_read_mcast_pmask,
+ .write_mcast_pmask = rtl838x_write_mcast_pmask,
+ .pie_init = rtl838x_pie_init,
+ .pie_rule_read = rtl838x_pie_rule_read,
+ .pie_rule_write = rtl838x_pie_rule_write,
+ .pie_rule_add = rtl838x_pie_rule_add,
+ .pie_rule_rm = rtl838x_pie_rule_rm,
+ .l2_learning_setup = rtl838x_l2_learning_setup,
+ .packet_cntr_read = rtl838x_packet_cntr_read,
+ .packet_cntr_clear = rtl838x_packet_cntr_clear,
+ .route_read = rtl838x_route_read,
+ .route_write = rtl838x_route_write,
+ .l3_setup = rtl838x_l3_setup,
+ .set_distribution_algorithm = rtl838x_set_distribution_algorithm,
+ .set_receive_management_action = rtl838x_set_receive_management_action,
+};
+
+irqreturn_t rtl838x_switch_irq(int irq, void *dev_id)
+{
+ struct dsa_switch *ds = dev_id;
+ u32 status = sw_r32(RTL838X_ISR_GLB_SRC);
+ u32 ports = sw_r32(RTL838X_ISR_PORT_LINK_STS_CHG);
+ u32 link;
+ int i;
+
+ /* Clear status */
+ sw_w32(ports, RTL838X_ISR_PORT_LINK_STS_CHG);
+ pr_info("RTL8380 Link change: status: %x, ports %x\n", status, ports);
+
+ for (i = 0; i < 28; i++) {
+ if (ports & BIT(i)) {
+ link = sw_r32(RTL838X_MAC_LINK_STS);
+ if (link & BIT(i))
+ dsa_port_phylink_mac_change(ds, i, true);
+ else
+ dsa_port_phylink_mac_change(ds, i, false);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+int rtl838x_smi_wait_op(int timeout)
+{
+ int ret = 0;
+ u32 val;
+
+ ret = readx_poll_timeout(sw_r32, RTL838X_SMI_ACCESS_PHY_CTRL_1,
+ val, !(val & 0x1), 20, timeout);
+ if (ret)
+ pr_err("%s: timeout\n", __func__);
+
+ return ret;
+}
+
+/*
+ * Reads a register in a page from the PHY
+ */
+int rtl838x_read_phy(u32 port, u32 page, u32 reg, u32 *val)
+{
+ u32 v;
+ u32 park_page;
+
+ if (port > 31) {
+ *val = 0xffff;
+ return 0;
+ }
+
+ if (page > 4095 || reg > 31)
+ return -ENOTSUPP;
+
+ mutex_lock(&smi_lock);
+
+ if (rtl838x_smi_wait_op(100000))
+ goto timeout;
+
+ sw_w32_mask(0xffff0000, port << 16, RTL838X_SMI_ACCESS_PHY_CTRL_2);
+
+ park_page = sw_r32(RTL838X_SMI_ACCESS_PHY_CTRL_1) & ((0x1f << 15) | 0x2);
+ v = reg << 20 | page << 3;
+ sw_w32(v | park_page, RTL838X_SMI_ACCESS_PHY_CTRL_1);
+ sw_w32_mask(0, 1, RTL838X_SMI_ACCESS_PHY_CTRL_1);
+
+ if (rtl838x_smi_wait_op(100000))
+ goto timeout;
+
+ *val = sw_r32(RTL838X_SMI_ACCESS_PHY_CTRL_2) & 0xffff;
+
+ mutex_unlock(&smi_lock);
+ return 0;
+
+timeout:
+ mutex_unlock(&smi_lock);
+ return -ETIMEDOUT;
+}
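+
+/* Example of the indirect access word built above: reading register 2 of
+ * page 0xa42 gives v = 2 << 20 | 0xa42 << 3 = 0x205210; setting bit 0 then
+ * starts the command and the result appears in the low 16 bits of CTRL_2.
+ */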
+
+/*
+ * Write to a register in a page of the PHY
+ */
+int rtl838x_write_phy(u32 port, u32 page, u32 reg, u32 val)
+{
+ u32 v;
+ u32 park_page;
+
+ val &= 0xffff;
+ if (port > 31 || page > 4095 || reg > 31)
+ return -ENOTSUPP;
+
+ mutex_lock(&smi_lock);
+ if (rtl838x_smi_wait_op(100000))
+ goto timeout;
+
+ sw_w32(BIT(port), RTL838X_SMI_ACCESS_PHY_CTRL_0);
+ mdelay(10);
+
+ sw_w32_mask(0xffff0000, val << 16, RTL838X_SMI_ACCESS_PHY_CTRL_2);
+
+ park_page = sw_r32(RTL838X_SMI_ACCESS_PHY_CTRL_1) & ((0x1f << 15) | 0x2);
+ v = reg << 20 | page << 3 | 0x4;
+ sw_w32(v | park_page, RTL838X_SMI_ACCESS_PHY_CTRL_1);
+ sw_w32_mask(0, 1, RTL838X_SMI_ACCESS_PHY_CTRL_1);
+
+ if (rtl838x_smi_wait_op(100000))
+ goto timeout;
+
+ mutex_unlock(&smi_lock);
+ return 0;
+
+timeout:
+ mutex_unlock(&smi_lock);
+ return -ETIMEDOUT;
+}
+
+/*
+ * Read an mmd register of a PHY
+ */
+int rtl838x_read_mmd_phy(u32 port, u32 addr, u32 reg, u32 *val)
+{
+ u32 v;
+
+ mutex_lock(&smi_lock);
+
+ if (rtl838x_smi_wait_op(100000))
+ goto timeout;
+
+ sw_w32(1 << port, RTL838X_SMI_ACCESS_PHY_CTRL_0);
+ mdelay(10);
+
+ sw_w32_mask(0xffff0000, port << 16, RTL838X_SMI_ACCESS_PHY_CTRL_2);
+
+ v = addr << 16 | reg;
+ sw_w32(v, RTL838X_SMI_ACCESS_PHY_CTRL_3);
+
+ /* mmd-access | read | cmd-start */
+ v = 1 << 1 | 0 << 2 | 1;
+ sw_w32(v, RTL838X_SMI_ACCESS_PHY_CTRL_1);
+
+ if (rtl838x_smi_wait_op(100000))
+ goto timeout;
+
+ *val = sw_r32(RTL838X_SMI_ACCESS_PHY_CTRL_2) & 0xffff;
+
+ mutex_unlock(&smi_lock);
+ return 0;
+
+timeout:
+ mutex_unlock(&smi_lock);
+ return -ETIMEDOUT;
+}
+
+/*
+ * Write to an mmd register of a PHY
+ */
+int rtl838x_write_mmd_phy(u32 port, u32 addr, u32 reg, u32 val)
+{
+ u32 v;
+
+ pr_debug("MMD write: port %d, dev %d, reg %d, val %x\n", port, addr, reg, val);
+ val &= 0xffff;
+ mutex_lock(&smi_lock);
+
+ if (rtl838x_smi_wait_op(100000))
+ goto timeout;
+
+ sw_w32(1 << port, RTL838X_SMI_ACCESS_PHY_CTRL_0);
+ mdelay(10);
+
+ sw_w32_mask(0xffff0000, val << 16, RTL838X_SMI_ACCESS_PHY_CTRL_2);
+
+ sw_w32_mask(0x1f << 16, addr << 16, RTL838X_SMI_ACCESS_PHY_CTRL_3);
+ sw_w32_mask(0xffff, reg, RTL838X_SMI_ACCESS_PHY_CTRL_3);
+ /* mmd-access | write | cmd-start */
+ v = 1 << 1 | 1 << 2 | 1;
+ sw_w32(v, RTL838X_SMI_ACCESS_PHY_CTRL_1);
+
+ if (rtl838x_smi_wait_op(100000))
+ goto timeout;
+
+ mutex_unlock(&smi_lock);
+ return 0;
+
+timeout:
+ mutex_unlock(&smi_lock);
+ return -ETIMEDOUT;
+}
+
+void rtl8380_get_version(struct rtl838x_switch_priv *priv)
+{
+ u32 rw_save, info_save;
+ u32 info;
+
+ rw_save = sw_r32(RTL838X_INT_RW_CTRL);
+ sw_w32(rw_save | 0x3, RTL838X_INT_RW_CTRL);
+
+ info_save = sw_r32(RTL838X_CHIP_INFO);
+ sw_w32(info_save | 0xA0000000, RTL838X_CHIP_INFO);
+
+ info = sw_r32(RTL838X_CHIP_INFO);
+ sw_w32(info_save, RTL838X_CHIP_INFO);
+ sw_w32(rw_save, RTL838X_INT_RW_CTRL);
+
+ if ((info & 0xFFFF) == 0x6275) {
+ if (((info >> 16) & 0x1F) == 0x1)
+ priv->version = RTL8380_VERSION_A;
+ else if (((info >> 16) & 0x1F) == 0x2)
+ priv->version = RTL8380_VERSION_B;
+ else
+ priv->version = RTL8380_VERSION_B;
+ } else {
+ priv->version = '-';
+ }
+}
+
+void rtl838x_vlan_profile_dump(int profile)
+{
+ u32 p;
+
+ if (profile < 0 || profile > 7)
+ return;
+
+ p = sw_r32(RTL838X_VLAN_PROFILE(profile));
+
+	pr_info("VLAN profile %d: L2 learning: %d, UNKN L2MC FLD PMSK %d, "
+		"UNKN IPMC FLD PMSK %d, UNKN IPv6MC FLD PMSK: %d\n",
+		profile, p & 1, (p >> 1) & 0x1ff, (p >> 10) & 0x1ff, (p >> 19) & 0x1ff);
+}
+
+void rtl8380_sds_rst(int mac)
+{
+ u32 offset = (mac == 24) ? 0 : 0x100;
+
+ sw_w32_mask(1 << 11, 0, RTL838X_SDS4_FIB_REG0 + offset);
+ sw_w32_mask(0x3, 0, RTL838X_SDS4_REG28 + offset);
+ sw_w32_mask(0x3, 0x3, RTL838X_SDS4_REG28 + offset);
+ sw_w32_mask(0, 0x1 << 6, RTL838X_SDS4_DUMMY0 + offset);
+ sw_w32_mask(0x1 << 6, 0, RTL838X_SDS4_DUMMY0 + offset);
+ pr_debug("SERDES reset: %d\n", mac);
+}
+
+int rtl8380_sds_power(int mac, int val)
+{
+ u32 mode = (val == 1) ? 0x4 : 0x9;
+ u32 offset = (mac == 24) ? 5 : 0;
+
+ if ((mac != 24) && (mac != 26)) {
+ pr_err("%s: not a fibre port: %d\n", __func__, mac);
+ return -1;
+ }
+
+ sw_w32_mask(0x1f << offset, mode << offset, RTL838X_SDS_MODE_SEL);
+
+ rtl8380_sds_rst(mac);
+
+ return 0;
+}
diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl838x.h b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl838x.h
new file mode 100644
index 0000000000..19049e4c95
--- /dev/null
+++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl838x.h
@@ -0,0 +1,1093 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _RTL838X_H
+#define _RTL838X_H
+
+#include <net/dsa.h>
+
+/*
+ * Register definition
+ */
+#define RTL838X_MAC_PORT_CTRL(port) (0xd560 + (((port) << 7)))
+#define RTL839X_MAC_PORT_CTRL(port) (0x8004 + (((port) << 7)))
+#define RTL930X_MAC_PORT_CTRL(port) (0x3260 + (((port) << 6)))
+#define RTL931X_MAC_PORT_CTRL (0x6004)
+
+#define RTL930X_MAC_L2_PORT_CTRL(port) (0x3268 + (((port) << 6)))
+#define RTL931X_MAC_L2_PORT_CTRL (0x6000)
+
+#define RTL838X_RST_GLB_CTRL_0 (0x003c)
+
+#define RTL838X_MAC_FORCE_MODE_CTRL (0xa104)
+#define RTL839X_MAC_FORCE_MODE_CTRL (0x02bc)
+#define RTL930X_MAC_FORCE_MODE_CTRL (0xCA1C)
+#define RTL931X_MAC_FORCE_MODE_CTRL (0x0DCC)
+
+#define RTL838X_DMY_REG31 (0x3b28)
+#define RTL838X_SDS_MODE_SEL (0x0028)
+#define RTL838X_SDS_CFG_REG (0x0034)
+#define RTL838X_INT_MODE_CTRL (0x005c)
+#define RTL838X_CHIP_INFO (0x00d8)
+#define RTL839X_CHIP_INFO (0x0ff4)
+#define RTL838X_PORT_ISO_CTRL(port) (0x4100 + ((port) << 2))
+#define RTL839X_PORT_ISO_CTRL(port) (0x1400 + ((port) << 3))
+
+/* Packet statistics */
+#define RTL838X_STAT_PORT_STD_MIB (0x1200)
+#define RTL839X_STAT_PORT_STD_MIB (0xC000)
+#define RTL930X_STAT_PORT_MIB_CNTR (0x0664)
+#define RTL838X_STAT_RST (0x3100)
+#define RTL839X_STAT_RST (0xF504)
+#define RTL930X_STAT_RST (0x3240)
+#define RTL931X_STAT_RST (0x7ef4)
+#define RTL838X_STAT_PORT_RST (0x3104)
+#define RTL839X_STAT_PORT_RST (0xF508)
+#define RTL930X_STAT_PORT_RST (0x3244)
+#define RTL931X_STAT_PORT_RST (0x7ef8)
+#define RTL838X_STAT_CTRL (0x3108)
+#define RTL839X_STAT_CTRL (0x04cc)
+#define RTL930X_STAT_CTRL (0x3248)
+#define RTL931X_STAT_CTRL (0x5720)
+
+/* Registers of the internal Serdes of the 8390 */
+#define RTL8390_SDS0_1_XSG0 (0xA000)
+#define RTL8390_SDS0_1_XSG1 (0xA100)
+#define RTL839X_SDS12_13_XSG0 (0xB800)
+#define RTL839X_SDS12_13_XSG1 (0xB900)
+#define RTL839X_SDS12_13_PWR0 (0xb880)
+#define RTL839X_SDS12_13_PWR1 (0xb980)
+
+/* Registers of the internal Serdes of the 8380 */
+#define RTL838X_SDS4_FIB_REG0 (0xF800)
+#define RTL838X_SDS4_REG28 (0xef80)
+#define RTL838X_SDS4_DUMMY0 (0xef8c)
+#define RTL838X_SDS5_EXT_REG6 (0xf18c)
+
+/* VLAN registers */
+#define RTL838X_VLAN_CTRL (0x3A74)
+#define RTL838X_VLAN_PROFILE(idx) (0x3A88 + ((idx) << 2))
+#define RTL838X_VLAN_PORT_EGR_FLTR (0x3A84)
+#define RTL838X_VLAN_PORT_PB_VLAN (0x3C00)
+#define RTL838X_VLAN_PORT_IGR_FLTR (0x3A7C)
+
+#define RTL839X_VLAN_PROFILE(idx) (0x25C0 + (((idx) << 3)))
+#define RTL839X_VLAN_CTRL (0x26D4)
+#define RTL839X_VLAN_PORT_PB_VLAN (0x26D8)
+#define RTL839X_VLAN_PORT_IGR_FLTR (0x27B4)
+#define RTL839X_VLAN_PORT_EGR_FLTR (0x27C4)
+
+#define RTL930X_VLAN_PROFILE_SET(idx) (0x9c60 + (((idx) * 20)))
+#define RTL930X_VLAN_CTRL (0x82D4)
+#define RTL930X_VLAN_PORT_PB_VLAN (0x82D8)
+#define RTL930X_VLAN_PORT_IGR_FLTR (0x83C0)
+#define RTL930X_VLAN_PORT_EGR_FLTR (0x83C8)
+
+#define RTL931X_VLAN_PROFILE_SET(idx) (0x9800 + (((idx) * 28)))
+#define RTL931X_VLAN_CTRL (0x94E4)
+#define RTL931X_VLAN_PORT_IGR_CTRL (0x94E8)
+#define RTL931X_VLAN_PORT_IGR_FLTR (0x96B4)
+#define RTL931X_VLAN_PORT_EGR_FLTR (0x96C4)
+
+/* Table access registers */
+#define RTL838X_TBL_ACCESS_CTRL_0 (0x6914)
+#define RTL838X_TBL_ACCESS_DATA_0(idx) (0x6918 + ((idx) << 2))
+#define RTL838X_TBL_ACCESS_CTRL_1 (0xA4C8)
+#define RTL838X_TBL_ACCESS_DATA_1(idx) (0xA4CC + ((idx) << 2))
+
+#define RTL839X_TBL_ACCESS_CTRL_0 (0x1190)
+#define RTL839X_TBL_ACCESS_DATA_0(idx) (0x1194 + ((idx) << 2))
+#define RTL839X_TBL_ACCESS_CTRL_1 (0x6b80)
+#define RTL839X_TBL_ACCESS_DATA_1(idx) (0x6b84 + ((idx) << 2))
+#define RTL839X_TBL_ACCESS_CTRL_2 (0x611C)
+#define RTL839X_TBL_ACCESS_DATA_2(i) (0x6120 + (((i) << 2)))
+
+#define RTL930X_TBL_ACCESS_CTRL_0 (0xB340)
+#define RTL930X_TBL_ACCESS_DATA_0(idx) (0xB344 + ((idx) << 2))
+#define RTL930X_TBL_ACCESS_CTRL_1 (0xB3A0)
+#define RTL930X_TBL_ACCESS_DATA_1(idx) (0xB3A4 + ((idx) << 2))
+#define RTL930X_TBL_ACCESS_CTRL_2 (0xCE04)
+#define RTL930X_TBL_ACCESS_DATA_2(i) (0xCE08 + (((i) << 2)))
+
+#define RTL931X_TBL_ACCESS_CTRL_0 (0x8500)
+#define RTL931X_TBL_ACCESS_DATA_0(idx) (0x8508 + ((idx) << 2))
+#define RTL931X_TBL_ACCESS_CTRL_1 (0x40C0)
+#define RTL931X_TBL_ACCESS_DATA_1(idx) (0x40C4 + ((idx) << 2))
+#define RTL931X_TBL_ACCESS_CTRL_2 (0x8528)
+#define RTL931X_TBL_ACCESS_DATA_2(i) (0x852C + (((i) << 2)))
+#define RTL931X_TBL_ACCESS_CTRL_3 (0x0200)
+#define RTL931X_TBL_ACCESS_DATA_3(i) (0x0204 + (((i) << 2)))
+#define RTL931X_TBL_ACCESS_CTRL_4 (0x20DC)
+#define RTL931X_TBL_ACCESS_DATA_4(i) (0x20E0 + (((i) << 2)))
+#define RTL931X_TBL_ACCESS_CTRL_5 (0x7E1C)
+#define RTL931X_TBL_ACCESS_DATA_5(i) (0x7E20 + (((i) << 2)))
+
+/* MAC handling */
+#define RTL838X_MAC_LINK_STS (0xa188)
+#define RTL839X_MAC_LINK_STS (0x0390)
+#define RTL930X_MAC_LINK_STS (0xCB10)
+#define RTL931X_MAC_LINK_STS (0x0EC0)
+#define RTL838X_MAC_LINK_SPD_STS(p) (0xa190 + (((p >> 4) << 2)))
+#define RTL839X_MAC_LINK_SPD_STS(p) (0x03a0 + (((p >> 4) << 2)))
+#define RTL930X_MAC_LINK_SPD_STS(p) (0xCB18 + (((p >> 3) << 2)))
+#define RTL931X_MAC_LINK_SPD_STS (0x0ED0)
+#define RTL838X_MAC_LINK_DUP_STS (0xa19c)
+#define RTL839X_MAC_LINK_DUP_STS (0x03b0)
+#define RTL930X_MAC_LINK_DUP_STS (0xCB28)
+#define RTL931X_MAC_LINK_DUP_STS (0x0EF0)
+#define RTL838X_MAC_TX_PAUSE_STS (0xa1a0)
+#define RTL839X_MAC_TX_PAUSE_STS (0x03b8)
+#define RTL930X_MAC_TX_PAUSE_STS (0xCB2C)
+#define RTL931X_MAC_TX_PAUSE_STS (0x0EF8)
+#define RTL838X_MAC_RX_PAUSE_STS (0xa1a4)
+#define RTL839X_MAC_RX_PAUSE_STS (0x03c0)
+#define RTL930X_MAC_RX_PAUSE_STS (0xCB30)
+#define RTL931X_MAC_RX_PAUSE_STS (0x0F00)
+#define RTL930X_MAC_LINK_MEDIA_STS (0xCB14)
+#define RTL931X_MAC_LINK_MEDIA_STS (0x0EC8)
+
+/* MAC link state bits */
+#define RTL838X_FORCE_EN (1 << 0)
+#define RTL838X_FORCE_LINK_EN (1 << 1)
+#define RTL838X_NWAY_EN (1 << 2)
+#define RTL838X_DUPLEX_MODE (1 << 3)
+#define RTL838X_TX_PAUSE_EN (1 << 6)
+#define RTL838X_RX_PAUSE_EN (1 << 7)
+#define RTL838X_MAC_FORCE_FC_EN (1 << 8)
+
+#define RTL839X_FORCE_EN (1 << 0)
+#define RTL839X_FORCE_LINK_EN (1 << 1)
+#define RTL839X_DUPLEX_MODE (1 << 2)
+#define RTL839X_TX_PAUSE_EN (1 << 5)
+#define RTL839X_RX_PAUSE_EN (1 << 6)
+#define RTL839X_MAC_FORCE_FC_EN (1 << 7)
+
+#define RTL930X_FORCE_EN (1 << 0)
+#define RTL930X_FORCE_LINK_EN (1 << 1)
+#define RTL930X_DUPLEX_MODE (1 << 2)
+#define RTL930X_TX_PAUSE_EN (1 << 7)
+#define RTL930X_RX_PAUSE_EN (1 << 8)
+#define RTL930X_MAC_FORCE_FC_EN (1 << 9)
+
+#define RTL931X_FORCE_EN (1 << 9)
+#define RTL931X_FORCE_LINK_EN (1 << 0)
+#define RTL931X_DUPLEX_MODE (1 << 2)
+#define RTL931X_MAC_FORCE_FC_EN (1 << 4)
+#define RTL931X_TX_PAUSE_EN (1 << 16)
+#define RTL931X_RX_PAUSE_EN (1 << 17)
+
+/* EEE */
+#define RTL838X_MAC_EEE_ABLTY (0xa1a8)
+#define RTL838X_EEE_PORT_TX_EN (0x014c)
+#define RTL838X_EEE_PORT_RX_EN (0x0150)
+#define RTL838X_EEE_CLK_STOP_CTRL (0x0148)
+#define RTL838X_EEE_TX_TIMER_GIGA_CTRL (0xaa04)
+#define RTL838X_EEE_TX_TIMER_GELITE_CTRL (0xaa08)
+
+#define RTL839X_EEE_TX_TIMER_GELITE_CTRL (0x042C)
+#define RTL839X_EEE_TX_TIMER_GIGA_CTRL (0x0430)
+#define RTL839X_EEE_TX_TIMER_10G_CTRL (0x0434)
+#define RTL839X_EEE_CTRL(p) (0x8008 + ((p) << 7))
+#define RTL839X_MAC_EEE_ABLTY (0x03C8)
+
+#define RTL930X_MAC_EEE_ABLTY (0xCB34)
+#define RTL930X_EEE_CTRL(p) (0x3274 + ((p) << 6))
+#define RTL930X_EEEP_PORT_CTRL(p) (0x3278 + ((p) << 6))
+
+/* L2 functionality */
+#define RTL838X_L2_CTRL_0 (0x3200)
+#define RTL839X_L2_CTRL_0 (0x3800)
+#define RTL930X_L2_CTRL (0x8FD8)
+#define RTL931X_L2_CTRL (0xC800)
+#define RTL838X_L2_CTRL_1 (0x3204)
+#define RTL839X_L2_CTRL_1 (0x3804)
+#define RTL930X_L2_AGE_CTRL (0x8FDC)
+#define RTL931X_L2_AGE_CTRL (0xC804)
+#define RTL838X_L2_PORT_AGING_OUT (0x3358)
+#define RTL839X_L2_PORT_AGING_OUT (0x3b74)
+#define RTL930X_L2_PORT_AGE_CTRL (0x8FE0)
+#define RTL931X_L2_PORT_AGE_CTRL (0xc808)
+#define RTL838X_TBL_ACCESS_L2_CTRL (0x6900)
+#define RTL839X_TBL_ACCESS_L2_CTRL (0x1180)
+#define RTL930X_TBL_ACCESS_L2_CTRL (0xB320)
+#define RTL930X_TBL_ACCESS_L2_METHOD_CTRL (0xB324)
+#define RTL838X_TBL_ACCESS_L2_DATA(idx) (0x6908 + ((idx) << 2))
+#define RTL839X_TBL_ACCESS_L2_DATA(idx) (0x1184 + ((idx) << 2))
+#define RTL930X_TBL_ACCESS_L2_DATA(idx) (0xab08 + ((idx) << 2))
+
+#define RTL838X_L2_TBL_FLUSH_CTRL (0x3370)
+#define RTL839X_L2_TBL_FLUSH_CTRL (0x3ba0)
+#define RTL930X_L2_TBL_FLUSH_CTRL (0x9404)
+#define RTL931X_L2_TBL_FLUSH_CTRL (0xCD9C)
+
+#define RTL838X_L2_LRN_CONSTRT (0x329C)
+#define RTL839X_L2_LRN_CONSTRT (0x3910)
+#define RTL930X_L2_LRN_CONSTRT_CTRL (0x909c)
+#define RTL931X_L2_LRN_CONSTRT_CTRL (0xC964)
+
+#define RTL838X_L2_FLD_PMSK (0x3288)
+#define RTL839X_L2_FLD_PMSK (0x38EC)
+#define RTL930X_L2_BC_FLD_PMSK (0x9068)
+#define RTL931X_L2_BC_FLD_PMSK (0xC8FC)
+
+#define RTL930X_L2_UNKN_UC_FLD_PMSK (0x9064)
+#define RTL931X_L2_UNKN_UC_FLD_PMSK (0xC8F4)
+
+#define RTL838X_L2_LRN_CONSTRT_EN (0x3368)
+#define RTL838X_L2_PORT_LRN_CONSTRT (0x32A0)
+#define RTL839X_L2_PORT_LRN_CONSTRT (0x3914)
+
+#define RTL838X_L2_PORT_NEW_SALRN(p) (0x328c + (((p >> 4) << 2)))
+#define RTL839X_L2_PORT_NEW_SALRN(p) (0x38F0 + (((p >> 4) << 2)))
+#define RTL930X_L2_PORT_SALRN(p) (0x8FEC + (((p >> 4) << 2)))
+#define RTL931X_L2_PORT_NEW_SALRN(p) (0xC820 + (((p >> 4) << 2)))
+
+#define SALRN_PORT_SHIFT(p) ((p % 16) * 2)
+#define SALRN_MODE_MASK 0x3
+#define SALRN_MODE_HARDWARE 0
+#define SALRN_MODE_DISABLED 2
+
+#define RTL838X_L2_PORT_NEW_SA_FWD(p) (0x3294 + (((p >> 4) << 2)))
+#define RTL839X_L2_PORT_NEW_SA_FWD(p) (0x3900 + (((p >> 4) << 2)))
+#define RTL930X_L2_PORT_NEW_SA_FWD(p) (0x8FF4 + (((p / 10) << 2)))
+#define RTL931X_L2_PORT_NEW_SA_FWD(p) (0xC830 + (((p / 10) << 2)))
+
+#define RTL930X_ST_CTRL (0x8798)
+
+#define RTL930X_L2_PORT_SABLK_CTRL (0x905c)
+#define RTL930X_L2_PORT_DABLK_CTRL (0x9060)
+
+#define RTL838X_L2_PORT_LM_ACT(p) (0x3208 + ((p) << 2))
+#define RTL838X_VLAN_PORT_FWD (0x3A78)
+#define RTL839X_VLAN_PORT_FWD (0x27AC)
+#define RTL930X_VLAN_PORT_FWD (0x834C)
+#define RTL931X_VLAN_PORT_FWD (0x95CC)
+#define RTL838X_VLAN_FID_CTRL (0x3aa8)
+
+/* Port Mirroring */
+#define RTL838X_MIR_CTRL (0x5D00)
+#define RTL838X_MIR_DPM_CTRL (0x5D20)
+#define RTL838X_MIR_SPM_CTRL (0x5D10)
+
+#define RTL839X_MIR_CTRL (0x2500)
+#define RTL839X_MIR_DPM_CTRL (0x2530)
+#define RTL839X_MIR_SPM_CTRL (0x2510)
+
+#define RTL930X_MIR_CTRL (0xA2A0)
+#define RTL930X_MIR_DPM_CTRL (0xA2C0)
+#define RTL930X_MIR_SPM_CTRL (0xA2B0)
+
+#define RTL931X_MIR_CTRL (0xAF00)
+#define RTL931X_MIR_DPM_CTRL (0xAF30)
+#define RTL931X_MIR_SPM_CTRL (0xAF10)
+
+/* Storm/rate control and scheduling */
+#define RTL838X_STORM_CTRL (0x4700)
+#define RTL839X_STORM_CTRL (0x1800)
+#define RTL838X_STORM_CTRL_LB_CTRL(p) (0x4884 + (((p) << 2)))
+#define RTL838X_STORM_CTRL_BURST_PPS_0 (0x4874)
+#define RTL838X_STORM_CTRL_BURST_PPS_1 (0x4878)
+#define RTL838X_STORM_CTRL_BURST_0 (0x487c)
+#define RTL838X_STORM_CTRL_BURST_1 (0x4880)
+#define RTL839X_STORM_CTRL_LB_TICK_TKN_CTRL_0 (0x1804)
+#define RTL839X_STORM_CTRL_LB_TICK_TKN_CTRL_1 (0x1808)
+#define RTL838X_SCHED_CTRL (0xB980)
+#define RTL839X_SCHED_CTRL (0x60F4)
+#define RTL838X_SCHED_LB_TICK_TKN_CTRL_0 (0xAD58)
+#define RTL838X_SCHED_LB_TICK_TKN_CTRL_1 (0xAD5C)
+#define RTL839X_SCHED_LB_TICK_TKN_CTRL_0 (0x1804)
+#define RTL839X_SCHED_LB_TICK_TKN_CTRL_1 (0x1808)
+#define RTL839X_STORM_CTRL_SPCL_LB_TICK_TKN_CTRL (0x2000)
+#define RTL839X_IGR_BWCTRL_LB_TICK_TKN_CTRL_0 (0x1604)
+#define RTL839X_IGR_BWCTRL_LB_TICK_TKN_CTRL_1 (0x1608)
+#define RTL839X_SCHED_LB_TICK_TKN_CTRL (0x60F8)
+#define RTL839X_SCHED_LB_TICK_TKN_PPS_CTRL (0x6200)
+#define RTL838X_SCHED_LB_THR (0xB984)
+#define RTL839X_SCHED_LB_THR (0x60FC)
+#define RTL838X_SCHED_P_EGR_RATE_CTRL(p) (0xC008 + (((p) << 7)))
+#define RTL838X_SCHED_Q_EGR_RATE_CTRL(p, q) (0xC00C + (p << 7) + (((q) << 2)))
+#define RTL838X_STORM_CTRL_PORT_BC_EXCEED (0x470C)
+#define RTL838X_STORM_CTRL_PORT_MC_EXCEED (0x4710)
+#define RTL838X_STORM_CTRL_PORT_UC_EXCEED (0x4714)
+#define RTL839X_STORM_CTRL_PORT_BC_EXCEED(p) (0x180c + (((p >> 5) << 2)))
+#define RTL839X_STORM_CTRL_PORT_MC_EXCEED(p) (0x1814 + (((p >> 5) << 2)))
+#define RTL839X_STORM_CTRL_PORT_UC_EXCEED(p) (0x181c + (((p >> 5) << 2)))
+#define RTL838X_STORM_CTRL_PORT_UC(p) (0x4718 + (((p) << 2)))
+#define RTL838X_STORM_CTRL_PORT_MC(p) (0x478c + (((p) << 2)))
+#define RTL838X_STORM_CTRL_PORT_BC(p) (0x4800 + (((p) << 2)))
+#define RTL839X_STORM_CTRL_PORT_UC_0(p) (0x185C + (((p) << 3)))
+#define RTL839X_STORM_CTRL_PORT_UC_1(p) (0x1860 + (((p) << 3)))
+#define RTL839X_STORM_CTRL_PORT_MC_0(p) (0x19FC + (((p) << 3)))
+#define RTL839X_STORM_CTRL_PORT_MC_1(p) (0x1a00 + (((p) << 3)))
+#define RTL839X_STORM_CTRL_PORT_BC_0(p) (0x1B9C + (((p) << 3)))
+#define RTL839X_STORM_CTRL_PORT_BC_1(p) (0x1BA0 + (((p) << 3)))
+#define RTL839X_TBL_ACCESS_CTRL_2 (0x611C)
+#define RTL839X_TBL_ACCESS_DATA_2(i) (0x6120 + (((i) << 2)))
+#define RTL839X_IGR_BWCTRL_PORT_CTRL_10G_0(p) (0x1618 + (((p) << 3)))
+#define RTL839X_IGR_BWCTRL_PORT_CTRL_10G_1(p) (0x161C + (((p) << 3)))
+#define RTL839X_IGR_BWCTRL_PORT_CTRL_0(p) (0x1640 + (((p) << 3)))
+#define RTL839X_IGR_BWCTRL_PORT_CTRL_1(p) (0x1644 + (((p) << 3)))
+#define RTL839X_IGR_BWCTRL_CTRL_LB_THR (0x1614)
+
+/* Link aggregation (Trunking) */
+#define TRUNK_DISTRIBUTION_ALGO_SPA_BIT 0x01
+#define TRUNK_DISTRIBUTION_ALGO_SMAC_BIT 0x02
+#define TRUNK_DISTRIBUTION_ALGO_DMAC_BIT 0x04
+#define TRUNK_DISTRIBUTION_ALGO_SIP_BIT 0x08
+#define TRUNK_DISTRIBUTION_ALGO_DIP_BIT 0x10
+#define TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT 0x20
+#define TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT 0x40
+#define TRUNK_DISTRIBUTION_ALGO_MASKALL 0x7F
+
+#define TRUNK_DISTRIBUTION_ALGO_L2_SPA_BIT 0x01
+#define TRUNK_DISTRIBUTION_ALGO_L2_SMAC_BIT 0x02
+#define TRUNK_DISTRIBUTION_ALGO_L2_DMAC_BIT 0x04
+#define TRUNK_DISTRIBUTION_ALGO_L2_VLAN_BIT 0x08
+#define TRUNK_DISTRIBUTION_ALGO_L2_MASKALL 0xF
+
+#define TRUNK_DISTRIBUTION_ALGO_L3_SPA_BIT 0x01
+#define TRUNK_DISTRIBUTION_ALGO_L3_SMAC_BIT 0x02
+#define TRUNK_DISTRIBUTION_ALGO_L3_DMAC_BIT 0x04
+#define TRUNK_DISTRIBUTION_ALGO_L3_VLAN_BIT 0x08
+#define TRUNK_DISTRIBUTION_ALGO_L3_SIP_BIT 0x10
+#define TRUNK_DISTRIBUTION_ALGO_L3_DIP_BIT 0x20
+#define TRUNK_DISTRIBUTION_ALGO_L3_SRC_L4PORT_BIT 0x40
+#define TRUNK_DISTRIBUTION_ALGO_L3_DST_L4PORT_BIT 0x80
+#define TRUNK_DISTRIBUTION_ALGO_L3_PROTO_BIT 0x100
+#define TRUNK_DISTRIBUTION_ALGO_L3_FLOW_LABEL_BIT 0x200
+#define TRUNK_DISTRIBUTION_ALGO_L3_MASKALL 0x3FF
+
+#define RTL838X_TRK_MBR_CTR (0x3E00)
+#define RTL838X_TRK_HASH_IDX_CTRL (0x3E20)
+#define RTL838X_TRK_HASH_CTRL (0x3E24)
+
+#define RTL839X_TRK_MBR_CTR (0x2200)
+#define RTL839X_TRK_HASH_IDX_CTRL (0x2280)
+#define RTL839X_TRK_HASH_CTRL (0x2284)
+
+#define RTL930X_TRK_MBR_CTRL (0xA41C)
+#define RTL930X_TRK_HASH_CTRL (0x9F80)
+
+#define RTL931X_TRK_MBR_CTRL (0xB8D0)
+#define RTL931X_TRK_HASH_CTRL (0xBA70)
+
+/* Attack prevention */
+#define RTL838X_ATK_PRVNT_PORT_EN (0x5B00)
+#define RTL838X_ATK_PRVNT_CTRL (0x5B04)
+#define RTL838X_ATK_PRVNT_ACT (0x5B08)
+#define RTL838X_ATK_PRVNT_STS (0x5B1C)
+
+/* 802.1X */
+#define RTL838X_RMA_BPDU_FLD_PMSK (0x4348)
+#define RTL930X_RMA_BPDU_FLD_PMSK (0x9F18)
+#define RTL931X_RMA_BPDU_FLD_PMSK (0x8950)
+#define RTL839X_RMA_BPDU_FLD_PMSK (0x125C)
+
+#define RTL838X_SPCL_TRAP_CTRL (0x6980)
+#define RTL838X_SPCL_TRAP_EAPOL_CTRL (0x6988)
+#define RTL838X_SPCL_TRAP_ARP_CTRL (0x698C)
+#define RTL838X_SPCL_TRAP_IGMP_CTRL (0x6984)
+#define RTL838X_SPCL_TRAP_IPV6_CTRL (0x6994)
+#define RTL838X_SPCL_TRAP_SWITCH_MAC_CTRL (0x6998)
+
+#define RTL839X_SPCL_TRAP_CTRL (0x1054)
+#define RTL839X_SPCL_TRAP_EAPOL_CTRL (0x105C)
+#define RTL839X_SPCL_TRAP_ARP_CTRL (0x1060)
+#define RTL839X_SPCL_TRAP_IGMP_CTRL (0x1058)
+#define RTL839X_SPCL_TRAP_IPV6_CTRL (0x1064)
+#define RTL839X_SPCL_TRAP_SWITCH_MAC_CTRL (0x1068)
+#define RTL839X_SPCL_TRAP_SWITCH_IPV4_ADDR_CTRL (0x106C)
+#define RTL839X_SPCL_TRAP_CRC_CTRL (0x1070)
+/* special port action controls */
+/*
+ values:
+ 0 = FORWARD (default)
+ 1 = DROP
+ 2 = TRAP2CPU
+ 3 = FLOOD TO ALL PORTS
+
+ Register encoding:
+ offset = CTRL + ((port >> 4) << 2)
+ value/mask = 3 << ((port & 0xF) << 1)
+*/
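+/* Illustrative sketch of the encoding above (assumption, not part of the
+ * driver): 16 ports share one 32-bit control register with two action bits
+ * per port, so setting a port's action could look roughly like:
+ *
+ *   int reg   = ctrl_base + ((port >> 4) << 2);  // ctrl_base: one of the *_CTRL registers below
+ *   int shift = (port & 0xF) << 1;
+ *   sw_w32_mask(3 << shift, action << shift, reg);  // action: one of the action_type_t values
+ */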
+
+typedef enum {
+ BPDU = 0,
+ PTP,
+ PTP_UDP,
+ PTP_ETH2,
+ LLTP,
+ EAPOL,
+ GRATARP,
+} rma_ctrl_t;
+
+typedef enum {
+ FORWARD = 0,
+ DROP,
+ TRAP2CPU,
+ FLOODALL,
+ TRAP2MASTERCPU,
+ COPY2CPU,
+} action_type_t;
+
+#define RTL838X_RMA_BPDU_CTRL (0x4330)
+#define RTL839X_RMA_BPDU_CTRL (0x122C)
+#define RTL930X_RMA_BPDU_CTRL (0x9E7C)
+#define RTL931X_RMA_BPDU_CTRL (0x881C)
+
+#define RTL838X_RMA_PTP_CTRL (0x4338)
+#define RTL839X_RMA_PTP_CTRL (0x123C)
+#define RTL930X_RMA_PTP_CTRL (0x9E88)
+#define RTL931X_RMA_PTP_CTRL (0x8834)
+
+#define RTL838X_RMA_LLTP_CTRL (0x4340)
+#define RTL839X_RMA_LLTP_CTRL (0x124C)
+#define RTL930X_RMA_LLTP_CTRL (0x9EFC)
+#define RTL931X_RMA_LLTP_CTRL (0x8918)
+
+#define RTL930X_RMA_EAPOL_CTRL (0x9F08)
+#define RTL931X_RMA_EAPOL_CTRL (0x8930)
+#define RTL931X_TRAP_ARP_GRAT_PORT_ACT (0x8C04)
+
+/* QoS */
+#define RTL838X_QM_INTPRI2QID_CTRL (0x5F00)
+#define RTL839X_QM_INTPRI2QID_CTRL(q) (0x1110 + (q << 2))
+#define RTL839X_QM_PORT_QNUM(p) (0x1130 + (((p / 10) << 2)))
+#define RTL838X_PRI_SEL_PORT_PRI(p) (0x5FB8 + (((p / 10) << 2)))
+#define RTL839X_PRI_SEL_PORT_PRI(p) (0x10A8 + (((p / 10) << 2)))
+#define RTL838X_QM_PKT2CPU_INTPRI_MAP (0x5F10)
+#define RTL839X_QM_PKT2CPU_INTPRI_MAP (0x1154)
+#define RTL838X_PRI_SEL_CTRL (0x10E0)
+#define RTL839X_PRI_SEL_CTRL (0x10E0)
+#define RTL838X_PRI_SEL_TBL_CTRL(i) (0x5FD8 + (((i) << 2)))
+#define RTL839X_PRI_SEL_TBL_CTRL(i) (0x10D0 + (((i) << 2)))
+#define RTL838X_QM_PKT2CPU_INTPRI_0 (0x5F04)
+#define RTL838X_QM_PKT2CPU_INTPRI_1 (0x5F08)
+#define RTL838X_QM_PKT2CPU_INTPRI_2 (0x5F0C)
+#define RTL839X_OAM_CTRL (0x2100)
+#define RTL839X_OAM_PORT_ACT_CTRL(p) (0x2104 + (((p) << 2)))
+#define RTL839X_RMK_PORT_DEI_TAG_CTRL(p) (0x6A9C + (((p >> 5) << 2)))
+#define RTL839X_PRI_SEL_IPRI_REMAP (0x1080)
+#define RTL838X_PRI_SEL_IPRI_REMAP (0x5F8C)
+#define RTL839X_PRI_SEL_DEI2DP_REMAP (0x10EC)
+#define RTL839X_PRI_SEL_DSCP2DP_REMAP_ADDR(i) (0x10F0 + (((i >> 4) << 2)))
+#define RTL839X_RMK_DEI_CTRL (0x6AA4)
+#define RTL839X_WRED_PORT_THR_CTRL(i) (0x6084 + ((i) << 2))
+#define RTL839X_WRED_QUEUE_THR_CTRL(q, i) (0x6090 + ((q) * 12) + ((i) << 2))
+#define RTL838X_PRI_DSCP_INVLD_CTRL0 (0x5FE8)
+#define RTL838X_RMK_IPRI_CTRL (0xA460)
+#define RTL838X_RMK_OPRI_CTRL (0xA464)
+#define RTL838X_SCHED_P_TYPE_CTRL(p) (0xC04C + (((p) << 7)))
+#define RTL838X_SCHED_LB_CTRL(p) (0xC004 + (((p) << 7)))
+#define RTL838X_FC_P_EGR_DROP_CTRL(p) (0x6B1C + (((p) << 2)))
+
+/* Debug features */
+#define RTL930X_STAT_PRVTE_DROP_COUNTER0 (0xB5B8)
+
+/* Packet Inspection Engine */
+#define RTL838X_METER_GLB_CTRL (0x4B08)
+#define RTL839X_METER_GLB_CTRL (0x1300)
+#define RTL930X_METER_GLB_CTRL (0xa0a0)
+#define RTL931X_METER_GLB_CTRL (0x411C)
+
+#define RTL839X_ACL_CTRL (0x1288)
+
+#define RTL838X_ACL_BLK_LOOKUP_CTRL (0x6100)
+#define RTL839X_ACL_BLK_LOOKUP_CTRL (0x1280)
+#define RTL930X_PIE_BLK_LOOKUP_CTRL (0xa5a0)
+#define RTL931X_PIE_BLK_LOOKUP_CTRL (0x4180)
+
+#define RTL838X_ACL_BLK_PWR_CTRL (0x6104)
+#define RTL839X_PS_ACL_PWR_CTRL (0x049c)
+
+#define RTL838X_ACL_BLK_TMPLTE_CTRL(block) (0x6108 + ((block) << 2))
+#define RTL839X_ACL_BLK_TMPLTE_CTRL(block) (0x128c + ((block) << 2))
+#define RTL930X_PIE_BLK_TMPLTE_CTRL(block) (0xa624 + ((block) << 2))
+#define RTL931X_PIE_BLK_TMPLTE_CTRL(block) (0x4214 + ((block) << 2))
+
+#define RTL838X_ACL_BLK_GROUP_CTRL (0x615C)
+#define RTL839X_ACL_BLK_GROUP_CTRL (0x12ec)
+
+#define RTL838X_ACL_CLR_CTRL (0x6168)
+#define RTL839X_ACL_CLR_CTRL (0x12fc)
+#define RTL930X_PIE_CLR_CTRL (0xa66c)
+#define RTL931X_PIE_CLR_CTRL (0x42D8)
+
+#define RTL838X_DMY_REG27 (0x3378)
+
+#define RTL838X_ACL_PORT_LOOKUP_CTRL(p) (0x616C + (((p) << 2)))
+#define RTL930X_ACL_PORT_LOOKUP_CTRL(p) (0xA784 + (((p) << 2)))
+#define RTL931X_ACL_PORT_LOOKUP_CTRL(p) (0x44F8 + (((p) << 2)))
+
+#define RTL930X_PIE_BLK_PHASE_CTRL (0xA5A4)
+#define RTL931X_PIE_BLK_PHASE_CTRL (0x4184)
+
+// PIE actions
+#define PIE_ACT_COPY_TO_PORT 2
+#define PIE_ACT_REDIRECT_TO_PORT 4
+#define PIE_ACT_ROUTE_UC 6
+#define PIE_ACT_VID_ASSIGN 0
+
+// L3 actions
+#define L3_FORWARD 0
+#define L3_DROP 1
+#define L3_TRAP2CPU 2
+#define L3_COPY2CPU 3
+#define L3_TRAP2MASTERCPU 4
+#define L3_COPY2MASTERCPU 5
+#define L3_HARDDROP 6
+
+// Route actions
+#define ROUTE_ACT_FORWARD 0
+#define ROUTE_ACT_TRAP2CPU 1
+#define ROUTE_ACT_COPY2CPU 2
+#define ROUTE_ACT_DROP 3
+
+/* L3 Routing */
+#define RTL839X_ROUTING_SA_CTRL 0x6afc
+#define RTL930X_L3_HOST_TBL_CTRL (0xAB48)
+#define RTL930X_L3_IPUC_ROUTE_CTRL (0xAB4C)
+#define RTL930X_L3_IP6UC_ROUTE_CTRL (0xAB50)
+#define RTL930X_L3_IPMC_ROUTE_CTRL (0xAB54)
+#define RTL930X_L3_IP6MC_ROUTE_CTRL (0xAB58)
+#define RTL930X_L3_IP_MTU_CTRL(i) (0xAB5C + ((i >> 1) << 2))
+#define RTL930X_L3_IP6_MTU_CTRL(i) (0xAB6C + ((i >> 1) << 2))
+#define RTL930X_L3_HW_LU_KEY_CTRL (0xAC9C)
+#define RTL930X_L3_HW_LU_KEY_IP_CTRL (0xACA0)
+#define RTL930X_L3_HW_LU_CTRL (0xACC0)
+#define RTL930X_L3_IP_ROUTE_CTRL 0xab44
+
+/* Port LED Control */
+#define RTL930X_LED_PORT_NUM_CTRL(p) (0xCC04 + (((p >> 4) << 2)))
+#define RTL930X_LED_SET0_0_CTRL (0xCC28)
+#define RTL930X_LED_PORT_COPR_SET_SEL_CTRL(p) (0xCC2C + (((p >> 4) << 2)))
+#define RTL930X_LED_PORT_FIB_SET_SEL_CTRL(p) (0xCC34 + (((p >> 4) << 2)))
+#define RTL930X_LED_PORT_COPR_MASK_CTRL (0xCC3C)
+#define RTL930X_LED_PORT_FIB_MASK_CTRL (0xCC40)
+#define RTL930X_LED_PORT_COMBO_MASK_CTRL (0xCC44)
+
+#define RTL931X_LED_PORT_NUM_CTRL(p) (0x0604 + (((p >> 4) << 2)))
+#define RTL931X_LED_SET0_0_CTRL (0x0630)
+#define RTL931X_LED_PORT_COPR_SET_SEL_CTRL(p) (0x0634 + (((p >> 4) << 2)))
+#define RTL931X_LED_PORT_FIB_SET_SEL_CTRL(p) (0x0644 + (((p >> 4) << 2)))
+#define RTL931X_LED_PORT_COPR_MASK_CTRL (0x0654)
+#define RTL931X_LED_PORT_FIB_MASK_CTRL (0x065c)
+#define RTL931X_LED_PORT_COMBO_MASK_CTRL (0x0664)
+
+#define MAX_VLANS 4096
+#define MAX_LAGS 16
+#define MAX_PRIOS 8
+#define RTL930X_PORT_IGNORE 0x3f
+#define MAX_MC_GROUPS 512
+#define UNKNOWN_MC_PMASK (MAX_MC_GROUPS - 1)
+#define PIE_BLOCK_SIZE 128
+#define MAX_PIE_ENTRIES (18 * PIE_BLOCK_SIZE)
+#define N_FIXED_FIELDS 12
+#define N_FIXED_FIELDS_RTL931X 14
+#define MAX_COUNTERS 2048
+#define MAX_ROUTES 512
+#define MAX_HOST_ROUTES 1536
+#define MAX_INTF_MTUS 8
+#define DEFAULT_MTU 1536
+#define MAX_INTERFACES 100
+#define MAX_ROUTER_MACS 64
+#define L3_EGRESS_DMACS 2048
+#define MAX_SMACS 64
+
+enum phy_type {
+ PHY_NONE = 0,
+ PHY_RTL838X_SDS = 1,
+ PHY_RTL8218B_INT = 2,
+ PHY_RTL8218B_EXT = 3,
+ PHY_RTL8214FC = 4,
+ PHY_RTL839X_SDS = 5,
+ PHY_RTL930X_SDS = 6,
+};
+
+enum pbvlan_type {
+ PBVLAN_TYPE_INNER = 0,
+ PBVLAN_TYPE_OUTER,
+};
+
+enum pbvlan_mode {
+ PBVLAN_MODE_UNTAG_AND_PRITAG = 0,
+ PBVLAN_MODE_UNTAG_ONLY,
+ PBVLAN_MODE_ALL_PKT,
+};
+
+struct rtl838x_port {
+ bool enable;
+ u64 pm;
+ u16 pvid;
+ bool eee_enabled;
+ enum phy_type phy;
+ bool phy_is_integrated;
+ bool is10G;
+ bool is2G5;
+ int sds_num;
+ int led_set;
+ const struct dsa_port *dp;
+};
+
+struct rtl838x_vlan_info {
+ u64 untagged_ports;
+ u64 tagged_ports;
+ u8 profile_id;
+ bool hash_mc_fid;
+ bool hash_uc_fid;
+ u8 fid; // AKA MSTI
+
+ // The following fields are used only by the RTL931X
+ int if_id; // Interface (index in L3_EGR_INTF_IDX)
+ u16 multicast_grp_mask;
+ int l2_tunnel_list_id;
+};
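+/* Illustrative sketch (assumption, not part of the driver): both port masks
+ * are one bit per physical port, so adding port n as a tagged member of a
+ * VLAN would roughly be:
+ *
+ *   priv->r->vlan_tables_read(vid, &info);
+ *   info.tagged_ports |= BIT_ULL(n);
+ *   priv->r->vlan_set_tagged(vid, &info);
+ */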
+
+enum l2_entry_type {
+ L2_INVALID = 0,
+ L2_UNICAST = 1,
+ L2_MULTICAST = 2,
+ IP4_MULTICAST = 3,
+ IP6_MULTICAST = 4,
+};
+
+struct rtl838x_l2_entry {
+ u8 mac[6];
+ u16 vid;
+ u16 rvid;
+ u8 port;
+ bool valid;
+ enum l2_entry_type type;
+ bool is_static;
+ bool is_ip_mc;
+ bool is_ipv6_mc;
+ bool block_da;
+ bool block_sa;
+ bool suspended;
+ bool next_hop;
+ int age;
+ u8 trunk;
+ bool is_trunk;
+ u8 stack_dev;
+ u16 mc_portmask_index;
+ u32 mc_gip;
+ u32 mc_sip;
+ u16 mc_mac_index;
+ u16 nh_route_id;
+ bool nh_vlan_target; // Only RTL83xx: VLAN used for next hop
+
+ // The following is only valid on RTL931x
+ bool is_open_flow;
+ bool is_pe_forward;
+ bool is_local_forward;
+ bool is_remote_forward;
+ bool is_l2_tunnel;
+ int l2_tunnel_id;
+ int l2_tunnel_list_id;
+};
+
+enum fwd_rule_action {
+ FWD_RULE_ACTION_NONE = 0,
+ FWD_RULE_ACTION_FWD = 1,
+};
+
+enum pie_phase {
+ PHASE_VACL = 0,
+ PHASE_IACL = 1,
+};
+
+enum igr_filter {
+ IGR_FORWARD = 0,
+ IGR_DROP = 1,
+ IGR_TRAP = 2,
+};
+
+enum egr_filter {
+ EGR_DISABLE = 0,
+ EGR_ENABLE = 1,
+};
+
+/* Intermediate representation of a Packet Inspection Engine Rule
+ * as suggested by the Kernel's tc flower offload subsystem
+ * Field meaning is universal across SoC families, but data content is specific
+ * to SoC family (e.g. because of different port ranges) */
+struct pie_rule {
+ int id;
+ enum pie_phase phase; // Phase in which this template is applied
+ int packet_cntr; // ID of a packet counter assigned to this rule
+ int octet_cntr; // ID of a byte counter assigned to this rule
+ u32 last_packet_cnt;
+ u64 last_octet_cnt;
+
+ // The following are requirements for the pie template
+ bool is_egress;
+ bool is_ipv6; // This is a rule with IPv6 fields
+
+ // Fixed fields that are always matched against on RTL8380
+ u8 spmmask_fix;
+ u8 spn; // Source port number
+ bool stacking_port; // Source port is stacking port
+ bool mgnt_vlan; // Packet arrived on management VLAN
+ bool dmac_hit_sw; // The packet's destination MAC matches one of the device's own addresses
+ bool content_too_deep; // The content of the packet cannot be parsed: too many layers
+ bool not_first_frag; // Not the first IP fragment
+ u8 frame_type_l4; // 0: UDP, 1: TCP, 2: ICMP/ICMPv6, 3: IGMP
+ u8 frame_type; // 0: ARP, 1: L2 only, 2: IPv4, 3: IPv6
+ bool otag_fmt; // 0: outer tag packet, 1: outer priority tag or untagged
+ bool itag_fmt; // 0: inner tag packet, 1: inner priority tag or untagged
+ bool otag_exist; // packet with outer tag
+ bool itag_exist; // packet with inner tag
+ bool frame_type_l2; // 0: Ethernet, 1: LLC_SNAP, 2: LLC_Other, 3: Reserved
+ bool igr_normal_port; // Ingress port is not cpu or stacking port
+ u8 tid; // The template ID defining what the templated fields mean
+
+ // Masks for the fields that are always matched against on RTL8380
+ u8 spmmask_fix_m;
+ u8 spn_m;
+ bool stacking_port_m;
+ bool mgnt_vlan_m;
+ bool dmac_hit_sw_m;
+ bool content_too_deep_m;
+ bool not_first_frag_m;
+ u8 frame_type_l4_m;
+ u8 frame_type_m;
+ bool otag_fmt_m;
+ bool itag_fmt_m;
+ bool otag_exist_m;
+ bool itag_exist_m;
+ bool frame_type_l2_m;
+ bool igr_normal_port_m;
+ u8 tid_m;
+
+ // Logical operations between rules, special rules for rule numbers apply
+ bool valid;
+ bool cond_not; // Matches when the conditions do not match
+ bool cond_and1; // And this rule 2n with the next rule 2n+1 in same block
+ bool cond_and2; // And this rule m in block 2n with rule m in block 2n+1
+ bool ivalid;
+
+ // Actions to be performed
+ bool drop; // Drop the packet
+ bool fwd_sel; // Forward packet: to port, portmask, dest route, next rule, drop
+ bool ovid_sel; // Do something to outer vlan-id: shift, re-assign
+ bool ivid_sel; // Do something to inner vlan-id: shift, re-assign
+ bool flt_sel; // Filter the packet when sending to certain ports
+ bool log_sel; // Log the packet in one of the LOG-table counters
+ bool rmk_sel; // Re-mark the packet, i.e. change the priority-tag
+ bool meter_sel; // Meter the packet, i.e. limit rate of this type of packet
+ bool tagst_sel; // Change the egress tag
+ bool mir_sel; // Mirror the packet to a Link Aggregation Group
+ bool nopri_sel; // Change the normal priority
+ bool cpupri_sel; // Change the CPU priority
+ bool otpid_sel; // Change Outer Tag Protocol Identifier (802.1q)
+ bool itpid_sel; // Change Inner Tag Protocol Identifier (802.1q)
+ bool shaper_sel; // Apply traffic shaper
+ bool mpls_sel; // MPLS actions
+ bool bypass_sel; // Bypass actions
+ bool fwd_sa_lrn; // Learn the source address when forwarding
+ bool fwd_mod_to_cpu; // Forward the modified VLAN tag format to CPU-port
+
+ // Fields used in predefined templates 0-2 on RTL8380 / 90 / 9300
+ u64 spm; // Source Port Matrix
+ u16 otag; // Outer VLAN-ID
+ u8 smac[ETH_ALEN]; // Source MAC address
+ u8 dmac[ETH_ALEN]; // Destination MAC address
+ u16 ethertype; // Ethernet frame type field in ethernet header
+ u16 itag; // Inner VLAN-ID
+ u16 field_range_check;
+ u32 sip; // Source IP
+ struct in6_addr sip6; // IPv6 Source IP
+ u32 dip; // Destination IP
+ struct in6_addr dip6; // IPv6 Destination IP
+ u16 tos_proto; // IPv4: TOS + Protocol fields, IPv6: Traffic class + next header
+ u16 sport; // TCP/UDP source port
+ u16 dport; // TCP/UDP destination port
+ u16 icmp_igmp;
+ u16 tcp_info;
+ u16 dsap_ssap; // Destination / Source Service Access Point bytes (802.3)
+
+ u64 spm_m;
+ u16 otag_m;
+ u8 smac_m[ETH_ALEN];
+ u8 dmac_m[ETH_ALEN];
+ u8 ethertype_m;
+ u16 itag_m;
+ u16 field_range_check_m;
+ u32 sip_m;
+ struct in6_addr sip6_m; // IPv6 Source IP mask
+ u32 dip_m;
+ struct in6_addr dip6_m; // IPv6 Destination IP mask
+ u16 tos_proto_m;
+ u16 sport_m;
+ u16 dport_m;
+ u16 icmp_igmp_m;
+ u16 tcp_info_m;
+ u16 dsap_ssap_m;
+
+ // Data associated with actions
+ u8 fwd_act; // Type of forwarding action
+ // 0: permit, 1: drop, 2: copy to port id, 3: copy to portmask
+ // 4: redirect to portid, 5: redirect to portmask
+ // 6: route, 7: vlan leaky (only 8380)
+ u16 fwd_data; // Additional data for forwarding action, e.g. destination port
+ u8 ovid_act;
+ u16 ovid_data; // Outer VLAN ID
+ u8 ivid_act;
+ u16 ivid_data; // Inner VLAN ID
+ u16 flt_data; // Filtering data
+ u16 log_data; // ID of packet or octet counter in LOG table, on RTL93xx
+ // unnecessary since PIE-Rule-ID == LOG-counter-ID
+ bool log_octets;
+ u8 mpls_act; // MPLS action type
+ u16 mpls_lib_idx; // MPLS action data
+
+ u16 rmk_data; // Data for remarking
+ u16 meter_data; // ID of meter for bandwidth control
+ u16 tagst_data;
+ u16 mir_data;
+ u16 nopri_data;
+ u16 cpupri_data;
+ u16 otpid_data;
+ u16 itpid_data;
+ u16 shaper_data;
+
+ // Bypass actions, ignored on RTL8380
+ bool bypass_all; // Not clear
+ bool bypass_igr_stp; // Bypass Ingress STP state
+ bool bypass_ibc_sc; // Bypass Ingress Bandwidth Control and Storm Control
+};
+
+struct rtl838x_l3_intf {
+ u16 vid;
+ u8 smac_idx;
+ u8 ip4_mtu_id;
+ u8 ip6_mtu_id;
+ u16 ip4_mtu;
+ u16 ip6_mtu;
+ u8 ttl_scope;
+ u8 hl_scope;
+ u8 ip4_icmp_redirect;
+ u8 ip6_icmp_redirect;
+ u8 ip4_pbr_icmp_redirect;
+ u8 ip6_pbr_icmp_redirect;
+};
+
+/*
+ * An entry in the RTL93XX SoC's ROUTER_MAC tables setting up a termination point
+ * for the L3 routing system. Packets arriving and matching an entry in this table
+ * will be considered for routing.
+ * Mask fields state whether the corresponding data fields matter for matching
+ */
+struct rtl93xx_rt_mac {
+ bool valid; // Valid or not
+ bool p_type; // Individual (0) or trunk (1) port
+ bool p_mask; // Whether the port type is used
+ u8 p_id;
+ u8 p_id_mask; // Mask for the port
+ u8 action; // Routing action performed: 0: FORWARD, 1: DROP, 2: TRAP2CPU
+ // 3: COPY2CPU, 4: TRAP2MASTERCPU, 5: COPY2MASTERCPU, 6: HARDDROP
+ u16 vid;
+ u16 vid_mask;
+ u64 mac; // MAC address used as source MAC in the routed packet
+ u64 mac_mask;
+};
+
+struct rtl83xx_nexthop {
+ u16 id; // ID: L3_NEXT_HOP table-index or route-index set in L2_NEXT_HOP
+ u32 dev_id;
+ u16 port;
+ u16 vid; // VLAN-ID for L2 table entry (saved from L2-UC entry)
+ u16 rvid; // Relay VID/FID for the L2 table entry
+ u64 mac; // The MAC address of the entry in the L2_NEXT_HOP table
+ u16 mac_id;
+ u16 l2_id; // Index of this next hop forwarding entry in L2 FIB table
+ u64 gw; // The gateway MAC address packets are forwarded to
+ int if_id; // Interface (into L3_EGR_INTF_IDX)
+};
+
+struct rtl838x_switch_priv;
+
+struct rtl83xx_flow {
+ unsigned long cookie;
+ struct rhash_head node;
+ struct rcu_head rcu_head;
+ struct rtl838x_switch_priv *priv;
+ struct pie_rule rule;
+ u32 flags;
+};
+
+struct rtl93xx_route_attr {
+ bool valid;
+ bool hit;
+ bool ttl_dec;
+ bool ttl_check;
+ bool dst_null;
+ bool qos_as;
+ u8 qos_prio;
+ u8 type;
+ u8 action;
+};
+
+struct rtl83xx_route {
+ u32 gw_ip; // IP of the route's gateway
+ u32 dst_ip; // IP of the destination net
+ struct in6_addr dst_ip6;
+ int prefix_len; // Network prefix len of the destination net
+ bool is_host_route;
+ int id; // ID number of this route
+ struct rhlist_head linkage;
+ u16 switch_mac_id; // Index into switch's own MACs, RTL839X only
+ struct rtl83xx_nexthop nh;
+ struct pie_rule pr;
+ struct rtl93xx_route_attr attr;
+};
+
+struct rtl838x_reg {
+ void (*mask_port_reg_be)(u64 clear, u64 set, int reg);
+ void (*set_port_reg_be)(u64 set, int reg);
+ u64 (*get_port_reg_be)(int reg);
+ void (*mask_port_reg_le)(u64 clear, u64 set, int reg);
+ void (*set_port_reg_le)(u64 set, int reg);
+ u64 (*get_port_reg_le)(int reg);
+ int stat_port_rst;
+ int stat_rst;
+ int stat_port_std_mib;
+ int (*port_iso_ctrl)(int p);
+ void (*traffic_enable)(int source, int dest);
+ void (*traffic_disable)(int source, int dest);
+ void (*traffic_set)(int source, u64 dest_matrix);
+ u64 (*traffic_get)(int source);
+ int l2_ctrl_0;
+ int l2_ctrl_1;
+ int smi_poll_ctrl;
+ u32 l2_port_aging_out;
+ int l2_tbl_flush_ctrl;
+ void (*exec_tbl0_cmd)(u32 cmd);
+ void (*exec_tbl1_cmd)(u32 cmd);
+ int (*tbl_access_data_0)(int i);
+ int isr_glb_src;
+ int isr_port_link_sts_chg;
+ int imr_port_link_sts_chg;
+ int imr_glb;
+ void (*vlan_tables_read)(u32 vlan, struct rtl838x_vlan_info *info);
+ void (*vlan_set_tagged)(u32 vlan, struct rtl838x_vlan_info *info);
+ void (*vlan_set_untagged)(u32 vlan, u64 portmask);
+ void (*vlan_profile_dump)(int index);
+ void (*vlan_profile_setup)(int profile);
+ void (*vlan_port_pvidmode_set)(int port, enum pbvlan_type type, enum pbvlan_mode mode);
+ void (*vlan_port_pvid_set)(int port, enum pbvlan_type type, int pvid);
+ void (*vlan_port_keep_tag_set)(int port, bool keep_outer, bool keep_inner);
+ void (*set_vlan_igr_filter)(int port, enum igr_filter state);
+ void (*set_vlan_egr_filter)(int port, enum egr_filter state);
+ void (*enable_learning)(int port, bool enable);
+ void (*enable_flood)(int port, bool enable);
+ void (*enable_mcast_flood)(int port, bool enable);
+ void (*enable_bcast_flood)(int port, bool enable);
+ void (*stp_get)(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]);
+ void (*stp_set)(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]);
+ int (*mac_force_mode_ctrl)(int port);
+ int (*mac_port_ctrl)(int port);
+ int (*l2_port_new_salrn)(int port);
+ int (*l2_port_new_sa_fwd)(int port);
+ int (*set_ageing_time)(unsigned long msec);
+ int mir_ctrl;
+ int mir_dpm;
+ int mir_spm;
+ int mac_link_sts;
+ int mac_link_dup_sts;
+ int (*mac_link_spd_sts)(int port);
+ int mac_rx_pause_sts;
+ int mac_tx_pause_sts;
+ u64 (*read_l2_entry_using_hash)(u32 hash, u32 position, struct rtl838x_l2_entry *e);
+ void (*write_l2_entry_using_hash)(u32 hash, u32 pos, struct rtl838x_l2_entry *e);
+ u64 (*read_cam)(int idx, struct rtl838x_l2_entry *e);
+ void (*write_cam)(int idx, struct rtl838x_l2_entry *e);
+ int (*trk_mbr_ctr)(int group);
+ int rma_bpdu_fld_pmask;
+ int spcl_trap_eapol_ctrl;
+ void (*init_eee)(struct rtl838x_switch_priv *priv, bool enable);
+ void (*port_eee_set)(struct rtl838x_switch_priv *priv, int port, bool enable);
+ int (*eee_port_ability)(struct rtl838x_switch_priv *priv,
+ struct ethtool_eee *e, int port);
+ u64 (*l2_hash_seed)(u64 mac, u32 vid);
+ u32 (*l2_hash_key)(struct rtl838x_switch_priv *priv, u64 seed);
+ u64 (*read_mcast_pmask)(int idx);
+ void (*write_mcast_pmask)(int idx, u64 portmask);
+ void (*vlan_fwd_on_inner)(int port, bool is_set);
+ void (*pie_init)(struct rtl838x_switch_priv *priv);
+ int (*pie_rule_read)(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr);
+ int (*pie_rule_write)(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr);
+ int (*pie_rule_add)(struct rtl838x_switch_priv *priv, struct pie_rule *rule);
+ void (*pie_rule_rm)(struct rtl838x_switch_priv *priv, struct pie_rule *rule);
+ void (*l2_learning_setup)(void);
+ u32 (*packet_cntr_read)(int counter);
+ void (*packet_cntr_clear)(int counter);
+ void (*route_read)(int idx, struct rtl83xx_route *rt);
+ void (*route_write)(int idx, struct rtl83xx_route *rt);
+ void (*host_route_write)(int idx, struct rtl83xx_route *rt);
+ int (*l3_setup)(struct rtl838x_switch_priv *priv);
+ void (*set_l3_nexthop)(int idx, u16 dmac_id, u16 interface);
+ void (*get_l3_nexthop)(int idx, u16 *dmac_id, u16 *interface);
+ u64 (*get_l3_egress_mac)(u32 idx);
+ void (*set_l3_egress_mac)(u32 idx, u64 mac);
+ int (*find_l3_slot)(struct rtl83xx_route *rt, bool must_exist);
+ int (*route_lookup_hw)(struct rtl83xx_route *rt);
+ void (*get_l3_router_mac)(u32 idx, struct rtl93xx_rt_mac *m);
+ void (*set_l3_router_mac)(u32 idx, struct rtl93xx_rt_mac *m);
+ void (*set_l3_egress_intf)(int idx, struct rtl838x_l3_intf *intf);
+ void (*set_distribution_algorithm)(int group, int algoidx, u32 algomask);
+ void (*set_receive_management_action)(int port, rma_ctrl_t type, action_type_t action);
+ void (*led_init)(struct rtl838x_switch_priv *priv);
+};
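+/* Illustrative note (assumption, not part of the driver): the common code
+ * reaches the per-family implementations only through this ops table, e.g. a
+ * sketch of opening the forwarding path between a front port and the CPU port:
+ *
+ *   priv->r->traffic_enable(port, priv->cpu_port);
+ *   priv->r->traffic_enable(priv->cpu_port, port);
+ */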
+
+struct rtl838x_switch_priv {
+ /* Switch operation */
+ struct dsa_switch *ds;
+ struct device *dev;
+ u16 id;
+ u16 family_id;
+ char version;
+ struct rtl838x_port ports[57];
+ struct mutex reg_mutex; // Mutex for individual register manipulations
+ struct mutex pie_mutex; // Mutex for Packet Inspection Engine
+ int link_state_irq;
+ int mirror_group_ports[4];
+ struct mii_bus *mii_bus;
+ const struct rtl838x_reg *r;
+ u8 cpu_port;
+ u8 port_mask;
+ u8 port_width;
+ u8 port_ignore;
+ u64 irq_mask;
+ u32 fib_entries;
+ int l2_bucket_size;
+ struct dentry *dbgfs_dir;
+ int n_lags;
+ u64 lags_port_members[MAX_LAGS];
+ struct net_device *lag_devs[MAX_LAGS];
+ u32 lag_primary[MAX_LAGS];
+ u32 is_lagmember[57];
+ u64 lagmembers;
+ struct notifier_block nb; // TODO: change to different name
+ struct notifier_block ne_nb;
+ struct notifier_block fib_nb;
+ bool eee_enabled;
+ unsigned long int mc_group_bm[MAX_MC_GROUPS >> 5];
+ int mc_group_saves[MAX_MC_GROUPS];
+ int n_pie_blocks;
+ struct rhashtable tc_ht;
+ unsigned long int pie_use_bm[MAX_PIE_ENTRIES >> 5];
+ int n_counters;
+ unsigned long int octet_cntr_use_bm[MAX_COUNTERS >> 5];
+ unsigned long int packet_cntr_use_bm[MAX_COUNTERS >> 4];
+ struct rhltable routes;
+ unsigned long int route_use_bm[MAX_ROUTES >> 5];
+ unsigned long int host_route_use_bm[MAX_HOST_ROUTES >> 5];
+ struct rtl838x_l3_intf *interfaces[MAX_INTERFACES];
+ u16 intf_mtus[MAX_INTF_MTUS];
+ int intf_mtu_count[MAX_INTF_MTUS];
+};
+
+void rtl838x_dbgfs_init(struct rtl838x_switch_priv *priv);
+void rtl930x_dbgfs_init(struct rtl838x_switch_priv *priv);
+
+#endif /* _RTL838X_H */
diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl839x.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl839x.c
new file mode 100644
index 0000000000..986a4b5f45
--- /dev/null
+++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl839x.c
@@ -0,0 +1,1937 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <asm/mach-rtl838x/mach-rtl83xx.h>
+#include "rtl83xx.h"
+
+#define RTL839X_VLAN_PORT_TAG_STS_UNTAG 0x0
+#define RTL839X_VLAN_PORT_TAG_STS_TAGGED 0x1
+#define RTL839X_VLAN_PORT_TAG_STS_PRIORITY_TAGGED 0x2
+
+#define RTL839X_VLAN_PORT_TAG_STS_CTRL_BASE 0x6828
+/* port 0-52 */
+#define RTL839X_VLAN_PORT_TAG_STS_CTRL(port) \
+ (RTL839X_VLAN_PORT_TAG_STS_CTRL_BASE + ((port) << 2))
+#define RTL839X_VLAN_PORT_TAG_STS_CTRL_OTAG_STS_MASK GENMASK(7, 6)
+#define RTL839X_VLAN_PORT_TAG_STS_CTRL_ITAG_STS_MASK GENMASK(5, 4)
+#define RTL839X_VLAN_PORT_TAG_STS_CTRL_EGR_P_OTAG_KEEP_MASK GENMASK(3, 3)
+#define RTL839X_VLAN_PORT_TAG_STS_CTRL_EGR_P_ITAG_KEEP_MASK GENMASK(2, 2)
+#define RTL839X_VLAN_PORT_TAG_STS_CTRL_IGR_P_OTAG_KEEP_MASK GENMASK(1, 1)
+#define RTL839X_VLAN_PORT_TAG_STS_CTRL_IGR_P_ITAG_KEEP_MASK GENMASK(0, 0)
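+/* Illustrative sketch (assumption, not part of the driver): with the field
+ * masks above and <linux/bitfield.h>, forcing tagged egress for a port's
+ * outer tag could look roughly like:
+ *
+ *   sw_w32_mask(RTL839X_VLAN_PORT_TAG_STS_CTRL_OTAG_STS_MASK,
+ *               FIELD_PREP(RTL839X_VLAN_PORT_TAG_STS_CTRL_OTAG_STS_MASK,
+ *                          RTL839X_VLAN_PORT_TAG_STS_TAGGED),
+ *               RTL839X_VLAN_PORT_TAG_STS_CTRL(port));
+ */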
+
+extern struct mutex smi_lock;
+extern struct rtl83xx_soc_info soc_info;
+
+/* Definition of the RTL839X-specific template field IDs as used in the PIE */
+enum template_field_id {
+ TEMPLATE_FIELD_SPMMASK = 0,
+ TEMPLATE_FIELD_SPM0 = 1, // Source portmask ports 0-15
+ TEMPLATE_FIELD_SPM1 = 2, // Source portmask ports 16-31
+ TEMPLATE_FIELD_SPM2 = 3, // Source portmask ports 32-47
+ TEMPLATE_FIELD_SPM3 = 4, // Source portmask ports 48-56
+ TEMPLATE_FIELD_DMAC0 = 5, // Destination MAC [15:0]
+ TEMPLATE_FIELD_DMAC1 = 6, // Destination MAC [31:16]
+ TEMPLATE_FIELD_DMAC2 = 7, // Destination MAC [47:32]
+ TEMPLATE_FIELD_SMAC0 = 8, // Source MAC [15:0]
+ TEMPLATE_FIELD_SMAC1 = 9, // Source MAC [31:16]
+ TEMPLATE_FIELD_SMAC2 = 10, // Source MAC [47:32]
+ TEMPLATE_FIELD_ETHERTYPE = 11, // Ethernet frame type field
+ // Field-ID 12 is not used
+ TEMPLATE_FIELD_OTAG = 13,
+ TEMPLATE_FIELD_ITAG = 14,
+ TEMPLATE_FIELD_SIP0 = 15,
+ TEMPLATE_FIELD_SIP1 = 16,
+ TEMPLATE_FIELD_DIP0 = 17,
+ TEMPLATE_FIELD_DIP1 = 18,
+ TEMPLATE_FIELD_IP_TOS_PROTO = 19,
+ TEMPLATE_FIELD_IP_FLAG = 20,
+ TEMPLATE_FIELD_L4_SPORT = 21,
+ TEMPLATE_FIELD_L4_DPORT = 22,
+ TEMPLATE_FIELD_L34_HEADER = 23,
+ TEMPLATE_FIELD_ICMP_IGMP = 24,
+ TEMPLATE_FIELD_VID_RANG0 = 25,
+ TEMPLATE_FIELD_VID_RANG1 = 26,
+ TEMPLATE_FIELD_L4_PORT_RANG = 27,
+ TEMPLATE_FIELD_FIELD_SELECTOR_VALID = 28,
+ TEMPLATE_FIELD_FIELD_SELECTOR_0 = 29,
+ TEMPLATE_FIELD_FIELD_SELECTOR_1 = 30,
+ TEMPLATE_FIELD_FIELD_SELECTOR_2 = 31,
+ TEMPLATE_FIELD_FIELD_SELECTOR_3 = 32,
+ TEMPLATE_FIELD_FIELD_SELECTOR_4 = 33,
+ TEMPLATE_FIELD_FIELD_SELECTOR_5 = 34,
+ TEMPLATE_FIELD_SIP2 = 35,
+ TEMPLATE_FIELD_SIP3 = 36,
+ TEMPLATE_FIELD_SIP4 = 37,
+ TEMPLATE_FIELD_SIP5 = 38,
+ TEMPLATE_FIELD_SIP6 = 39,
+ TEMPLATE_FIELD_SIP7 = 40,
+ TEMPLATE_FIELD_OLABEL = 41,
+ TEMPLATE_FIELD_ILABEL = 42,
+ TEMPLATE_FIELD_OILABEL = 43,
+ TEMPLATE_FIELD_DPMMASK = 44,
+ TEMPLATE_FIELD_DPM0 = 45,
+ TEMPLATE_FIELD_DPM1 = 46,
+ TEMPLATE_FIELD_DPM2 = 47,
+ TEMPLATE_FIELD_DPM3 = 48,
+ TEMPLATE_FIELD_L2DPM0 = 49,
+ TEMPLATE_FIELD_L2DPM1 = 50,
+ TEMPLATE_FIELD_L2DPM2 = 51,
+ TEMPLATE_FIELD_L2DPM3 = 52,
+ TEMPLATE_FIELD_IVLAN = 53,
+ TEMPLATE_FIELD_OVLAN = 54,
+ TEMPLATE_FIELD_FWD_VID = 55,
+ TEMPLATE_FIELD_DIP2 = 56,
+ TEMPLATE_FIELD_DIP3 = 57,
+ TEMPLATE_FIELD_DIP4 = 58,
+ TEMPLATE_FIELD_DIP5 = 59,
+ TEMPLATE_FIELD_DIP6 = 60,
+ TEMPLATE_FIELD_DIP7 = 61,
+};
+
+// Number of fixed templates predefined in the SoC
+#define N_FIXED_TEMPLATES 5
+static enum template_field_id fixed_templates[N_FIXED_TEMPLATES][N_FIXED_FIELDS] =
+{
+ {
+ TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1, TEMPLATE_FIELD_ITAG,
+ TEMPLATE_FIELD_SMAC0, TEMPLATE_FIELD_SMAC1, TEMPLATE_FIELD_SMAC2,
+ TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2,
+ TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_SPM2, TEMPLATE_FIELD_SPM3
+ }, {
+ TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0,
+ TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_IP_TOS_PROTO, TEMPLATE_FIELD_L4_SPORT,
+ TEMPLATE_FIELD_L4_DPORT, TEMPLATE_FIELD_ICMP_IGMP, TEMPLATE_FIELD_SPM0,
+ TEMPLATE_FIELD_SPM1, TEMPLATE_FIELD_SPM2, TEMPLATE_FIELD_SPM3
+ }, {
+ TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2,
+ TEMPLATE_FIELD_ITAG, TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_IP_TOS_PROTO,
+ TEMPLATE_FIELD_L4_DPORT, TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_SIP0,
+ TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0, TEMPLATE_FIELD_DIP1
+ }, {
+ TEMPLATE_FIELD_DIP0, TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_DIP2,
+ TEMPLATE_FIELD_DIP3, TEMPLATE_FIELD_DIP4, TEMPLATE_FIELD_DIP5,
+ TEMPLATE_FIELD_DIP6, TEMPLATE_FIELD_DIP7, TEMPLATE_FIELD_L4_DPORT,
+ TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_ICMP_IGMP, TEMPLATE_FIELD_IP_TOS_PROTO
+ }, {
+ TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_SIP2,
+ TEMPLATE_FIELD_SIP3, TEMPLATE_FIELD_SIP4, TEMPLATE_FIELD_SIP5,
+ TEMPLATE_FIELD_SIP6, TEMPLATE_FIELD_SIP7, TEMPLATE_FIELD_SPM0,
+ TEMPLATE_FIELD_SPM1, TEMPLATE_FIELD_SPM2, TEMPLATE_FIELD_SPM3
+ },
+};
+
+void rtl839x_print_matrix(void)
+{
+ volatile u64 *ptr9;
+ int i;
+
+ ptr9 = RTL838X_SW_BASE + RTL839X_PORT_ISO_CTRL(0);
+ for (i = 0; i < 52; i += 4)
+ pr_debug("> %16llx %16llx %16llx %16llx\n",
+ ptr9[i + 0], ptr9[i + 1], ptr9[i + 2], ptr9[i + 3]);
+ pr_debug("CPU_PORT> %16llx\n", ptr9[52]);
+}
+
+static inline int rtl839x_port_iso_ctrl(int p)
+{
+ return RTL839X_PORT_ISO_CTRL(p);
+}
+
+static inline void rtl839x_exec_tbl0_cmd(u32 cmd)
+{
+ sw_w32(cmd, RTL839X_TBL_ACCESS_CTRL_0);
+ do { } while (sw_r32(RTL839X_TBL_ACCESS_CTRL_0) & BIT(16));
+}
+
+static inline void rtl839x_exec_tbl1_cmd(u32 cmd)
+{
+ sw_w32(cmd, RTL839X_TBL_ACCESS_CTRL_1);
+ do { } while (sw_r32(RTL839X_TBL_ACCESS_CTRL_1) & BIT(16));
+}
+
+inline void rtl839x_exec_tbl2_cmd(u32 cmd)
+{
+ sw_w32(cmd, RTL839X_TBL_ACCESS_CTRL_2);
+ do { } while (sw_r32(RTL839X_TBL_ACCESS_CTRL_2) & (1 << 9));
+}
+
+static inline int rtl839x_tbl_access_data_0(int i)
+{
+ return RTL839X_TBL_ACCESS_DATA_0(i);
+}
+
+static void rtl839x_vlan_tables_read(u32 vlan, struct rtl838x_vlan_info *info)
+{
+ u32 u, v, w;
+ // Read VLAN table (0) via register 0
+ struct table_reg *r = rtl_table_get(RTL8390_TBL_0, 0);
+
+ rtl_table_read(r, vlan);
+ u = sw_r32(rtl_table_data(r, 0));
+ v = sw_r32(rtl_table_data(r, 1));
+ w = sw_r32(rtl_table_data(r, 2));
+ rtl_table_release(r);
+
+ info->tagged_ports = u;
+ info->tagged_ports = (info->tagged_ports << 21) | ((v >> 11) & 0x1fffff);
+ info->profile_id = w >> 30 | ((v & 1) << 2);
+ info->hash_mc_fid = !!(w & BIT(2));
+ info->hash_uc_fid = !!(w & BIT(3));
+ info->fid = (v >> 3) & 0xff;
+
+ // Read UNTAG table (0) via table register 1
+ r = rtl_table_get(RTL8390_TBL_1, 0);
+ rtl_table_read(r, vlan);
+ u = sw_r32(rtl_table_data(r, 0));
+ v = sw_r32(rtl_table_data(r, 1));
+ rtl_table_release(r);
+
+ info->untagged_ports = u;
+ info->untagged_ports = (info->untagged_ports << 21) | ((v >> 11) & 0x1fffff);
+}
+
+static void rtl839x_vlan_set_tagged(u32 vlan, struct rtl838x_vlan_info *info)
+{
+ u32 u, v, w;
+ // Access VLAN table (0) via register 0
+ struct table_reg *r = rtl_table_get(RTL8390_TBL_0, 0);
+
+ u = info->tagged_ports >> 21;
+ v = info->tagged_ports << 11;
+ v |= ((u32)info->fid) << 3;
+ v |= info->hash_uc_fid ? BIT(2) : 0;
+ v |= info->hash_mc_fid ? BIT(1) : 0;
+ v |= (info->profile_id & 0x4) ? 1 : 0;
+ w = ((u32)(info->profile_id & 3)) << 30;
+
+ sw_w32(u, rtl_table_data(r, 0));
+ sw_w32(v, rtl_table_data(r, 1));
+ sw_w32(w, rtl_table_data(r, 2));
+
+ rtl_table_write(r, vlan);
+ rtl_table_release(r);
+}
+
+static void rtl839x_vlan_set_untagged(u32 vlan, u64 portmask)
+{
+ u32 u, v;
+
+ // Access UNTAG table (0) via table register 1
+ struct table_reg *r = rtl_table_get(RTL8390_TBL_1, 0);
+
+ u = portmask >> 21;
+ v = portmask << 11;
+
+ sw_w32(u, rtl_table_data(r, 0));
+ sw_w32(v, rtl_table_data(r, 1));
+ rtl_table_write(r, vlan);
+
+ rtl_table_release(r);
+}
+
+/* Sets the L2 forwarding to be based on either the inner VLAN tag or the outer
+ */
+static void rtl839x_vlan_fwd_on_inner(int port, bool is_set)
+{
+ if (is_set)
+ rtl839x_mask_port_reg_be(BIT_ULL(port), 0ULL, RTL839X_VLAN_PORT_FWD);
+ else
+ rtl839x_mask_port_reg_be(0ULL, BIT_ULL(port), RTL839X_VLAN_PORT_FWD);
+}
+
+/*
+ * Hash seed is vid (actually rvid) concatenated with the MAC address
+ */
+static u64 rtl839x_l2_hash_seed(u64 mac, u32 vid)
+{
+ u64 v = vid;
+
+ v <<= 48;
+ v |= mac;
+
+ return v;
+}
+
+/*
+ * Applies the same hash algorithm as the one used currently by the ASIC to the seed
+ * and returns a key into the L2 hash table
+ */
+static u32 rtl839x_l2_hash_key(struct rtl838x_switch_priv *priv, u64 seed)
+{
+ u32 h1, h2, h;
+
+ if (sw_r32(priv->r->l2_ctrl_0) & 1) {
+ h1 = (u32) (((seed >> 60) & 0x3f) ^ ((seed >> 54) & 0x3f)
+ ^ ((seed >> 36) & 0x3f) ^ ((seed >> 30) & 0x3f)
+ ^ ((seed >> 12) & 0x3f) ^ ((seed >> 6) & 0x3f));
+ h2 = (u32) (((seed >> 48) & 0x3f) ^ ((seed >> 42) & 0x3f)
+ ^ ((seed >> 24) & 0x3f) ^ ((seed >> 18) & 0x3f)
+ ^ (seed & 0x3f));
+ h = (h1 << 6) | h2;
+ } else {
+ h = (seed >> 60)
+ ^ ((((seed >> 48) & 0x3f) << 6) | ((seed >> 54) & 0x3f))
+ ^ ((seed >> 36) & 0xfff) ^ ((seed >> 24) & 0xfff)
+ ^ ((seed >> 12) & 0xfff) ^ (seed & 0xfff);
+ }
+
+ return h;
+}
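+/* Illustrative lookup sketch (assumption, not part of the driver): combining
+ * the two helpers above with the bucket read further below, an entry for a
+ * MAC/VID pair would be located roughly like this:
+ *
+ *   u64 seed = rtl839x_l2_hash_seed(mac, rvid);
+ *   u32 key = rtl839x_l2_hash_key(priv, seed);
+ *   for (pos = 0; pos < priv->l2_bucket_size; pos++)
+ *           if (rtl839x_read_l2_entry_using_hash(key, pos, &e) == seed)
+ *                   break;  // found in bucket 'key' at position 'pos'
+ */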
+
+static inline int rtl839x_mac_force_mode_ctrl(int p)
+{
+ return RTL839X_MAC_FORCE_MODE_CTRL + (p << 2);
+}
+
+static inline int rtl839x_mac_port_ctrl(int p)
+{
+ return RTL839X_MAC_PORT_CTRL(p);
+}
+
+static inline int rtl839x_l2_port_new_salrn(int p)
+{
+ return RTL839X_L2_PORT_NEW_SALRN(p);
+}
+
+static inline int rtl839x_l2_port_new_sa_fwd(int p)
+{
+ return RTL839X_L2_PORT_NEW_SA_FWD(p);
+}
+
+static inline int rtl839x_mac_link_spd_sts(int p)
+{
+ return RTL839X_MAC_LINK_SPD_STS(p);
+}
+
+static inline int rtl839x_trk_mbr_ctr(int group)
+{
+ return RTL839X_TRK_MBR_CTR + (group << 3);
+}
+
+static void rtl839x_fill_l2_entry(u32 r[], struct rtl838x_l2_entry *e)
+{
+ /* Table contains different entry types, we need to identify the right one:
+ * Check for MC entries, first
+ */
+ e->is_ip_mc = !!(r[2] & BIT(31));
+ e->is_ipv6_mc = !!(r[2] & BIT(30));
+ e->type = L2_INVALID;
+ if (!e->is_ip_mc && !e->is_ipv6_mc) {
+ e->mac[0] = (r[0] >> 12);
+ e->mac[1] = (r[0] >> 4);
+ e->mac[2] = ((r[1] >> 28) | (r[0] << 4));
+ e->mac[3] = (r[1] >> 20);
+ e->mac[4] = (r[1] >> 12);
+ e->mac[5] = (r[1] >> 4);
+
+ e->vid = (r[2] >> 4) & 0xfff;
+ e->rvid = (r[0] >> 20) & 0xfff;
+
+ /* Is it a unicast entry? check multicast bit */
+ if (!(e->mac[0] & 1)) {
+ e->is_static = !!((r[2] >> 18) & 1);
+ e->port = (r[2] >> 24) & 0x3f;
+ e->block_da = !!(r[2] & (1 << 19));
+ e->block_sa = !!(r[2] & (1 << 20));
+ e->suspended = !!(r[2] & (1 << 17));
+ e->next_hop = !!(r[2] & (1 << 16));
+ if (e->next_hop) {
+ pr_debug("Found next hop entry, need to read data\n");
+ e->nh_vlan_target = !!(r[2] & BIT(15));
+ e->nh_route_id = (r[2] >> 4) & 0x1ff;
+ e->vid = e->rvid;
+ }
+ e->age = (r[2] >> 21) & 3;
+ e->valid = true;
+ if (!(r[2] & 0xc0fd0000)) /* Check for valid entry */
+ e->valid = false;
+ else
+ e->type = L2_UNICAST;
+ } else {
+ e->valid = true;
+ e->type = L2_MULTICAST;
+ e->mc_portmask_index = (r[2] >> 6) & 0xfff;
+ e->vid = e->rvid;
+ }
+ } else { // IPv4 and IPv6 multicast
+ e->vid = e->rvid = (r[0] >> 20) & 0xfff;
+ e->mc_gip = r[1];
+ e->mc_portmask_index = (r[2] >> 6) & 0xfff;
+ }
+ if (e->is_ip_mc) {
+ e->valid = true;
+ e->type = IP4_MULTICAST;
+ }
+ if (e->is_ipv6_mc) {
+ e->valid = true;
+ e->type = IP6_MULTICAST;
+ }
+ // pr_info("%s: vid %d, rvid: %d\n", __func__, e->vid, e->rvid);
+}
+
+/*
+ * Fills the 3 SoC table registers r[] with the information in the rtl838x_l2_entry
+ */
+static void rtl839x_fill_l2_row(u32 r[], struct rtl838x_l2_entry *e)
+{
+ if (!e->valid) {
+ r[0] = r[1] = r[2] = 0;
+ return;
+ }
+
+ r[2] = e->is_ip_mc ? BIT(31) : 0;
+ r[2] |= e->is_ipv6_mc ? BIT(30) : 0;
+
+ if (!e->is_ip_mc && !e->is_ipv6_mc) {
+ r[0] = ((u32)e->mac[0]) << 12;
+ r[0] |= ((u32)e->mac[1]) << 4;
+ r[0] |= ((u32)e->mac[2]) >> 4;
+ r[1] = ((u32)e->mac[2]) << 28;
+ r[1] |= ((u32)e->mac[3]) << 20;
+ r[1] |= ((u32)e->mac[4]) << 12;
+ r[1] |= ((u32)e->mac[5]) << 4;
+
+ if (!(e->mac[0] & 1)) { // Not multicast
+ r[2] |= e->is_static ? BIT(18) : 0;
+ r[0] |= ((u32)e->rvid) << 20;
+ r[2] |= e->port << 24;
+ r[2] |= e->block_da ? BIT(19) : 0;
+ r[2] |= e->block_sa ? BIT(20) : 0;
+ r[2] |= e->suspended ? BIT(17) : 0;
+ r[2] |= ((u32)e->age) << 21;
+ if (e->next_hop) {
+ r[2] |= BIT(16);
+ r[2] |= e->nh_vlan_target ? BIT(15) : 0;
+ r[2] |= (e->nh_route_id & 0x7ff) << 4;
+ } else {
+ r[2] |= e->vid << 4;
+ }
+ pr_debug("Write L2 NH: %08x %08x %08x\n", r[0], r[1], r[2]);
+ } else { // L2 Multicast
+ r[0] |= ((u32)e->rvid) << 20;
+ r[2] |= ((u32)e->mc_portmask_index) << 6;
+ }
+ } else { // IPv4 or IPv6 MC entry
+ r[0] = ((u32)e->rvid) << 20;
+ r[1] = e->mc_gip;
+ r[2] |= ((u32)e->mc_portmask_index) << 6;
+ }
+}
+
+/*
+ * Read an L2 UC or MC entry out of a hash bucket of the L2 forwarding table
+ * hash is the id of the bucket and pos is the position of the entry in that bucket
+ * The data read from the SoC is filled into rtl838x_l2_entry
+ */
+static u64 rtl839x_read_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e)
+{
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 0);
+ u32 idx = (0 << 14) | (hash << 2) | pos; // Search SRAM, with hash and at pos in bucket
+ int i;
+
+ rtl_table_read(q, idx);
+ for (i = 0; i < 3; i++)
+ r[i] = sw_r32(rtl_table_data(q, i));
+
+ rtl_table_release(q);
+
+ rtl839x_fill_l2_entry(r, e);
+ if (!e->valid)
+ return 0;
+
+ return rtl839x_l2_hash_seed(ether_addr_to_u64(&e->mac[0]), e->rvid);
+}
+
+static void rtl839x_write_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e)
+{
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 0);
+ int i;
+
+ u32 idx = (0 << 14) | (hash << 2) | pos; // Access SRAM, with hash and at pos in bucket
+
+ rtl839x_fill_l2_row(r, e);
+
+ for (i = 0; i < 3; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+}
+
+static u64 rtl839x_read_cam(int idx, struct rtl838x_l2_entry *e)
+{
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 1); // Access L2 Table 1
+ int i;
+
+ rtl_table_read(q, idx);
+ for (i = 0; i < 3; i++)
+ r[i] = sw_r32(rtl_table_data(q, i));
+
+ rtl_table_release(q);
+
+ rtl839x_fill_l2_entry(r, e);
+ if (!e->valid)
+ return 0;
+
+ pr_debug("Found in CAM: R1 %x R2 %x R3 %x\n", r[0], r[1], r[2]);
+
+ // Return the MAC with the concatenated VID as the hash seed
+ return rtl839x_l2_hash_seed(ether_addr_to_u64(&e->mac[0]), e->rvid);
+}
+
+static void rtl839x_write_cam(int idx, struct rtl838x_l2_entry *e)
+{
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 1); // Access L2 Table 1
+ int i;
+
+ rtl839x_fill_l2_row(r, e);
+
+ for (i = 0; i < 3; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+}
+
+static u64 rtl839x_read_mcast_pmask(int idx)
+{
+ u64 portmask;
+ // Read MC_PMSK (2) via register RTL8390_TBL_L2
+ struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 2);
+
+ rtl_table_read(q, idx);
+ portmask = sw_r32(rtl_table_data(q, 0));
+ portmask <<= 32;
+ portmask |= sw_r32(rtl_table_data(q, 1));
+ portmask >>= 11; // LSB is bit 11 in data registers
+ rtl_table_release(q);
+
+ return portmask;
+}
+
+static void rtl839x_write_mcast_pmask(int idx, u64 portmask)
+{
+ // Access MC_PMSK (2) via register RTL8390_TBL_L2
+ struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 2);
+
+ portmask <<= 11; // LSB is bit 11 in data registers
+ sw_w32((u32)(portmask >> 32), rtl_table_data(q, 0));
+ sw_w32((u32)((portmask & 0xfffff800)), rtl_table_data(q, 1));
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+}
+
+static void rtl839x_vlan_profile_setup(int profile)
+{
+ u32 p[2];
+ u32 pmask_id = UNKNOWN_MC_PMASK;
+
+ p[0] = pmask_id; // Use portmask 0xfff for unknown IPv6 MC flooding
+ // Enable L2 Learning BIT 0, portmask UNKNOWN_MC_PMASK for IP/L2-MC traffic flooding
+ p[1] = 1 | pmask_id << 1 | pmask_id << 13;
+
+ sw_w32(p[0], RTL839X_VLAN_PROFILE(profile));
+ sw_w32(p[1], RTL839X_VLAN_PROFILE(profile) + 4);
+
+ rtl839x_write_mcast_pmask(UNKNOWN_MC_PMASK, 0x001fffffffffffff);
+}
+
+u64 rtl839x_traffic_get(int source)
+{
+ return rtl839x_get_port_reg_be(rtl839x_port_iso_ctrl(source));
+}
+
+void rtl839x_traffic_set(int source, u64 dest_matrix)
+{
+ rtl839x_set_port_reg_be(dest_matrix, rtl839x_port_iso_ctrl(source));
+}
+
+void rtl839x_traffic_enable(int source, int dest)
+{
+ rtl839x_mask_port_reg_be(0, BIT_ULL(dest), rtl839x_port_iso_ctrl(source));
+}
+
+void rtl839x_traffic_disable(int source, int dest)
+{
+ rtl839x_mask_port_reg_be(BIT_ULL(dest), 0, rtl839x_port_iso_ctrl(source));
+}
+
+static void rtl839x_l2_learning_setup(void)
+{
+ /* Set portmask for broadcast (offset bit 12) and unknown unicast (offset 0)
+ * address flooding to the reserved entry in the portmask table used
+ * also for multicast flooding */
+ sw_w32(UNKNOWN_MC_PMASK << 12 | UNKNOWN_MC_PMASK, RTL839X_L2_FLD_PMSK);
+
+ // Limit learning to maximum: 32k entries, after that just flood (bits 0-1)
+ sw_w32((0x7fff << 2) | 0, RTL839X_L2_LRN_CONSTRT);
+
+ // Do not trap ARP packets to CPU_PORT
+ sw_w32(0, RTL839X_SPCL_TRAP_ARP_CTRL);
+}
+
+static void rtl839x_enable_learning(int port, bool enable)
+{
+ // Limit learning to maximum: 32k entries
+
+ sw_w32_mask(0x7fff << 2, enable ? (0x7fff << 2) : 0,
+ RTL839X_L2_PORT_LRN_CONSTRT + (port << 2));
+}
+
+static void rtl839x_enable_flood(int port, bool enable)
+{
+ /*
+ * 0: Forward
+ * 1: Disable
+ * 2: to CPU
+ * 3: Copy to CPU
+ */
+ sw_w32_mask(0x3, enable ? 0 : 1,
+ RTL839X_L2_PORT_LRN_CONSTRT + (port << 2));
+}
+
+static void rtl839x_enable_mcast_flood(int port, bool enable)
+{
+
+}
+
+static void rtl839x_enable_bcast_flood(int port, bool enable)
+{
+
+}
+
+irqreturn_t rtl839x_switch_irq(int irq, void *dev_id)
+{
+ struct dsa_switch *ds = dev_id;
+ u32 status = sw_r32(RTL839X_ISR_GLB_SRC);
+ u64 ports = rtl839x_get_port_reg_le(RTL839X_ISR_PORT_LINK_STS_CHG);
+ u64 link;
+ int i;
+
+ /* Clear status */
+ rtl839x_set_port_reg_le(ports, RTL839X_ISR_PORT_LINK_STS_CHG);
+ pr_debug("RTL8390 Link change: status: %x, ports %llx\n", status, ports);
+
+ for (i = 0; i < RTL839X_CPU_PORT; i++) {
+ if (ports & BIT_ULL(i)) {
+ link = rtl839x_get_port_reg_le(RTL839X_MAC_LINK_STS);
+ if (link & BIT_ULL(i))
+ dsa_port_phylink_mac_change(ds, i, true);
+ else
+ dsa_port_phylink_mac_change(ds, i, false);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+// TODO: unused
+int rtl8390_sds_power(int mac, int val)
+{
+ u32 offset = (mac == 48) ? 0x0 : 0x100;
+ u32 mode = val ? 0 : 1;
+
+ pr_debug("In %s: mac %d, set %d\n", __func__, mac, val);
+
+ if ((mac != 48) && (mac != 49)) {
+ pr_err("%s: not an SFP port: %d\n", __func__, mac);
+ return -1;
+ }
+
+ // Set bit 1003. 1000 starts at 7c
+ sw_w32_mask(BIT(11), mode << 11, RTL839X_SDS12_13_PWR0 + offset);
+
+ return 0;
+}
+
+static int rtl839x_smi_wait_op(int timeout)
+{
+ int ret = 0;
+ u32 val;
+
+ ret = readx_poll_timeout(sw_r32, RTL839X_PHYREG_ACCESS_CTRL,
+ val, !(val & 0x1), 20, timeout);
+ if (ret)
+ pr_err("%s: timeout\n", __func__);
+
+ return ret;
+}
+
+int rtl839x_read_phy(u32 port, u32 page, u32 reg, u32 *val)
+{
+ u32 v;
+ int err = 0;
+
+ if (port > 63 || page > 4095 || reg > 31)
+ return -ENOTSUPP;
+
+ // Take bug on RTL839x Rev <= C into account
+ if (port >= RTL839X_CPU_PORT)
+ return -EIO;
+
+ mutex_lock(&smi_lock);
+
+ sw_w32_mask(0xffff0000, port << 16, RTL839X_PHYREG_DATA_CTRL);
+ v = reg << 5 | page << 10 | ((page == 0x1fff) ? 0x1f : 0) << 23;
+ sw_w32(v, RTL839X_PHYREG_ACCESS_CTRL);
+
+ sw_w32(0x1ff, RTL839X_PHYREG_CTRL);
+
+ v |= 1;
+ sw_w32(v, RTL839X_PHYREG_ACCESS_CTRL);
+
+ err = rtl839x_smi_wait_op(100000);
+ if (err)
+ goto errout;
+
+ *val = sw_r32(RTL839X_PHYREG_DATA_CTRL) & 0xffff;
+
+errout:
+ mutex_unlock(&smi_lock);
+ return err;
+}
+
+int rtl839x_write_phy(u32 port, u32 page, u32 reg, u32 val)
+{
+ u32 v;
+ int err = 0;
+
+ val &= 0xffff;
+ if (port > 63 || page > 4095 || reg > 31)
+ return -ENOTSUPP;
+
+ // Take bug on RTL839x Rev <= C into account
+ if (port >= RTL839X_CPU_PORT)
+ return -EIO;
+
+ mutex_lock(&smi_lock);
+
+ // Set PHY to access
+ rtl839x_set_port_reg_le(BIT_ULL(port), RTL839X_PHYREG_PORT_CTRL);
+
+ sw_w32_mask(0xffff0000, val << 16, RTL839X_PHYREG_DATA_CTRL);
+
+ v = reg << 5 | page << 10 | ((page == 0x1fff) ? 0x1f : 0) << 23;
+ sw_w32(v, RTL839X_PHYREG_ACCESS_CTRL);
+
+ sw_w32(0x1ff, RTL839X_PHYREG_CTRL);
+
+ v |= BIT(3) | 1; /* Write operation and execute */
+ sw_w32(v, RTL839X_PHYREG_ACCESS_CTRL);
+
+ err = rtl839x_smi_wait_op(100000);
+ if (err)
+ goto errout;
+
+ if (sw_r32(RTL839X_PHYREG_ACCESS_CTRL) & 0x2)
+ err = -EIO;
+
+errout:
+ mutex_unlock(&smi_lock);
+ return err;
+}
+
+/*
+ * Read an mmd register of the PHY
+ */
+int rtl839x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val)
+{
+ int err = 0;
+ u32 v;
+
+ // Take bug on RTL839x Rev <= C into account
+ if (port >= RTL839X_CPU_PORT)
+ return -EIO;
+
+ mutex_lock(&smi_lock);
+
+ // Set PHY to access
+ sw_w32_mask(0xffff << 16, port << 16, RTL839X_PHYREG_DATA_CTRL);
+
+ // Set MMD device number and register to write to
+ sw_w32(devnum << 16 | (regnum & 0xffff), RTL839X_PHYREG_MMD_CTRL);
+
+ v = BIT(2) | BIT(0); // MMD-access | EXEC
+ sw_w32(v, RTL839X_PHYREG_ACCESS_CTRL);
+
+ err = rtl839x_smi_wait_op(100000);
+ if (err)
+ goto errout;
+
+ // There is no error-checking via BIT 1 of v, as it does not seem to be set correctly
+ *val = (sw_r32(RTL839X_PHYREG_DATA_CTRL) & 0xffff);
+ pr_debug("%s: port %d, regnum: %x, val: %x (err %d)\n", __func__, port, regnum, *val, err);
+
+errout:
+ mutex_unlock(&smi_lock);
+ return err;
+}
+
+/*
+ * Write to an mmd register of the PHY
+ */
+int rtl839x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val)
+{
+ int err = 0;
+ u32 v;
+
+ // Take bug on RTL839x Rev <= C into account
+ if (port >= RTL839X_CPU_PORT)
+ return -EIO;
+
+ mutex_lock(&smi_lock);
+
+ // Set PHY to access
+ rtl839x_set_port_reg_le(BIT_ULL(port), RTL839X_PHYREG_PORT_CTRL);
+
+ // Set data to write
+ sw_w32_mask(0xffff << 16, val << 16, RTL839X_PHYREG_DATA_CTRL);
+
+ // Set MMD device number and register to write to
+ sw_w32(devnum << 16 | (regnum & 0xffff), RTL839X_PHYREG_MMD_CTRL);
+
+ v = BIT(3) | BIT(2) | BIT(0); // WRITE | MMD-access | EXEC
+ sw_w32(v, RTL839X_PHYREG_ACCESS_CTRL);
+
+ err = rtl839x_smi_wait_op(100000);
+ if (err)
+ goto errout;
+
+ pr_debug("%s: port %d, regnum: %x, val: %x (err %d)\n", __func__, port, regnum, val, err);
+
+errout:
+ mutex_unlock(&smi_lock);
+ return err;
+}
+
+void rtl8390_get_version(struct rtl838x_switch_priv *priv)
+{
+ u32 info, model;
+
+ sw_w32_mask(0xf << 28, 0xa << 28, RTL839X_CHIP_INFO);
+ info = sw_r32(RTL839X_CHIP_INFO);
+
+ model = sw_r32(RTL839X_MODEL_NAME_INFO);
+ priv->version = RTL8390_VERSION_A + ((model & 0x3f) >> 1);
+
+ pr_info("RTL839X Chip-Info: %x, version %c\n", info, priv->version);
+}
+
+void rtl839x_vlan_profile_dump(int profile)
+{
+ u32 p[2];
+
+ if (profile < 0 || profile > 7)
+ return;
+
+ p[0] = sw_r32(RTL839X_VLAN_PROFILE(profile));
+ p[1] = sw_r32(RTL839X_VLAN_PROFILE(profile) + 4);
+
+ pr_info("VLAN profile %d: L2 learning: %d, UNKN L2MC FLD PMSK %d, \
+ UNKN IPMC FLD PMSK %d, UNKN IPv6MC FLD PMSK: %d",
+ profile, p[1] & 1, (p[1] >> 1) & 0xfff, (p[1] >> 13) & 0xfff,
+ (p[0]) & 0xfff);
+ pr_info("VLAN profile %d: raw %08x, %08x\n", profile, p[0], p[1]);
+}
+
+static void rtl839x_stp_get(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
+{
+ int i;
+ u32 cmd = 1 << 16 /* Execute cmd */
+ | 0 << 15 /* Read */
+ | 5 << 12 /* Table type 0b101 */
+ | (msti & 0xfff);
+ priv->r->exec_tbl0_cmd(cmd);
+
+ for (i = 0; i < 4; i++)
+ port_state[i] = sw_r32(priv->r->tbl_access_data_0(i));
+}
+
+static void rtl839x_stp_set(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
+{
+ int i;
+ u32 cmd = 1 << 16 /* Execute cmd */
+ | 1 << 15 /* Write */
+ | 5 << 12 /* Table type 0b101 */
+ | (msti & 0xfff);
+ for (i = 0; i < 4; i++)
+ sw_w32(port_state[i], priv->r->tbl_access_data_0(i));
+ priv->r->exec_tbl0_cmd(cmd);
+}
+
+/*
+ * Enables or disables the EEE/EEEP capability of a port
+ */
+void rtl839x_port_eee_set(struct rtl838x_switch_priv *priv, int port, bool enable)
+{
+ u32 v;
+
+ // This works only for Ethernet ports, and on the RTL839X, ports above 47 are SFP
+ if (port >= 48)
+ return;
+
+ enable = true;
+ pr_debug("In %s: setting port %d to %d\n", __func__, port, enable);
+ v = enable ? 0xf : 0x0;
+
+ // Set EEE for 100, 500, 1000MBit and 10GBit
+ sw_w32_mask(0xf << 8, v << 8, rtl839x_mac_force_mode_ctrl(port));
+
+ // Set TX/RX EEE state
+ v = enable ? 0x3 : 0x0;
+ sw_w32(v, RTL839X_EEE_CTRL(port));
+
+ priv->ports[port].eee_enabled = enable;
+}
+
+/*
+ * Get EEE own capabilities and negotiation result
+ */
+int rtl839x_eee_port_ability(struct rtl838x_switch_priv *priv, struct ethtool_eee *e, int port)
+{
+ u64 link, a;
+
+ if (port >= 48)
+ return 0;
+
+ link = rtl839x_get_port_reg_le(RTL839X_MAC_LINK_STS);
+ if (!(link & BIT_ULL(port)))
+ return 0;
+
+ if (sw_r32(rtl839x_mac_force_mode_ctrl(port)) & BIT(8))
+ e->advertised |= ADVERTISED_100baseT_Full;
+
+ if (sw_r32(rtl839x_mac_force_mode_ctrl(port)) & BIT(10))
+ e->advertised |= ADVERTISED_1000baseT_Full;
+
+ a = rtl839x_get_port_reg_le(RTL839X_MAC_EEE_ABLTY);
+ pr_info("Link partner: %016llx\n", a);
+ if (rtl839x_get_port_reg_le(RTL839X_MAC_EEE_ABLTY) & BIT_ULL(port)) {
+ e->lp_advertised = ADVERTISED_100baseT_Full;
+ e->lp_advertised |= ADVERTISED_1000baseT_Full;
+ return 1;
+ }
+
+ return 0;
+}
+
+static void rtl839x_init_eee(struct rtl838x_switch_priv *priv, bool enable)
+{
+ int i;
+
+ pr_info("Setting up EEE, state: %d\n", enable);
+
+ // Set wake timer for TX and pause timer both to 0x21
+ sw_w32_mask(0xff << 20 | 0xff, 0x21 << 20 | 0x21, RTL839X_EEE_TX_TIMER_GELITE_CTRL);
+ // Set pause wake timer for GIGA-EEE to 0x11
+ sw_w32_mask(0xff << 20, 0x11 << 20, RTL839X_EEE_TX_TIMER_GIGA_CTRL);
+ // Set pause wake timer for 10GBit ports to 0x11
+ sw_w32_mask(0xff << 20, 0x11 << 20, RTL839X_EEE_TX_TIMER_10G_CTRL);
+
+ // Setup EEE on all ports
+ for (i = 0; i < priv->cpu_port; i++) {
+ if (priv->ports[i].phy)
+ rtl839x_port_eee_set(priv, i, enable);
+ }
+ priv->eee_enabled = enable;
+}
+
+static void rtl839x_pie_lookup_enable(struct rtl838x_switch_priv *priv, int index)
+{
+ int block = index / PIE_BLOCK_SIZE;
+
+ sw_w32_mask(0, BIT(block), RTL839X_ACL_BLK_LOOKUP_CTRL);
+}
+
+/*
+ * Delete a range of Packet Inspection Engine rules
+ */
+static int rtl839x_pie_rule_del(struct rtl838x_switch_priv *priv, int index_from, int index_to)
+{
+ u32 v = (index_from << 1) | (index_to << 13) | BIT(0);
+
+ pr_debug("%s: from %d to %d\n", __func__, index_from, index_to);
+ mutex_lock(&priv->reg_mutex);
+
+ // Write from-to and execute bit into control register
+ sw_w32(v, RTL839X_ACL_CLR_CTRL);
+
+ // Wait until command has completed
+ do {
+ } while (sw_r32(RTL839X_ACL_CLR_CTRL) & BIT(0));
+
+ mutex_unlock(&priv->reg_mutex);
+ return 0;
+}
+
+/*
+ * Reads the intermediate representation of the templated match-fields of the
+ * PIE rule in the pie_rule structure and fills in the raw data fields in the
+ * raw register space r[].
+ * The register space configuration size is identical for the RTL8380/90 and RTL9300,
+ * however the RTL9310 has 2 more registers / fields and the physical field-ids are different
+ * on all SoCs
+ * On the RTL8390 the template mask registers are not word-aligned!
+ */
+static void rtl839x_write_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[])
+{
+ int i;
+ enum template_field_id field_type;
+ u16 data, data_m;
+
+ for (i = 0; i < N_FIXED_FIELDS; i++) {
+ field_type = t[i];
+ data = data_m = 0;
+
+ switch (field_type) {
+ case TEMPLATE_FIELD_SPM0:
+ data = pr->spm;
+ data_m = pr->spm_m;
+ break;
+ case TEMPLATE_FIELD_SPM1:
+ data = pr->spm >> 16;
+ data_m = pr->spm_m >> 16;
+ break;
+ case TEMPLATE_FIELD_SPM2:
+ data = pr->spm >> 32;
+ data_m = pr->spm_m >> 32;
+ break;
+ case TEMPLATE_FIELD_SPM3:
+ data = pr->spm >> 48;
+ data_m = pr->spm_m >> 48;
+ break;
+ case TEMPLATE_FIELD_OTAG:
+ data = pr->otag;
+ data_m = pr->otag_m;
+ break;
+ case TEMPLATE_FIELD_SMAC0:
+ data = pr->smac[4];
+ data = (data << 8) | pr->smac[5];
+ data_m = pr->smac_m[4];
+ data_m = (data_m << 8) | pr->smac_m[5];
+ break;
+ case TEMPLATE_FIELD_SMAC1:
+ data = pr->smac[2];
+ data = (data << 8) | pr->smac[3];
+ data_m = pr->smac_m[2];
+ data_m = (data_m << 8) | pr->smac_m[3];
+ break;
+ case TEMPLATE_FIELD_SMAC2:
+ data = pr->smac[0];
+ data = (data << 8) | pr->smac[1];
+ data_m = pr->smac_m[0];
+ data_m = (data_m << 8) | pr->smac_m[1];
+ break;
+ case TEMPLATE_FIELD_DMAC0:
+ data = pr->dmac[4];
+ data = (data << 8) | pr->dmac[5];
+ data_m = pr->dmac_m[4];
+ data_m = (data_m << 8) | pr->dmac_m[5];
+ break;
+ case TEMPLATE_FIELD_DMAC1:
+ data = pr->dmac[2];
+ data = (data << 8) | pr->dmac[3];
+ data_m = pr->dmac_m[2];
+ data_m = (data_m << 8) | pr->dmac_m[3];
+ break;
+ case TEMPLATE_FIELD_DMAC2:
+ data = pr->dmac[0];
+ data = (data << 8) | pr->dmac[1];
+ data_m = pr->dmac_m[0];
+ data_m = (data_m << 8) | pr->dmac_m[1];
+ break;
+ case TEMPLATE_FIELD_ETHERTYPE:
+ data = pr->ethertype;
+ data_m = pr->ethertype_m;
+ break;
+ case TEMPLATE_FIELD_ITAG:
+ data = pr->itag;
+ data_m = pr->itag_m;
+ break;
+ case TEMPLATE_FIELD_SIP0:
+ if (pr->is_ipv6) {
+ data = pr->sip6.s6_addr16[7];
+ data_m = pr->sip6_m.s6_addr16[7];
+ } else {
+ data = pr->sip;
+ data_m = pr->sip_m;
+ }
+ break;
+ case TEMPLATE_FIELD_SIP1:
+ if (pr->is_ipv6) {
+ data = pr->sip6.s6_addr16[6];
+ data_m = pr->sip6_m.s6_addr16[6];
+ } else {
+ data = pr->sip >> 16;
+ data_m = pr->sip_m >> 16;
+ }
+ break;
+
+ case TEMPLATE_FIELD_SIP2:
+ case TEMPLATE_FIELD_SIP3:
+ case TEMPLATE_FIELD_SIP4:
+ case TEMPLATE_FIELD_SIP5:
+ case TEMPLATE_FIELD_SIP6:
+ case TEMPLATE_FIELD_SIP7:
+ data = pr->sip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)];
+ data_m = pr->sip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)];
+ break;
+
+ case TEMPLATE_FIELD_DIP0:
+ if (pr->is_ipv6) {
+ data = pr->dip6.s6_addr16[7];
+ data_m = pr->dip6_m.s6_addr16[7];
+ } else {
+ data = pr->dip;
+ data_m = pr->dip_m;
+ }
+ break;
+
+ case TEMPLATE_FIELD_DIP1:
+ if (pr->is_ipv6) {
+ data = pr->dip6.s6_addr16[6];
+ data_m = pr->dip6_m.s6_addr16[6];
+ } else {
+ data = pr->dip >> 16;
+ data_m = pr->dip_m >> 16;
+ }
+ break;
+
+ case TEMPLATE_FIELD_DIP2:
+ case TEMPLATE_FIELD_DIP3:
+ case TEMPLATE_FIELD_DIP4:
+ case TEMPLATE_FIELD_DIP5:
+ case TEMPLATE_FIELD_DIP6:
+ case TEMPLATE_FIELD_DIP7:
+ data = pr->dip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)];
+ data_m = pr->dip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)];
+ break;
+
+ case TEMPLATE_FIELD_IP_TOS_PROTO:
+ data = pr->tos_proto;
+ data_m = pr->tos_proto_m;
+ break;
+ case TEMPLATE_FIELD_L4_SPORT:
+ data = pr->sport;
+ data_m = pr->sport_m;
+ break;
+ case TEMPLATE_FIELD_L4_DPORT:
+ data = pr->dport;
+ data_m = pr->dport_m;
+ break;
+ case TEMPLATE_FIELD_ICMP_IGMP:
+ data = pr->icmp_igmp;
+ data_m = pr->icmp_igmp_m;
+ break;
+ default:
+ pr_info("%s: unknown field %d\n", __func__, field_type);
+ }
+
+ // On the RTL8390, the mask fields are not word aligned!
+ if (!(i % 2)) {
+ r[5 - i / 2] = data;
+ r[12 - i / 2] |= ((u32)data_m << 8);
+ } else {
+ r[5 - i / 2] |= ((u32)data) << 16;
+ r[12 - i / 2] |= ((u32)data_m) << 24;
+ r[11 - i / 2] |= ((u32)data_m) >> 8;
+ }
+ }
+}
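+
+/* Worked example of the packing above (illustrative only): field i = 0 lands
+ * in r[5] bits 0-15 and its mask in r[12] bits 8-23; field i = 1 lands in
+ * r[5] bits 16-31, while its mask is split across r[12] bits 24-31 and r[11]
+ * bits 0-7 - hence the "not word-aligned" note for the RTL8390 above.
+ */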
+
+/*
+ * Creates the intermediate representation of the templated match-fields of the
+ * PIE rule in the pie_rule structure by reading the raw data fields in the
+ * raw register space r[].
+ * The size of the register space configuration is identical for the RTL8380/90
+ * and RTL9300; the RTL9310, however, has 2 more registers/fields, and the
+ * physical field-ids are different on all SoCs.
+ * On the RTL8390 the template mask registers are not word-aligned!
+ */
+void rtl839x_read_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[])
+{
+ int i;
+ enum template_field_id field_type;
+ u16 data, data_m;
+
+ for (i = 0; i < N_FIXED_FIELDS; i++) {
+ field_type = t[i];
+ if (!(i % 2)) {
+ data = r[5 - i / 2];
+ data_m = r[12 - i / 2];
+ } else {
+ data = r[5 - i / 2] >> 16;
+ data_m = r[12 - i / 2] >> 16;
+ }
+
+ switch (field_type) {
+ case TEMPLATE_FIELD_SPM0:
+ pr->spm = (pr->spn << 16) | data;
+ pr->spm_m = (pr->spn << 16) | data_m;
+ break;
+ case TEMPLATE_FIELD_SPM1:
+ pr->spm = data;
+ pr->spm_m = data_m;
+ break;
+ case TEMPLATE_FIELD_OTAG:
+ pr->otag = data;
+ pr->otag_m = data_m;
+ break;
+ case TEMPLATE_FIELD_SMAC0:
+ pr->smac[4] = data >> 8;
+ pr->smac[5] = data;
+ pr->smac_m[4] = data >> 8;
+ pr->smac_m[5] = data;
+ break;
+ case TEMPLATE_FIELD_SMAC1:
+ pr->smac[2] = data >> 8;
+ pr->smac[3] = data;
+ pr->smac_m[2] = data >> 8;
+ pr->smac_m[3] = data;
+ break;
+ case TEMPLATE_FIELD_SMAC2:
+ pr->smac[0] = data >> 8;
+ pr->smac[1] = data;
+ pr->smac_m[0] = data >> 8;
+ pr->smac_m[1] = data;
+ break;
+ case TEMPLATE_FIELD_DMAC0:
+ pr->dmac[4] = data >> 8;
+ pr->dmac[5] = data;
+ pr->dmac_m[4] = data >> 8;
+ pr->dmac_m[5] = data;
+ break;
+ case TEMPLATE_FIELD_DMAC1:
+ pr->dmac[2] = data >> 8;
+ pr->dmac[3] = data;
+ pr->dmac_m[2] = data >> 8;
+ pr->dmac_m[3] = data;
+ break;
+ case TEMPLATE_FIELD_DMAC2:
+ pr->dmac[0] = data >> 8;
+ pr->dmac[1] = data;
+ pr->dmac_m[0] = data >> 8;
+ pr->dmac_m[1] = data;
+ break;
+ case TEMPLATE_FIELD_ETHERTYPE:
+ pr->ethertype = data;
+ pr->ethertype_m = data_m;
+ break;
+ case TEMPLATE_FIELD_ITAG:
+ pr->itag = data;
+ pr->itag_m = data_m;
+ break;
+ case TEMPLATE_FIELD_SIP0:
+ pr->sip = data;
+ pr->sip_m = data_m;
+ break;
+ case TEMPLATE_FIELD_SIP1:
+ pr->sip = (pr->sip << 16) | data;
+ pr->sip_m = (pr->sip_m << 16) | data_m;
+ break;
+ case TEMPLATE_FIELD_SIP2:
+ pr->is_ipv6 = true;
+ // Make use of limitations on the position of the match values
+ ipv6_addr_set(&pr->sip6, pr->sip, r[5 - i / 2],
+ r[4 - i / 2], r[3 - i / 2]);
+ ipv6_addr_set(&pr->sip6_m, pr->sip_m, r[5 - i / 2],
+ r[4 - i / 2], r[3 - i / 2]);
+ case TEMPLATE_FIELD_SIP3:
+ case TEMPLATE_FIELD_SIP4:
+ case TEMPLATE_FIELD_SIP5:
+ case TEMPLATE_FIELD_SIP6:
+ case TEMPLATE_FIELD_SIP7:
+ break;
+
+ case TEMPLATE_FIELD_DIP0:
+ pr->dip = data;
+ pr->dip_m = data_m;
+ break;
+
+ case TEMPLATE_FIELD_DIP1:
+ pr->dip = (pr->dip << 16) | data;
+ pr->dip_m = (pr->dip_m << 16) | data_m;
+ break;
+
+ case TEMPLATE_FIELD_DIP2:
+ pr->is_ipv6 = true;
+ ipv6_addr_set(&pr->dip6, pr->dip, r[5 - i / 2],
+ r[4 - i / 2], r[3 - i / 2]);
+ ipv6_addr_set(&pr->dip6_m, pr->dip_m, r[5 - i / 2],
+ r[4 - i / 2], r[3 - i / 2]);
+ case TEMPLATE_FIELD_DIP3:
+ case TEMPLATE_FIELD_DIP4:
+ case TEMPLATE_FIELD_DIP5:
+ case TEMPLATE_FIELD_DIP6:
+ case TEMPLATE_FIELD_DIP7:
+ break;
+ case TEMPLATE_FIELD_IP_TOS_PROTO:
+ pr->tos_proto = data;
+ pr->tos_proto_m = data_m;
+ break;
+ case TEMPLATE_FIELD_L4_SPORT:
+ pr->sport = data;
+ pr->sport_m = data_m;
+ break;
+ case TEMPLATE_FIELD_L4_DPORT:
+ pr->dport = data;
+ pr->dport_m = data_m;
+ break;
+ case TEMPLATE_FIELD_ICMP_IGMP:
+ pr->icmp_igmp = data;
+ pr->icmp_igmp_m = data_m;
+ break;
+ default:
+ pr_info("%s: unknown field %d\n", __func__, field_type);
+ }
+ }
+}
+
+static void rtl839x_read_pie_fixed_fields(u32 r[], struct pie_rule *pr)
+{
+ pr->spmmask_fix = (r[6] >> 30) & 0x3;
+ pr->spn = (r[6] >> 24) & 0x3f;
+ pr->mgnt_vlan = (r[6] >> 23) & 1;
+ pr->dmac_hit_sw = (r[6] >> 22) & 1;
+ pr->not_first_frag = (r[6] >> 21) & 1;
+ pr->frame_type_l4 = (r[6] >> 18) & 7;
+ pr->frame_type = (r[6] >> 16) & 3;
+ pr->otag_fmt = (r[6] >> 15) & 1;
+ pr->itag_fmt = (r[6] >> 14) & 1;
+ pr->otag_exist = (r[6] >> 13) & 1;
+ pr->itag_exist = (r[6] >> 12) & 1;
+ pr->frame_type_l2 = (r[6] >> 10) & 3;
+ pr->tid = (r[6] >> 8) & 3;
+
+ pr->spmmask_fix_m = (r[12] >> 6) & 0x3;
+ pr->spn_m = r[12] & 0x3f;
+ pr->mgnt_vlan_m = (r[13] >> 31) & 1;
+ pr->dmac_hit_sw_m = (r[13] >> 30) & 1;
+ pr->not_first_frag_m = (r[13] >> 29) & 1;
+ pr->frame_type_l4_m = (r[13] >> 26) & 7;
+ pr->frame_type_m = (r[13] >> 24) & 3;
+ pr->otag_fmt_m = (r[13] >> 23) & 1;
+ pr->itag_fmt_m = (r[13] >> 22) & 1;
+ pr->otag_exist_m = (r[13] >> 21) & 1;
+ pr->itag_exist_m = (r[13] >> 20) & 1;
+ pr->frame_type_l2_m = (r[13] >> 18) & 3;
+ pr->tid_m = (r[13] >> 16) & 3;
+
+ pr->valid = r[13] & BIT(15);
+ pr->cond_not = r[13] & BIT(14);
+ pr->cond_and1 = r[13] & BIT(13);
+ pr->cond_and2 = r[13] & BIT(12);
+}
+
+static void rtl839x_write_pie_fixed_fields(u32 r[], struct pie_rule *pr)
+{
+ r[6] = ((u32) (pr->spmmask_fix & 0x3)) << 30;
+ r[6] |= ((u32) (pr->spn & 0x3f)) << 24;
+ r[6] |= pr->mgnt_vlan ? BIT(23) : 0;
+ r[6] |= pr->dmac_hit_sw ? BIT(22) : 0;
+ r[6] |= pr->not_first_frag ? BIT(21) : 0;
+ r[6] |= ((u32) (pr->frame_type_l4 & 0x7)) << 18;
+ r[6] |= ((u32) (pr->frame_type & 0x3)) << 16;
+ r[6] |= pr->otag_fmt ? BIT(15) : 0;
+ r[6] |= pr->itag_fmt ? BIT(14) : 0;
+ r[6] |= pr->otag_exist ? BIT(13) : 0;
+ r[6] |= pr->itag_exist ? BIT(12) : 0;
+ r[6] |= ((u32) (pr->frame_type_l2 & 0x3)) << 10;
+ r[6] |= ((u32) (pr->tid & 0x3)) << 8;
+
+ r[12] |= ((u32) (pr->spmmask_fix_m & 0x3)) << 6;
+ r[12] |= (u32) (pr->spn_m & 0x3f);
+ r[13] |= pr->mgnt_vlan_m ? BIT(31) : 0;
+ r[13] |= pr->dmac_hit_sw_m ? BIT(30) : 0;
+ r[13] |= pr->not_first_frag_m ? BIT(29) : 0;
+ r[13] |= ((u32) (pr->frame_type_l4_m & 0x7)) << 26;
+ r[13] |= ((u32) (pr->frame_type_m & 0x3)) << 24;
+ r[13] |= pr->otag_fmt_m ? BIT(23) : 0;
+ r[13] |= pr->itag_fmt_m ? BIT(22) : 0;
+ r[13] |= pr->otag_exist_m ? BIT(21) : 0;
+ r[13] |= pr->itag_exist_m ? BIT(20) : 0;
+ r[13] |= ((u32) (pr->frame_type_l2_m & 0x3)) << 18;
+ r[13] |= ((u32) (pr->tid_m & 0x3)) << 16;
+
+ r[13] |= pr->valid ? BIT(15) : 0;
+ r[13] |= pr->cond_not ? BIT(14) : 0;
+ r[13] |= pr->cond_and1 ? BIT(13) : 0;
+ r[13] |= pr->cond_and2 ? BIT(12) : 0;
+}
+
+static void rtl839x_write_pie_action(u32 r[], struct pie_rule *pr)
+{
+ if (pr->drop) {
+ r[13] |= 0x9; // Set ACT_MASK_FWD & FWD_ACT = DROP
+ r[13] |= BIT(3);
+ } else {
+ r[13] |= pr->fwd_sel ? BIT(3) : 0;
+ r[13] |= pr->fwd_act;
+ }
+ r[13] |= pr->bypass_sel ? BIT(11) : 0;
+ r[13] |= pr->mpls_sel ? BIT(10) : 0;
+ r[13] |= pr->nopri_sel ? BIT(9) : 0;
+ r[13] |= pr->ovid_sel ? BIT(8) : 0;
+ r[13] |= pr->ivid_sel ? BIT(7) : 0;
+ r[13] |= pr->meter_sel ? BIT(6) : 0;
+ r[13] |= pr->mir_sel ? BIT(5) : 0;
+ r[13] |= pr->log_sel ? BIT(4) : 0;
+
+ r[14] |= ((u32)(pr->fwd_data & 0x3fff)) << 18;
+ r[14] |= pr->log_octets ? BIT(17) : 0;
+ r[14] |= ((u32)(pr->log_data & 0x7ff)) << 4;
+ r[14] |= (pr->mir_data & 0x3) << 3;
+ r[14] |= ((u32)(pr->meter_data >> 7)) & 0x7;
+ r[15] |= (u32)(pr->meter_data) << 26;
+ r[15] |= ((u32)(pr->ivid_act) << 23) & 0x3;
+ r[15] |= ((u32)(pr->ivid_data) << 9) & 0xfff;
+ r[15] |= ((u32)(pr->ovid_act) << 6) & 0x3;
+ r[15] |= ((u32)(pr->ovid_data) >> 4) & 0xff;
+ r[16] |= ((u32)(pr->ovid_data) & 0xf) << 28;
+ r[16] |= ((u32)(pr->nopri_data) & 0x7) << 20;
+ r[16] |= ((u32)(pr->mpls_act) & 0x7) << 20;
+ r[16] |= ((u32)(pr->mpls_lib_idx) & 0x7) << 20;
+ r[16] |= pr->bypass_all ? BIT(9) : 0;
+ r[16] |= pr->bypass_igr_stp ? BIT(8) : 0;
+ r[16] |= pr->bypass_ibc_sc ? BIT(7) : 0;
+}
+
+static void rtl839x_read_pie_action(u32 r[], struct pie_rule *pr)
+{
+ if (r[13] & BIT(3)) { // ACT_MASK_FWD set, is it a drop?
+ if ((r[14] & 0x7) == 1) {
+ pr->drop = true;
+ } else {
+ pr->fwd_sel = true;
+ pr->fwd_act = r[14] & 0x7;
+ }
+ }
+
+ pr->bypass_sel = r[13] & BIT(11);
+ pr->mpls_sel = r[13] & BIT(10);
+ pr->nopri_sel = r[13] & BIT(9);
+ pr->ovid_sel = r[13] & BIT(8);
+ pr->ivid_sel = r[13] & BIT(7);
+ pr->meter_sel = r[13] & BIT(6);
+ pr->mir_sel = r[13] & BIT(5);
+ pr->log_sel = r[13] & BIT(4);
+
+ // TODO: Read in data fields
+
+ pr->bypass_all = r[16] & BIT(9);
+ pr->bypass_igr_stp = r[16] & BIT(8);
+ pr->bypass_ibc_sc = r[16] & BIT(7);
+}
+
+void rtl839x_pie_rule_dump_raw(u32 r[])
+{
+ pr_info("Raw IACL table entry:\n");
+ pr_info("Match : %08x %08x %08x %08x %08x %08x\n", r[0], r[1], r[2], r[3], r[4], r[5]);
+ pr_info("Fixed : %06x\n", r[6] >> 8);
+ pr_info("Match M: %08x %08x %08x %08x %08x %08x\n",
+ (r[6] << 24) | (r[7] >> 8), (r[7] << 24) | (r[8] >> 8), (r[8] << 24) | (r[9] >> 8),
+ (r[9] << 24) | (r[10] >> 8), (r[10] << 24) | (r[11] >> 8),
+ (r[11] << 24) | (r[12] >> 8));
+ pr_info("R[13]: %08x\n", r[13]);
+ pr_info("Fixed M: %06x\n", ((r[12] << 16) | (r[13] >> 16)) & 0xffffff);
+ pr_info("Valid / not / and1 / and2 : %1x\n", (r[13] >> 12) & 0xf);
+ pr_info("r 13-16: %08x %08x %08x %08x\n", r[13], r[14], r[15], r[16]);
+}
+
+void rtl839x_pie_rule_dump(struct pie_rule *pr)
+{
+ pr_info("Drop: %d, fwd: %d, ovid: %d, ivid: %d, flt: %d, log: %d, rmk: %d, meter: %d tagst: %d, mir: %d, nopri: %d, cpupri: %d, otpid: %d, itpid: %d, shape: %d\n",
+ pr->drop, pr->fwd_sel, pr->ovid_sel, pr->ivid_sel, pr->flt_sel, pr->log_sel, pr->rmk_sel, pr->meter_sel, pr->tagst_sel, pr->mir_sel, pr->nopri_sel,
+ pr->cpupri_sel, pr->otpid_sel, pr->itpid_sel, pr->shaper_sel);
+ if (pr->fwd_sel)
+ pr_info("FWD: %08x\n", pr->fwd_data);
+ pr_info("TID: %x, %x\n", pr->tid, pr->tid_m);
+}
+
+static int rtl839x_pie_rule_read(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr)
+{
+ // Read IACL table (2) via register 0
+ struct table_reg *q = rtl_table_get(RTL8390_TBL_0, 2);
+ u32 r[17];
+ int i;
+ int block = idx / PIE_BLOCK_SIZE;
+ u32 t_select = sw_r32(RTL839X_ACL_BLK_TMPLTE_CTRL(block));
+
+ memset(pr, 0, sizeof(*pr));
+ rtl_table_read(q, idx);
+ for (i = 0; i < 17; i++)
+ r[i] = sw_r32(rtl_table_data(q, i));
+
+ rtl_table_release(q);
+
+ rtl839x_read_pie_fixed_fields(r, pr);
+ if (!pr->valid)
+ return 0;
+
+ pr_debug("%s: template_selectors %08x, tid: %d\n", __func__, t_select, pr->tid);
+ rtl839x_pie_rule_dump_raw(r);
+
+ rtl839x_read_pie_templated(r, pr, fixed_templates[(t_select >> (pr->tid * 3)) & 0x7]);
+
+ rtl839x_read_pie_action(r, pr);
+
+ return 0;
+}
+
+static int rtl839x_pie_rule_write(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr)
+{
+ // Access IACL table (2) via register 0
+ struct table_reg *q = rtl_table_get(RTL8390_TBL_0, 2);
+ u32 r[17];
+ int i;
+ int block = idx / PIE_BLOCK_SIZE;
+ u32 t_select = sw_r32(RTL839X_ACL_BLK_TMPLTE_CTRL(block));
+
+ pr_debug("%s: %d, t_select: %08x\n", __func__, idx, t_select);
+
+ for (i = 0; i < 17; i++)
+ r[i] = 0;
+
+ if (!pr->valid) {
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+ return 0;
+ }
+ rtl839x_write_pie_fixed_fields(r, pr);
+
+ pr_debug("%s: template %d\n", __func__, (t_select >> (pr->tid * 3)) & 0x7);
+ rtl839x_write_pie_templated(r, pr, fixed_templates[(t_select >> (pr->tid * 3)) & 0x7]);
+
+ rtl839x_write_pie_action(r, pr);
+
+// rtl839x_pie_rule_dump_raw(r);
+
+ for (i = 0; i < 17; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+
+ return 0;
+}
+
+static bool rtl839x_pie_templ_has(int t, enum template_field_id field_type)
+{
+ int i;
+ enum template_field_id ft;
+
+ for (i = 0; i < N_FIXED_FIELDS; i++) {
+ ft = fixed_templates[t][i];
+ if (field_type == ft)
+ return true;
+ }
+
+ return false;
+}
+
+static int rtl839x_pie_verify_template(struct rtl838x_switch_priv *priv,
+ struct pie_rule *pr, int t, int block)
+{
+ int i;
+
+ if (!pr->is_ipv6 && pr->sip_m && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_SIP0))
+ return -1;
+
+ if (!pr->is_ipv6 && pr->dip_m && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_DIP0))
+ return -1;
+
+ if (pr->is_ipv6) {
+ if ((pr->sip6_m.s6_addr32[0] || pr->sip6_m.s6_addr32[1]
+ || pr->sip6_m.s6_addr32[2] || pr->sip6_m.s6_addr32[3])
+ && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_SIP2))
+ return -1;
+ if ((pr->dip6_m.s6_addr32[0] || pr->dip6_m.s6_addr32[1]
+ || pr->dip6_m.s6_addr32[2] || pr->dip6_m.s6_addr32[3])
+ && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_DIP2))
+ return -1;
+ }
+
+ if (ether_addr_to_u64(pr->smac) && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_SMAC0))
+ return -1;
+
+ if (ether_addr_to_u64(pr->dmac) && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_DMAC0))
+ return -1;
+
+ // TODO: Check more
+
+ i = find_first_zero_bit(&priv->pie_use_bm[block * 4], PIE_BLOCK_SIZE);
+
+ if (i >= PIE_BLOCK_SIZE)
+ return -1;
+
+ return i + PIE_BLOCK_SIZE * block;
+}
+
+static int rtl839x_pie_rule_add(struct rtl838x_switch_priv *priv, struct pie_rule *pr)
+{
+ int idx, block, j, t;
+ int min_block = 0;
+ int max_block = priv->n_pie_blocks / 2;
+
+ if (pr->is_egress) {
+ min_block = max_block;
+ max_block = priv->n_pie_blocks;
+ }
+
+ mutex_lock(&priv->pie_mutex);
+
+ for (block = min_block; block < max_block; block++) {
+ for (j = 0; j < 2; j++) {
+ t = (sw_r32(RTL839X_ACL_BLK_TMPLTE_CTRL(block)) >> (j * 3)) & 0x7;
+ idx = rtl839x_pie_verify_template(priv, pr, t, block);
+ if (idx >= 0)
+ break;
+ }
+ if (j < 2)
+ break;
+ }
+
+ if (block >= priv->n_pie_blocks) {
+ mutex_unlock(&priv->pie_mutex);
+ return -EOPNOTSUPP;
+ }
+
+ set_bit(idx, priv->pie_use_bm);
+
+ pr->valid = true;
+ pr->tid = j; // Mapped to template number
+ pr->tid_m = 0x3;
+ pr->id = idx;
+
+ rtl839x_pie_lookup_enable(priv, idx);
+ rtl839x_pie_rule_write(priv, idx, pr);
+
+ mutex_unlock(&priv->pie_mutex);
+ return 0;
+}
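+
+/* Minimal usage sketch (illustrative only, not part of the driver): drop all
+ * ingress traffic from a given source IPv4 address. Only the pie_rule fields
+ * shown are set; a real caller would typically also constrain the frame type.
+ */
+static int __maybe_unused rtl839x_example_drop_sip(struct rtl838x_switch_priv *priv,
+ u32 sip)
+{
+ struct pie_rule pr;
+
+ memset(&pr, 0, sizeof(pr));
+ pr.is_egress = false; /* allocate from the ingress blocks */
+ pr.sip = sip;
+ pr.sip_m = 0xffffffff; /* match the full address */
+ pr.drop = true;
+
+ return rtl839x_pie_rule_add(priv, &pr);
+}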
+
+static void rtl839x_pie_rule_rm(struct rtl838x_switch_priv *priv, struct pie_rule *pr)
+{
+ int idx = pr->id;
+
+ rtl839x_pie_rule_del(priv, idx, idx);
+ clear_bit(idx, priv->pie_use_bm);
+}
+
+static void rtl839x_pie_init(struct rtl838x_switch_priv *priv)
+{
+ int i;
+ u32 template_selectors;
+
+ mutex_init(&priv->pie_mutex);
+
+ // Power on all PIE blocks
+ for (i = 0; i < priv->n_pie_blocks; i++)
+ sw_w32_mask(0, BIT(i), RTL839X_PS_ACL_PWR_CTRL);
+
+ // Set ingress and egress ACL blocks to 50/50: first Egress block is 9
+ sw_w32_mask(0x1f, 9, RTL839X_ACL_CTRL); // Writes 9 to cutline field
+
+ // Include IPG in metering
+ sw_w32(1, RTL839X_METER_GLB_CTRL);
+
+ // Delete all present rules
+ rtl839x_pie_rule_del(priv, 0, priv->n_pie_blocks * PIE_BLOCK_SIZE - 1);
+
+ // Enable predefined templates 0, 1 for blocks 0-2
+ template_selectors = 0 | (1 << 3);
+ for (i = 0; i < 3; i++)
+ sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
+
+ // Enable predefined templates 2, 3 for blocks 3-5
+ template_selectors = 2 | (3 << 3);
+ for (i = 3; i < 6; i++)
+ sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
+
+ // Enable predefined templates 2, 3 for blocks 6-8
+ template_selectors = 2 | (3 << 3);
+ for (i = 6; i < 9; i++)
+ sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
+
+ // Enable predefined templates 0, 1 for blocks 9-11
+ template_selectors = 0 | (1 << 3);
+ for (i = 9; i < 12; i++)
+ sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
+
+ // Enable predefined templates 2, 3 for blocks 12-14
+ template_selectors = 2 | (3 << 3);
+ for (i = 12; i < 15; i++)
+ sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
+
+ // Enable predefined templates 2, 3 for blocks 15-17
+ template_selectors = 2 | (3 << 3);
+ for (i = 15; i < 18; i++)
+ sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
+}
+
+static u32 rtl839x_packet_cntr_read(int counter)
+{
+ u32 v;
+
+ // Read LOG table (4) via register RTL8390_TBL_0
+ struct table_reg *r = rtl_table_get(RTL8390_TBL_0, 4);
+
+ pr_debug("In %s, id %d\n", __func__, counter);
+ rtl_table_read(r, counter / 2);
+
+ // The table has a size of 2 registers
+ if (counter % 2)
+ v = sw_r32(rtl_table_data(r, 0));
+ else
+ v = sw_r32(rtl_table_data(r, 1));
+
+ rtl_table_release(r);
+
+ return v;
+}
+
+static void rtl839x_packet_cntr_clear(int counter)
+{
+ // Access LOG table (4) via register RTL8390_TBL_0
+ struct table_reg *r = rtl_table_get(RTL8390_TBL_0, 4);
+
+ pr_debug("In %s, id %d\n", __func__, counter);
+ // The table has a size of 2 registers
+ if (counter % 2)
+ sw_w32(0, rtl_table_data(r, 0));
+ else
+ sw_w32(0, rtl_table_data(r, 1));
+
+ rtl_table_write(r, counter / 2);
+
+ rtl_table_release(r);
+}
+
+static void rtl839x_route_read(int idx, struct rtl83xx_route *rt)
+{
+ u64 v;
+ // Read ROUTING table (2) via register RTL8390_TBL_1
+ struct table_reg *r = rtl_table_get(RTL8390_TBL_1, 2);
+
+ pr_debug("In %s\n", __func__);
+ rtl_table_read(r, idx);
+
+ // The table has a size of 2 registers
+ v = sw_r32(rtl_table_data(r, 0));
+ v <<= 32;
+ v |= sw_r32(rtl_table_data(r, 1));
+ rt->switch_mac_id = (v >> 12) & 0xf;
+ rt->nh.gw = v >> 16;
+
+ rtl_table_release(r);
+}
+
+static void rtl839x_route_write(int idx, struct rtl83xx_route *rt)
+{
+ u32 v;
+
+ // Access ROUTING table (2) via register RTL8390_TBL_1
+ struct table_reg *r = rtl_table_get(RTL8390_TBL_1, 2);
+
+ pr_debug("In %s\n", __func__);
+ sw_w32(rt->nh.gw >> 16, rtl_table_data(r, 0));
+ v = rt->nh.gw << 16;
+ v |= rt->switch_mac_id << 12;
+ sw_w32(v, rtl_table_data(r, 1));
+ rtl_table_write(r, idx);
+
+ rtl_table_release(r);
+}
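+
+/* Worked example (illustrative): for a next-hop MAC of 00:11:22:33:44:55 and
+ * switch_mac_id 3, the code above writes 0x00112233 to data word 0 and
+ * 0x44553000 to data word 1 - gw occupies bits 16-63, the MAC index bits 12-15.
+ */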
+
+/*
+ * Configure the switch's own MAC addresses used when routing packets
+ */
+static void rtl839x_setup_port_macs(struct rtl838x_switch_priv *priv)
+{
+ int i;
+ struct net_device *dev;
+ u64 mac;
+
+ pr_debug("%s: got port %08x\n", __func__, (u32)priv->ports[priv->cpu_port].dp);
+ dev = priv->ports[priv->cpu_port].dp->slave;
+ mac = ether_addr_to_u64(dev->dev_addr);
+
+ for (i = 0; i < 15; i++) {
+ mac++; // BUG: VRRP for testing
+ sw_w32(mac >> 32, RTL839X_ROUTING_SA_CTRL + i * 8);
+ sw_w32(mac, RTL839X_ROUTING_SA_CTRL + i * 8 + 4);
+ }
+}
+
+int rtl839x_l3_setup(struct rtl838x_switch_priv *priv)
+{
+ rtl839x_setup_port_macs(priv);
+
+ return 0;
+}
+
+void rtl839x_vlan_port_keep_tag_set(int port, bool keep_outer, bool keep_inner)
+{
+ sw_w32(FIELD_PREP(RTL839X_VLAN_PORT_TAG_STS_CTRL_OTAG_STS_MASK,
+ keep_outer ? RTL839X_VLAN_PORT_TAG_STS_TAGGED : RTL839X_VLAN_PORT_TAG_STS_UNTAG) |
+ FIELD_PREP(RTL839X_VLAN_PORT_TAG_STS_CTRL_ITAG_STS_MASK,
+ keep_inner ? RTL839X_VLAN_PORT_TAG_STS_TAGGED : RTL839X_VLAN_PORT_TAG_STS_UNTAG),
+ RTL839X_VLAN_PORT_TAG_STS_CTRL(port));
+}
+
+void rtl839x_vlan_port_pvidmode_set(int port, enum pbvlan_type type, enum pbvlan_mode mode)
+{
+ if (type == PBVLAN_TYPE_INNER)
+ sw_w32_mask(0x3, mode, RTL839X_VLAN_PORT_PB_VLAN + (port << 2));
+ else
+ sw_w32_mask(0x3 << 14, mode << 14, RTL839X_VLAN_PORT_PB_VLAN + (port << 2));
+}
+
+void rtl839x_vlan_port_pvid_set(int port, enum pbvlan_type type, int pvid)
+{
+ if (type == PBVLAN_TYPE_INNER)
+ sw_w32_mask(0xfff << 2, pvid << 2, RTL839X_VLAN_PORT_PB_VLAN + (port << 2));
+ else
+ sw_w32_mask(0xfff << 16, pvid << 16, RTL839X_VLAN_PORT_PB_VLAN + (port << 2));
+}
+
+static int rtl839x_set_ageing_time(unsigned long msec)
+{
+ int t = sw_r32(RTL839X_L2_CTRL_1);
+
+ t &= 0x1FFFFF;
+ t = t * 3 / 5; /* Aging time in seconds. 0: L2 aging disabled */
+ pr_debug("L2 AGING time: %d sec\n", t);
+
+ t = (msec * 5 + 2000) / 3000;
+ t = t > 0x1FFFFF ? 0x1FFFFF : t;
+ sw_w32_mask(0x1FFFFF, t, RTL839X_L2_CTRL_1);
+ pr_debug("Dynamic aging for ports: %x\n", sw_r32(RTL839X_L2_PORT_AGING_OUT));
+
+ return 0;
+}
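+
+/* Worked example (illustrative): the common bridge ageing time of 300000 ms
+ * maps to t = (300000 * 5 + 2000) / 3000 = 500 hardware units, which the
+ * conversion above reads back as 500 * 3 / 5 = 300 seconds.
+ */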
+
+static void rtl839x_set_igr_filter(int port, enum igr_filter state)
+{
+ sw_w32_mask(0x3 << ((port & 0xf) << 1), state << ((port & 0xf) << 1),
+ RTL839X_VLAN_PORT_IGR_FLTR + ((port >> 4) << 2));
+}
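+
+/* Worked example (illustrative): the filter state uses 2 bits per port and 16
+ * ports per 32-bit register, so for port 21 the state goes to bits 10-11
+ * ((21 & 0xf) << 1) of RTL839X_VLAN_PORT_IGR_FLTR + 4 ((21 >> 4) << 2).
+ */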
+
+static void rtl839x_set_egr_filter(int port, enum egr_filter state)
+{
+ sw_w32_mask(0x1 << (port % 0x20), state << (port % 0x20),
+ RTL839X_VLAN_PORT_EGR_FLTR + (((port >> 5) << 2)));
+}
+
+void rtl839x_set_distribution_algorithm(int group, int algoidx, u32 algomsk)
+{
+ sw_w32_mask(3 << ((group & 0xf) << 1), algoidx << ((group & 0xf) << 1),
+ RTL839X_TRK_HASH_IDX_CTRL + ((group >> 4) << 2));
+ sw_w32(algomsk, RTL839X_TRK_HASH_CTRL + (algoidx << 2));
+}
+
+void rtl839x_set_receive_management_action(int port, rma_ctrl_t type, action_type_t action)
+{
+ switch(type) {
+ case BPDU:
+ sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1),
+ RTL839X_RMA_BPDU_CTRL + ((port >> 4) << 2));
+ break;
+ case PTP:
+ sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1),
+ RTL839X_RMA_PTP_CTRL + ((port >> 4) << 2));
+ break;
+ case LLTP:
+ sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1),
+ RTL839X_RMA_LLTP_CTRL + ((port >> 4) << 2));
+ break;
+ default:
+ break;
+ }
+}
+
+const struct rtl838x_reg rtl839x_reg = {
+ .mask_port_reg_be = rtl839x_mask_port_reg_be,
+ .set_port_reg_be = rtl839x_set_port_reg_be,
+ .get_port_reg_be = rtl839x_get_port_reg_be,
+ .mask_port_reg_le = rtl839x_mask_port_reg_le,
+ .set_port_reg_le = rtl839x_set_port_reg_le,
+ .get_port_reg_le = rtl839x_get_port_reg_le,
+ .stat_port_rst = RTL839X_STAT_PORT_RST,
+ .stat_rst = RTL839X_STAT_RST,
+ .stat_port_std_mib = RTL839X_STAT_PORT_STD_MIB,
+ .traffic_enable = rtl839x_traffic_enable,
+ .traffic_disable = rtl839x_traffic_disable,
+ .traffic_get = rtl839x_traffic_get,
+ .traffic_set = rtl839x_traffic_set,
+ .port_iso_ctrl = rtl839x_port_iso_ctrl,
+ .l2_ctrl_0 = RTL839X_L2_CTRL_0,
+ .l2_ctrl_1 = RTL839X_L2_CTRL_1,
+ .l2_port_aging_out = RTL839X_L2_PORT_AGING_OUT,
+ .set_ageing_time = rtl839x_set_ageing_time,
+ .smi_poll_ctrl = RTL839X_SMI_PORT_POLLING_CTRL,
+ .l2_tbl_flush_ctrl = RTL839X_L2_TBL_FLUSH_CTRL,
+ .exec_tbl0_cmd = rtl839x_exec_tbl0_cmd,
+ .exec_tbl1_cmd = rtl839x_exec_tbl1_cmd,
+ .tbl_access_data_0 = rtl839x_tbl_access_data_0,
+ .isr_glb_src = RTL839X_ISR_GLB_SRC,
+ .isr_port_link_sts_chg = RTL839X_ISR_PORT_LINK_STS_CHG,
+ .imr_port_link_sts_chg = RTL839X_IMR_PORT_LINK_STS_CHG,
+ .imr_glb = RTL839X_IMR_GLB,
+ .vlan_tables_read = rtl839x_vlan_tables_read,
+ .vlan_set_tagged = rtl839x_vlan_set_tagged,
+ .vlan_set_untagged = rtl839x_vlan_set_untagged,
+ .vlan_profile_dump = rtl839x_vlan_profile_dump,
+ .vlan_profile_setup = rtl839x_vlan_profile_setup,
+ .vlan_fwd_on_inner = rtl839x_vlan_fwd_on_inner,
+ .vlan_port_keep_tag_set = rtl839x_vlan_port_keep_tag_set,
+ .vlan_port_pvidmode_set = rtl839x_vlan_port_pvidmode_set,
+ .vlan_port_pvid_set = rtl839x_vlan_port_pvid_set,
+ .set_vlan_igr_filter = rtl839x_set_igr_filter,
+ .set_vlan_egr_filter = rtl839x_set_egr_filter,
+ .enable_learning = rtl839x_enable_learning,
+ .enable_flood = rtl839x_enable_flood,
+ .enable_mcast_flood = rtl839x_enable_mcast_flood,
+ .enable_bcast_flood = rtl839x_enable_bcast_flood,
+ .stp_get = rtl839x_stp_get,
+ .stp_set = rtl839x_stp_set,
+ .mac_force_mode_ctrl = rtl839x_mac_force_mode_ctrl,
+ .mac_port_ctrl = rtl839x_mac_port_ctrl,
+ .l2_port_new_salrn = rtl839x_l2_port_new_salrn,
+ .l2_port_new_sa_fwd = rtl839x_l2_port_new_sa_fwd,
+ .mir_ctrl = RTL839X_MIR_CTRL,
+ .mir_dpm = RTL839X_MIR_DPM_CTRL,
+ .mir_spm = RTL839X_MIR_SPM_CTRL,
+ .mac_link_sts = RTL839X_MAC_LINK_STS,
+ .mac_link_dup_sts = RTL839X_MAC_LINK_DUP_STS,
+ .mac_link_spd_sts = rtl839x_mac_link_spd_sts,
+ .mac_rx_pause_sts = RTL839X_MAC_RX_PAUSE_STS,
+ .mac_tx_pause_sts = RTL839X_MAC_TX_PAUSE_STS,
+ .read_l2_entry_using_hash = rtl839x_read_l2_entry_using_hash,
+ .write_l2_entry_using_hash = rtl839x_write_l2_entry_using_hash,
+ .read_cam = rtl839x_read_cam,
+ .write_cam = rtl839x_write_cam,
+ .trk_mbr_ctr = rtl839x_trk_mbr_ctr,
+ .rma_bpdu_fld_pmask = RTL839X_RMA_BPDU_FLD_PMSK,
+ .spcl_trap_eapol_ctrl = RTL839X_SPCL_TRAP_EAPOL_CTRL,
+ .init_eee = rtl839x_init_eee,
+ .port_eee_set = rtl839x_port_eee_set,
+ .eee_port_ability = rtl839x_eee_port_ability,
+ .l2_hash_seed = rtl839x_l2_hash_seed,
+ .l2_hash_key = rtl839x_l2_hash_key,
+ .read_mcast_pmask = rtl839x_read_mcast_pmask,
+ .write_mcast_pmask = rtl839x_write_mcast_pmask,
+ .pie_init = rtl839x_pie_init,
+ .pie_rule_read = rtl839x_pie_rule_read,
+ .pie_rule_write = rtl839x_pie_rule_write,
+ .pie_rule_add = rtl839x_pie_rule_add,
+ .pie_rule_rm = rtl839x_pie_rule_rm,
+ .l2_learning_setup = rtl839x_l2_learning_setup,
+ .packet_cntr_read = rtl839x_packet_cntr_read,
+ .packet_cntr_clear = rtl839x_packet_cntr_clear,
+ .route_read = rtl839x_route_read,
+ .route_write = rtl839x_route_write,
+ .l3_setup = rtl839x_l3_setup,
+ .set_distribution_algorithm = rtl839x_set_distribution_algorithm,
+ .set_receive_management_action = rtl839x_set_receive_management_action,
+};
diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl83xx.h b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl83xx.h
new file mode 100644
index 0000000000..107016469c
--- /dev/null
+++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl83xx.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _NET_DSA_RTL83XX_H
+#define _NET_DSA_RTL83XX_H
+
+#include <net/dsa.h>
+#include "rtl838x.h"
+
+
+#define RTL8380_VERSION_A 'A'
+#define RTL8390_VERSION_A 'A'
+#define RTL8380_VERSION_B 'B'
+
+struct fdb_update_work {
+ struct work_struct work;
+ struct net_device *ndev;
+ u64 macs[];
+};
+
+#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
+struct rtl83xx_mib_desc {
+ unsigned int size;
+ unsigned int offset;
+ const char *name;
+};
+
+/* API for switch table access */
+struct table_reg {
+ u16 addr;
+ u16 data;
+ u8 max_data;
+ u8 c_bit;
+ u8 t_bit;
+ u8 rmode;
+ u8 tbl;
+ struct mutex lock;
+};
+
+#define TBL_DESC(_addr, _data, _max_data, _c_bit, _t_bit, _rmode) \
+ { .addr = _addr, .data = _data, .max_data = _max_data, .c_bit = _c_bit, \
+ .t_bit = _t_bit, .rmode = _rmode \
+ }
+
+typedef enum {
+ RTL8380_TBL_L2 = 0,
+ RTL8380_TBL_0,
+ RTL8380_TBL_1,
+ RTL8390_TBL_L2,
+ RTL8390_TBL_0,
+ RTL8390_TBL_1,
+ RTL8390_TBL_2,
+ RTL9300_TBL_L2,
+ RTL9300_TBL_0,
+ RTL9300_TBL_1,
+ RTL9300_TBL_2,
+ RTL9300_TBL_HSB,
+ RTL9300_TBL_HSA,
+ RTL9310_TBL_0,
+ RTL9310_TBL_1,
+ RTL9310_TBL_2,
+ RTL9310_TBL_3,
+ RTL9310_TBL_4,
+ RTL9310_TBL_5,
+ RTL_TBL_END
+} rtl838x_tbl_reg_t;
+
+void rtl_table_init(void);
+struct table_reg *rtl_table_get(rtl838x_tbl_reg_t r, int t);
+void rtl_table_release(struct table_reg *r);
+void rtl_table_read(struct table_reg *r, int idx);
+void rtl_table_write(struct table_reg *r, int idx);
+inline u16 rtl_table_data(struct table_reg *r, int i);
+inline u32 rtl_table_data_r(struct table_reg *r, int i);
+inline void rtl_table_data_w(struct table_reg *r, u32 v, int i);
+
+void __init rtl83xx_setup_qos(struct rtl838x_switch_priv *priv);
+
+int rtl83xx_packet_cntr_alloc(struct rtl838x_switch_priv *priv);
+
+int rtl83xx_port_is_under(const struct net_device * dev, struct rtl838x_switch_priv *priv);
+
+int read_phy(u32 port, u32 page, u32 reg, u32 *val);
+int write_phy(u32 port, u32 page, u32 reg, u32 val);
+
+/* Port register accessor functions for the RTL839x and RTL931X SoCs */
+void rtl839x_mask_port_reg_be(u64 clear, u64 set, int reg);
+u64 rtl839x_get_port_reg_be(int reg);
+void rtl839x_set_port_reg_be(u64 set, int reg);
+void rtl839x_mask_port_reg_le(u64 clear, u64 set, int reg);
+void rtl839x_set_port_reg_le(u64 set, int reg);
+u64 rtl839x_get_port_reg_le(int reg);
+
+/* Port register accessor functions for the RTL838x and RTL930X SoCs */
+void rtl838x_mask_port_reg(u64 clear, u64 set, int reg);
+void rtl838x_set_port_reg(u64 set, int reg);
+u64 rtl838x_get_port_reg(int reg);
+
+/* RTL838x-specific */
+u32 rtl838x_hash(struct rtl838x_switch_priv *priv, u64 seed);
+irqreturn_t rtl838x_switch_irq(int irq, void *dev_id);
+void rtl8380_get_version(struct rtl838x_switch_priv *priv);
+void rtl838x_vlan_profile_dump(int index);
+int rtl83xx_dsa_phy_read(struct dsa_switch *ds, int phy_addr, int phy_reg);
+void rtl8380_sds_rst(int mac);
+int rtl8380_sds_power(int mac, int val);
+void rtl838x_print_matrix(void);
+
+/* RTL839x-specific */
+u32 rtl839x_hash(struct rtl838x_switch_priv *priv, u64 seed);
+irqreturn_t rtl839x_switch_irq(int irq, void *dev_id);
+void rtl8390_get_version(struct rtl838x_switch_priv *priv);
+void rtl839x_vlan_profile_dump(int index);
+int rtl83xx_dsa_phy_write(struct dsa_switch *ds, int phy_addr, int phy_reg, u16 val);
+void rtl839x_exec_tbl2_cmd(u32 cmd);
+void rtl839x_print_matrix(void);
+
+/* RTL930x-specific */
+u32 rtl930x_hash(struct rtl838x_switch_priv *priv, u64 seed);
+irqreturn_t rtl930x_switch_irq(int irq, void *dev_id);
+irqreturn_t rtl839x_switch_irq(int irq, void *dev_id);
+void rtl930x_vlan_profile_dump(int index);
+int rtl9300_sds_power(int mac, int val);
+void rtl9300_sds_rst(int sds_num, u32 mode);
+int rtl9300_serdes_setup(int sds_num, phy_interface_t phy_mode);
+void rtl930x_print_matrix(void);
+
+/* RTL931x-specific */
+irqreturn_t rtl931x_switch_irq(int irq, void *dev_id);
+int rtl931x_sds_cmu_band_get(int sds, phy_interface_t mode);
+int rtl931x_sds_cmu_band_set(int sds, bool enable, u32 band, phy_interface_t mode);
+void rtl931x_sds_init(u32 sds, phy_interface_t mode);
+
+int rtl83xx_lag_add(struct dsa_switch *ds, int group, int port, struct netdev_lag_upper_info *info);
+int rtl83xx_lag_del(struct dsa_switch *ds, int group, int port);
+
+#endif /* _NET_DSA_RTL83XX_H */
+
diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl930x.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl930x.c
new file mode 100644
index 0000000000..5dde8353e2
--- /dev/null
+++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl930x.c
@@ -0,0 +1,2560 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <asm/mach-rtl838x/mach-rtl83xx.h>
+#include <linux/inetdevice.h>
+
+#include "rtl83xx.h"
+
+#define RTL930X_VLAN_PORT_TAG_STS_INTERNAL 0x0
+#define RTL930X_VLAN_PORT_TAG_STS_UNTAG 0x1
+#define RTL930X_VLAN_PORT_TAG_STS_TAGGED 0x2
+#define RTL930X_VLAN_PORT_TAG_STS_PRIORITY_TAGGED 0x3
+
+#define RTL930X_VLAN_PORT_TAG_STS_CTRL_BASE 0xCE24
+/* port 0-28 */
+#define RTL930X_VLAN_PORT_TAG_STS_CTRL(port) \
+ (RTL930X_VLAN_PORT_TAG_STS_CTRL_BASE + ((port) << 2))
+#define RTL930X_VLAN_PORT_TAG_STS_CTRL_EGR_OTAG_STS_MASK GENMASK(7,6)
+#define RTL930X_VLAN_PORT_TAG_STS_CTRL_EGR_ITAG_STS_MASK GENMASK(5,4)
+#define RTL930X_VLAN_PORT_TAG_STS_CTRL_EGR_P_OTAG_KEEP_MASK GENMASK(3,3)
+#define RTL930X_VLAN_PORT_TAG_STS_CTRL_EGR_P_ITAG_KEEP_MASK GENMASK(2,2)
+#define RTL930X_VLAN_PORT_TAG_STS_CTRL_IGR_P_OTAG_KEEP_MASK GENMASK(1,1)
+#define RTL930X_VLAN_PORT_TAG_STS_CTRL_IGR_P_ITAG_KEEP_MASK GENMASK(0,0)
+
+extern struct mutex smi_lock;
+extern struct rtl83xx_soc_info soc_info;
+
+/* Definition of the RTL930X-specific template field IDs as used in the PIE */
+enum template_field_id {
+ TEMPLATE_FIELD_SPM0 = 0, // Source portmask ports 0-15
+ TEMPLATE_FIELD_SPM1 = 1, // Source portmask ports 16-31
+ TEMPLATE_FIELD_DMAC0 = 2, // Destination MAC [15:0]
+ TEMPLATE_FIELD_DMAC1 = 3, // Destination MAC [31:16]
+ TEMPLATE_FIELD_DMAC2 = 4, // Destination MAC [47:32]
+ TEMPLATE_FIELD_SMAC0 = 5, // Source MAC [15:0]
+ TEMPLATE_FIELD_SMAC1 = 6, // Source MAC [31:16]
+ TEMPLATE_FIELD_SMAC2 = 7, // Source MAC [47:32]
+ TEMPLATE_FIELD_ETHERTYPE = 8, // Ethernet frame type field
+ TEMPLATE_FIELD_OTAG = 9,
+ TEMPLATE_FIELD_ITAG = 10,
+ TEMPLATE_FIELD_SIP0 = 11,
+ TEMPLATE_FIELD_SIP1 = 12,
+ TEMPLATE_FIELD_DIP0 = 13,
+ TEMPLATE_FIELD_DIP1 = 14,
+ TEMPLATE_FIELD_IP_TOS_PROTO = 15,
+ TEMPLATE_FIELD_L4_SPORT = 16,
+ TEMPLATE_FIELD_L4_DPORT = 17,
+ TEMPLATE_FIELD_L34_HEADER = 18,
+ TEMPLATE_FIELD_TCP_INFO = 19,
+ TEMPLATE_FIELD_FIELD_SELECTOR_VALID = 20,
+ TEMPLATE_FIELD_FIELD_SELECTOR_0 = 21,
+ TEMPLATE_FIELD_FIELD_SELECTOR_1 = 22,
+ TEMPLATE_FIELD_FIELD_SELECTOR_2 = 23,
+ TEMPLATE_FIELD_FIELD_SELECTOR_3 = 24,
+ TEMPLATE_FIELD_FIELD_SELECTOR_4 = 25,
+ TEMPLATE_FIELD_FIELD_SELECTOR_5 = 26,
+ TEMPLATE_FIELD_SIP2 = 27,
+ TEMPLATE_FIELD_SIP3 = 28,
+ TEMPLATE_FIELD_SIP4 = 29,
+ TEMPLATE_FIELD_SIP5 = 30,
+ TEMPLATE_FIELD_SIP6 = 31,
+ TEMPLATE_FIELD_SIP7 = 32,
+ TEMPLATE_FIELD_DIP2 = 33,
+ TEMPLATE_FIELD_DIP3 = 34,
+ TEMPLATE_FIELD_DIP4 = 35,
+ TEMPLATE_FIELD_DIP5 = 36,
+ TEMPLATE_FIELD_DIP6 = 37,
+ TEMPLATE_FIELD_DIP7 = 38,
+ TEMPLATE_FIELD_PKT_INFO = 39,
+ TEMPLATE_FIELD_FLOW_LABEL = 40,
+ TEMPLATE_FIELD_DSAP_SSAP = 41,
+ TEMPLATE_FIELD_SNAP_OUI = 42,
+ TEMPLATE_FIELD_FWD_VID = 43,
+ TEMPLATE_FIELD_RANGE_CHK = 44,
+ TEMPLATE_FIELD_VLAN_GMSK = 45, // VLAN Group Mask/IP range check
+ TEMPLATE_FIELD_DLP = 46,
+ TEMPLATE_FIELD_META_DATA = 47,
+ TEMPLATE_FIELD_SRC_FWD_VID = 48,
+ TEMPLATE_FIELD_SLP = 49,
+};
+
+/* The meaning of TEMPLATE_FIELD_VLAN depends on phase and the configuration in
+ * RTL930X_PIE_CTRL. We always use the same definition and map it to the inner VLAN tag:
+ */
+#define TEMPLATE_FIELD_VLAN TEMPLATE_FIELD_ITAG
+
+// Number of fixed templates predefined in the RTL9300 SoC
+#define N_FIXED_TEMPLATES 5
+// RTL9300 specific predefined templates
+static enum template_field_id fixed_templates[N_FIXED_TEMPLATES][N_FIXED_FIELDS] =
+{
+ {
+ TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2,
+ TEMPLATE_FIELD_SMAC0, TEMPLATE_FIELD_SMAC1, TEMPLATE_FIELD_SMAC2,
+ TEMPLATE_FIELD_VLAN, TEMPLATE_FIELD_IP_TOS_PROTO, TEMPLATE_FIELD_DSAP_SSAP,
+ TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1
+ }, {
+ TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0,
+ TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_IP_TOS_PROTO, TEMPLATE_FIELD_TCP_INFO,
+ TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_L4_DPORT, TEMPLATE_FIELD_VLAN,
+ TEMPLATE_FIELD_RANGE_CHK, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1
+ }, {
+ TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2,
+ TEMPLATE_FIELD_VLAN, TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_IP_TOS_PROTO,
+ TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0,
+ TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_L4_DPORT
+ }, {
+ TEMPLATE_FIELD_DIP0, TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_DIP2,
+ TEMPLATE_FIELD_DIP3, TEMPLATE_FIELD_DIP4, TEMPLATE_FIELD_DIP5,
+ TEMPLATE_FIELD_DIP6, TEMPLATE_FIELD_DIP7, TEMPLATE_FIELD_IP_TOS_PROTO,
+ TEMPLATE_FIELD_TCP_INFO, TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_L4_DPORT
+ }, {
+ TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_SIP2,
+ TEMPLATE_FIELD_SIP3, TEMPLATE_FIELD_SIP4, TEMPLATE_FIELD_SIP5,
+ TEMPLATE_FIELD_SIP6, TEMPLATE_FIELD_SIP7, TEMPLATE_FIELD_VLAN,
+ TEMPLATE_FIELD_RANGE_CHK, TEMPLATE_FIELD_SPM1, TEMPLATE_FIELD_SPM1
+ },
+};
+
+void rtl930x_print_matrix(void)
+{
+ int i;
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 6);
+
+ for (i = 0; i < 29; i++) {
+ rtl_table_read(r, i);
+ pr_debug("> %08x\n", sw_r32(rtl_table_data(r, 0)));
+ }
+ rtl_table_release(r);
+}
+
+inline void rtl930x_exec_tbl0_cmd(u32 cmd)
+{
+ sw_w32(cmd, RTL930X_TBL_ACCESS_CTRL_0);
+ do { } while (sw_r32(RTL930X_TBL_ACCESS_CTRL_0) & (1 << 17));
+}
+
+inline void rtl930x_exec_tbl1_cmd(u32 cmd)
+{
+ sw_w32(cmd, RTL930X_TBL_ACCESS_CTRL_1);
+ do { } while (sw_r32(RTL930X_TBL_ACCESS_CTRL_1) & (1 << 17));
+}
+
+inline int rtl930x_tbl_access_data_0(int i)
+{
+ return RTL930X_TBL_ACCESS_DATA_0(i);
+}
+
+static inline int rtl930x_l2_port_new_salrn(int p)
+{
+ return RTL930X_L2_PORT_SALRN(p);
+}
+
+static inline int rtl930x_l2_port_new_sa_fwd(int p)
+{
+ // TODO: The definition of the fields changed, because of the master-cpu in a stack
+ return RTL930X_L2_PORT_NEW_SA_FWD(p);
+}
+
+static inline int rtl930x_trk_mbr_ctr(int group)
+{
+ return RTL930X_TRK_MBR_CTRL + (group << 2);
+}
+
+static void rtl930x_vlan_tables_read(u32 vlan, struct rtl838x_vlan_info *info)
+{
+ u32 v, w;
+ // Read VLAN table (1) via register 0
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 1);
+
+ rtl_table_read(r, vlan);
+ v = sw_r32(rtl_table_data(r, 0));
+ w = sw_r32(rtl_table_data(r, 1));
+ pr_debug("VLAN_READ %d: %08x %08x\n", vlan, v, w);
+ rtl_table_release(r);
+
+ info->tagged_ports = v >> 3;
+ info->profile_id = (w >> 24) & 7;
+ info->hash_mc_fid = !!(w & BIT(27));
+ info->hash_uc_fid = !!(w & BIT(28));
+ info->fid = ((v & 0x7) << 3) | ((w >> 29) & 0x7);
+
+ // Read UNTAG table via table register 2
+ r = rtl_table_get(RTL9300_TBL_2, 0);
+ rtl_table_read(r, vlan);
+ v = sw_r32(rtl_table_data(r, 0));
+ rtl_table_release(r);
+
+ info->untagged_ports = v >> 3;
+}
+
+static void rtl930x_vlan_set_tagged(u32 vlan, struct rtl838x_vlan_info *info)
+{
+ u32 v, w;
+ // Access VLAN table (1) via register 0
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 1);
+
+ v = info->tagged_ports << 3;
+ v |= ((u32)info->fid) >> 3;
+
+ w = ((u32)info->fid) << 29;
+ w |= info->hash_mc_fid ? BIT(27) : 0;
+ w |= info->hash_uc_fid ? BIT(28) : 0;
+ w |= info->profile_id << 24;
+
+ sw_w32(v, rtl_table_data(r, 0));
+ sw_w32(w, rtl_table_data(r, 1));
+
+ rtl_table_write(r, vlan);
+ rtl_table_release(r);
+}
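+
+/* Usage sketch (illustrative only, not part of the driver): add a port as a
+ * tagged member of a VLAN by read-modify-write of the VLAN table entry.
+ * This assumes tagged_ports in struct rtl838x_vlan_info is a 64-bit port mask.
+ */
+static void __maybe_unused rtl930x_example_vlan_add_member(u32 vlan, int port)
+{
+ struct rtl838x_vlan_info info;
+
+ rtl930x_vlan_tables_read(vlan, &info);
+ info.tagged_ports |= BIT_ULL(port);
+ rtl930x_vlan_set_tagged(vlan, &info);
+}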
+
+void rtl930x_vlan_profile_dump(int profile)
+{
+ u32 p[5];
+
+ if (profile < 0 || profile > 7)
+ return;
+
+ p[0] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile));
+ p[1] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile) + 4);
+ p[2] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile) + 8) & 0x1FFFFFFF;
+ p[3] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile) + 12) & 0x1FFFFFFF;
+ p[4] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile) + 16) & 0x1FFFFFFF;
+
+ pr_info("VLAN %d: L2 learn: %d; Unknown MC PMasks: L2 %0x, IPv4 %0x, IPv6: %0x",
+ profile, p[0] & (3 << 21), p[2], p[3], p[4]);
+ pr_info(" Routing enabled: IPv4 UC %c, IPv6 UC %c, IPv4 MC %c, IPv6 MC %c\n",
+ p[0] & BIT(17) ? 'y' : 'n', p[0] & BIT(16) ? 'y' : 'n',
+ p[0] & BIT(13) ? 'y' : 'n', p[0] & BIT(12) ? 'y' : 'n');
+ pr_info(" Bridge enabled: IPv4 MC %c, IPv6 MC %c,\n",
+ p[0] & BIT(15) ? 'y' : 'n', p[0] & BIT(14) ? 'y' : 'n');
+ pr_info("VLAN profile %d: raw %08x %08x %08x %08x %08x\n",
+ profile, p[0], p[1], p[2], p[3], p[4]);
+}
+
+static void rtl930x_vlan_set_untagged(u32 vlan, u64 portmask)
+{
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_2, 0);
+
+ sw_w32(portmask << 3, rtl_table_data(r, 0));
+ rtl_table_write(r, vlan);
+ rtl_table_release(r);
+}
+
+/* Sets the L2 forwarding to be based on either the inner VLAN tag or the outer
+ */
+static void rtl930x_vlan_fwd_on_inner(int port, bool is_set)
+{
+ // Always set all tag modes to fwd based on either inner or outer tag
+ if (is_set)
+ sw_w32_mask(0, 0xf, RTL930X_VLAN_PORT_FWD + (port << 2));
+ else
+ sw_w32_mask(0xf, 0, RTL930X_VLAN_PORT_FWD + (port << 2));
+}
+
+static void rtl930x_vlan_profile_setup(int profile)
+{
+ u32 p[5];
+
+ pr_info("In %s\n", __func__);
+ p[0] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile));
+ p[1] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile) + 4);
+
+ // Enable routing of IPv4/6 unicast and IPv4/6 multicast traffic
+ p[0] |= BIT(17) | BIT(16) | BIT(13) | BIT(12);
+ p[2] = 0x1fffffff; // L2 unknown MC flooding portmask all ports, including the CPU-port
+ p[3] = 0x1fffffff; // IPv4 unknown MC flooding portmask
+ p[4] = 0x1fffffff; // IPv6 unknown MC flooding portmask
+
+ sw_w32(p[0], RTL930X_VLAN_PROFILE_SET(profile));
+ sw_w32(p[1], RTL930X_VLAN_PROFILE_SET(profile) + 4);
+ sw_w32(p[2], RTL930X_VLAN_PROFILE_SET(profile) + 8);
+ sw_w32(p[3], RTL930X_VLAN_PROFILE_SET(profile) + 12);
+ sw_w32(p[4], RTL930X_VLAN_PROFILE_SET(profile) + 16);
+}
+
+static void rtl930x_l2_learning_setup(void)
+{
+ // Portmask for flooding broadcast traffic
+ sw_w32(0x1fffffff, RTL930X_L2_BC_FLD_PMSK);
+
+ // Portmask for flooding unicast traffic with unknown destination
+ sw_w32(0x1fffffff, RTL930X_L2_UNKN_UC_FLD_PMSK);
+
+ // Limit learning to maximum: 32k entries, after that just flood (bits 0-1)
+ sw_w32((0x7fff << 2) | 0, RTL930X_L2_LRN_CONSTRT_CTRL);
+}
+
+static void rtl930x_stp_get(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
+{
+ int i;
+ u32 cmd = 1 << 17 /* Execute cmd */
+ | 0 << 16 /* Read */
+ | 4 << 12 /* Table type 4 */
+ | (msti & 0xfff);
+ priv->r->exec_tbl0_cmd(cmd);
+
+ for (i = 0; i < 2; i++)
+ port_state[i] = sw_r32(RTL930X_TBL_ACCESS_DATA_0(i));
+ pr_debug("MSTI: %d STATE: %08x, %08x\n", msti, port_state[0], port_state[1]);
+}
+
+static void rtl930x_stp_set(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
+{
+ int i;
+ u32 cmd = 1 << 17 /* Execute cmd */
+ | 1 << 16 /* Write */
+ | 4 << 12 /* Table type 4 */
+ | (msti & 0xfff);
+
+ for (i = 0; i < 2; i++)
+ sw_w32(port_state[i], RTL930X_TBL_ACCESS_DATA_0(i));
+ priv->r->exec_tbl0_cmd(cmd);
+}
+
+static inline int rtl930x_mac_force_mode_ctrl(int p)
+{
+ return RTL930X_MAC_FORCE_MODE_CTRL + (p << 2);
+}
+
+static inline int rtl930x_mac_port_ctrl(int p)
+{
+ return RTL930X_MAC_L2_PORT_CTRL(p);
+}
+
+static inline int rtl930x_mac_link_spd_sts(int p)
+{
+ return RTL930X_MAC_LINK_SPD_STS(p);
+}
+
+static u64 rtl930x_l2_hash_seed(u64 mac, u32 vid)
+{
+ u64 v = vid;
+
+ v <<= 48;
+ v |= mac;
+
+ return v;
+}
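+
+/* Worked example (illustrative): for MAC 00:11:22:33:44:55 in VID 100 the
+ * seed is 0x0064001122334455, i.e. the 12-bit VID in bits 48-59 above the
+ * 48-bit MAC.
+ */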
+
+/*
+ * Calculate both the block 0 and the block 1 hash by applying the same hash
+ * algorithm as the one currently used by the ASIC to the seed, and return
+ * both hashes in the lower and higher word of the return value, since only
+ * 12 bits of the hash are significant
+ */
+static u32 rtl930x_l2_hash_key(struct rtl838x_switch_priv *priv, u64 seed)
+{
+ u32 k0, k1, h1, h2, h;
+
+ k0 = (u32) (((seed >> 55) & 0x1f) ^ ((seed >> 44) & 0x7ff)
+ ^ ((seed >> 33) & 0x7ff) ^ ((seed >> 22) & 0x7ff)
+ ^ ((seed >> 11) & 0x7ff) ^ (seed & 0x7ff));
+
+ h1 = (seed >> 11) & 0x7ff;
+ h1 = ((h1 & 0x1f) << 6) | ((h1 >> 5) & 0x3f);
+
+ h2 = (seed >> 33) & 0x7ff;
+ h2 = ((h2 & 0x3f) << 5) | ((h2 >> 6) & 0x3f);
+
+ k1 = (u32) (((seed << 55) & 0x1f) ^ ((seed >> 44) & 0x7ff) ^ h2
+ ^ ((seed >> 22) & 0x7ff) ^ h1
+ ^ (seed & 0x7ff));
+
+ // Algorithm choice for block 0
+ if (sw_r32(RTL930X_L2_CTRL) & BIT(0))
+ h = k1;
+ else
+ h = k0;
+
+ /* Algorithm choice for block 1
+ * Since k0 and k1 are < 2048, adding 2048 will offset the hash into the second
+ * half of hash-space
+ * 2048 is in fact the hash-table size 16384 divided by 4 hashes per bucket
+ * divided by 2 to divide the hash space in 2
+ */
+ if (sw_r32(RTL930X_L2_CTRL) & BIT(1))
+ h |= (k1 + 2048) << 16;
+ else
+ h |= (k0 + 2048) << 16;
+
+ return h;
+}
+
+/*
+ * Fills an L2 entry structure from the SoC registers
+ */
+static void rtl930x_fill_l2_entry(u32 r[], struct rtl838x_l2_entry *e)
+{
+ pr_debug("In %s valid?\n", __func__);
+ e->valid = !!(r[2] & BIT(31));
+ if (!e->valid)
+ return;
+
+ pr_debug("In %s is valid\n", __func__);
+ e->is_ip_mc = false;
+ e->is_ipv6_mc = false;
+
+ // TODO: Is there not a function to copy directly MAC memory?
+ e->mac[0] = (r[0] >> 24);
+ e->mac[1] = (r[0] >> 16);
+ e->mac[2] = (r[0] >> 8);
+ e->mac[3] = r[0];
+ e->mac[4] = (r[1] >> 24);
+ e->mac[5] = (r[1] >> 16);
+
+ e->next_hop = !!(r[2] & BIT(12));
+ e->rvid = r[1] & 0xfff;
+
+ /* Is it a unicast entry? check multicast bit */
+ if (!(e->mac[0] & 1)) {
+ e->type = L2_UNICAST;
+ e->is_static = !!(r[2] & BIT(14));
+ e->port = (r[2] >> 20) & 0x3ff;
+ // Check for trunk port
+ if (r[2] & BIT(30)) {
+ e->is_trunk = true;
+ e->stack_dev = (e->port >> 9) & 1;
+ e->trunk = e->port & 0x3f;
+ } else {
+ e->is_trunk = false;
+ e->stack_dev = (e->port >> 6) & 0xf;
+ e->port = e->port & 0x3f;
+ }
+
+ e->block_da = !!(r[2] & BIT(15));
+ e->block_sa = !!(r[2] & BIT(16));
+ e->suspended = !!(r[2] & BIT(13));
+ e->age = (r[2] >> 17) & 3;
+ e->valid = true;
+ // the UC_VID field in hardware is used for the VID or for the route id
+ if (e->next_hop) {
+ e->nh_route_id = r[2] & 0x7ff;
+ e->vid = 0;
+ } else {
+ e->vid = r[2] & 0xfff;
+ e->nh_route_id = 0;
+ }
+ } else {
+ e->valid = true;
+ e->type = L2_MULTICAST;
+ e->mc_portmask_index = (r[2] >> 16) & 0x3ff;
+ }
+}
+
+/*
+ * Fills the 3 SoC table registers r[] with the information of in the rtl838x_l2_entry
+ */
+static void rtl930x_fill_l2_row(u32 r[], struct rtl838x_l2_entry *e)
+{
+ u32 port;
+
+ if (!e->valid) {
+ r[0] = r[1] = r[2] = 0;
+ return;
+ }
+
+ r[2] = BIT(31); // Set valid bit
+
+ r[0] = ((u32)e->mac[0]) << 24 | ((u32)e->mac[1]) << 16
+ | ((u32)e->mac[2]) << 8 | ((u32)e->mac[3]);
+ r[1] = ((u32)e->mac[4]) << 24 | ((u32)e->mac[5]) << 16;
+
+ r[2] |= e->next_hop ? BIT(12) : 0;
+
+ if (e->type == L2_UNICAST) {
+ r[2] |= e->is_static ? BIT(14) : 0;
+ r[1] |= e->rvid & 0xfff;
+ r[2] |= (e->port & 0x3ff) << 20;
+ if (e->is_trunk) {
+ r[2] |= BIT(30);
+ port = e->stack_dev << 9 | (e->port & 0x3f);
+ } else {
+ port = (e->stack_dev & 0xf) << 6;
+ port |= e->port & 0x3f;
+ }
+ r[2] |= port << 20;
+ r[2] |= e->block_da ? BIT(15) : 0;
+ r[2] |= e->block_sa ? BIT(16) : 0;
+ r[2] |= e->suspended ? BIT(13) : 0;
+ r[2] |= (e->age & 0x3) << 17;
+ // the UC_VID field in hardware is used for the VID or for the route id
+ if (e->next_hop)
+ r[2] |= e->nh_route_id & 0x7ff;
+ else
+ r[2] |= e->vid & 0xfff;
+ } else { // L2_MULTICAST
+ r[2] |= (e->mc_portmask_index & 0x3ff) << 16;
+ r[2] |= e->mc_mac_index & 0x7ff;
+ }
+}
+
+/*
+ * Read an L2 UC or MC entry out of a hash bucket of the L2 forwarding table
+ * hash is the id of the bucket and pos is the position of the entry in that bucket
+ * The data read from the SoC is filled into rtl838x_l2_entry
+ */
+static u64 rtl930x_read_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e)
+{
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 0);
+ u32 idx;
+ int i;
+ u64 mac;
+ u64 seed;
+
+ pr_debug("%s: hash %08x, pos: %d\n", __func__, hash, pos);
+
+ /* On the RTL93xx, 2 different hash algorithms are used, giving a total of
+ * 8 bucket positions that need to be searched, 4 for each hash half.
+ * Use the second hash space when the position is 4 or above.
+ */
+ if (pos >= 4) {
+ pos -= 4;
+ hash >>= 16;
+ } else {
+ hash &= 0xffff;
+ }
+
+ idx = (0 << 14) | (hash << 2) | pos; // Search SRAM, with hash and at pos in bucket
+ pr_debug("%s: NOW hash %08x, pos: %d\n", __func__, hash, pos);
+
+ rtl_table_read(q, idx);
+ for (i = 0; i < 3; i++)
+ r[i] = sw_r32(rtl_table_data(q, i));
+
+ rtl_table_release(q);
+
+ rtl930x_fill_l2_entry(r, e);
+
+ pr_debug("%s: valid: %d, nh: %d\n", __func__, e->valid, e->next_hop);
+ if (!e->valid)
+ return 0;
+
+ mac = ((u64)e->mac[0]) << 40 | ((u64)e->mac[1]) << 32 | ((u64)e->mac[2]) << 24
+ | ((u64)e->mac[3]) << 16 | ((u64)e->mac[4]) << 8 | ((u64)e->mac[5]);
+
+ seed = rtl930x_l2_hash_seed(mac, e->rvid);
+ pr_debug("%s: mac %016llx, seed %016llx\n", __func__, mac, seed);
+ // return vid with concatenated mac as unique id
+ return seed;
+}
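+
+/* Usage sketch (illustrative only, not part of the driver): look up a unicast
+ * entry by MAC and VID. Positions 0-3 probe the block 0 bucket, positions 4-7
+ * the block 1 bucket; a hit is identified by the seed returned above.
+ */
+static bool __maybe_unused rtl930x_example_l2_lookup(struct rtl838x_switch_priv *priv,
+ u64 mac, u32 vid, struct rtl838x_l2_entry *e)
+{
+ u64 seed = rtl930x_l2_hash_seed(mac, vid);
+ u32 key = rtl930x_l2_hash_key(priv, seed);
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ if (rtl930x_read_l2_entry_using_hash(key, i, e) == seed)
+ return true;
+ }
+
+ return false;
+}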
+
+static void rtl930x_write_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e)
+{
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 0);
+ u32 idx = (0 << 14) | (hash << 2) | pos; // Access SRAM, with hash and at pos in bucket
+ int i;
+
+ pr_debug("%s: hash %d, pos %d\n", __func__, hash, pos);
+ pr_debug("%s: index %d -> mac %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, idx,
+ e->mac[0], e->mac[1], e->mac[2], e->mac[3],e->mac[4],e->mac[5]);
+
+ rtl930x_fill_l2_row(r, e);
+
+ for (i = 0; i < 3; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+}
+
+static u64 rtl930x_read_cam(int idx, struct rtl838x_l2_entry *e)
+{
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 1);
+ int i;
+
+ rtl_table_read(q, idx);
+ for (i = 0; i < 3; i++)
+ r[i] = sw_r32(rtl_table_data(q, i));
+
+ rtl_table_release(q);
+
+ rtl930x_fill_l2_entry(r, e);
+ if (!e->valid)
+ return 0;
+
+ // return mac with concatenated vid as unique id
+ return ((u64)r[0] << 28) | ((r[1] & 0xffff0000) >> 4) | e->vid;
+}
+
+static void rtl930x_write_cam(int idx, struct rtl838x_l2_entry *e)
+{
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 1); // Access L2 Table 1
+ int i;
+
+ rtl930x_fill_l2_row(r, e);
+
+ for (i = 0; i < 3; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+}
+
+static u64 rtl930x_read_mcast_pmask(int idx)
+{
+ u32 portmask;
+ // Read MC_PORTMASK (2) via register RTL9300_TBL_L2
+ struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 2);
+
+ rtl_table_read(q, idx);
+ portmask = sw_r32(rtl_table_data(q, 0));
+ portmask >>= 3;
+ rtl_table_release(q);
+
+ pr_debug("%s: Index idx %d has portmask %08x\n", __func__, idx, portmask);
+ return portmask;
+}
+
+static void rtl930x_write_mcast_pmask(int idx, u64 portmask)
+{
+ u32 pm = portmask;
+
+ // Access MC_PORTMASK (2) via register RTL9300_TBL_L2
+ struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 2);
+
+ pr_debug("%s: Index idx %d has portmask %08x\n", __func__, idx, pm);
+ pm <<= 3;
+ sw_w32(pm, rtl_table_data(q, 0));
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+}
+
+u64 rtl930x_traffic_get(int source)
+{
+ u32 v;
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 6);
+
+ rtl_table_read(r, source);
+ v = sw_r32(rtl_table_data(r, 0));
+ rtl_table_release(r);
+ return v >> 3;
+}
+
+/*
+ * Enable traffic between a source port and a destination port matrix
+ */
+void rtl930x_traffic_set(int source, u64 dest_matrix)
+{
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 6);
+
+ sw_w32((dest_matrix << 3), rtl_table_data(r, 0));
+ rtl_table_write(r, source);
+ rtl_table_release(r);
+}
+
+void rtl930x_traffic_enable(int source, int dest)
+{
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 6);
+ rtl_table_read(r, source);
+ sw_w32_mask(0, BIT(dest + 3), rtl_table_data(r, 0));
+ rtl_table_write(r, source);
+ rtl_table_release(r);
+}
+
+void rtl930x_traffic_disable(int source, int dest)
+{
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 6);
+ rtl_table_read(r, source);
+ sw_w32_mask(BIT(dest + 3), 0, rtl_table_data(r, 0));
+ rtl_table_write(r, source);
+ rtl_table_release(r);
+}
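+
+/* Usage sketch (illustrative): restrict source port 1 to forward only towards
+ * ports 2 and 3 by writing its destination port matrix:
+ * rtl930x_traffic_set(1, BIT(2) | BIT(3));
+ */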
+
+void rtl9300_dump_debug(void)
+{
+ int i;
+ u16 r = RTL930X_STAT_PRVTE_DROP_COUNTER0;
+
+ for (i = 0; i < 10; i++) {
+ pr_info("# %d %08x %08x %08x %08x %08x %08x %08x %08x\n", i * 8,
+ sw_r32(r), sw_r32(r + 4), sw_r32(r + 8), sw_r32(r + 12),
+ sw_r32(r + 16), sw_r32(r + 20), sw_r32(r + 24), sw_r32(r + 28));
+ r += 32;
+ }
+ pr_info("# %08x %08x %08x %08x %08x\n",
+ sw_r32(r), sw_r32(r + 4), sw_r32(r + 8), sw_r32(r + 12), sw_r32(r + 16));
+ rtl930x_print_matrix();
+ pr_info("RTL930X_L2_PORT_SABLK_CTRL: %08x, RTL930X_L2_PORT_DABLK_CTRL %08x\n",
+ sw_r32(RTL930X_L2_PORT_SABLK_CTRL), sw_r32(RTL930X_L2_PORT_DABLK_CTRL));
+}
+
+irqreturn_t rtl930x_switch_irq(int irq, void *dev_id)
+{
+ struct dsa_switch *ds = dev_id;
+ u32 ports = sw_r32(RTL930X_ISR_PORT_LINK_STS_CHG);
+ u32 link;
+ int i;
+
+ /* Clear status */
+ sw_w32(ports, RTL930X_ISR_PORT_LINK_STS_CHG);
+
+ for (i = 0; i < 28; i++) {
+ if (ports & BIT(i)) {
+ /* Read the register twice because of issues with latency at least
+ * with the external RTL8226 PHY on the XGS1210 */
+ link = sw_r32(RTL930X_MAC_LINK_STS);
+ link = sw_r32(RTL930X_MAC_LINK_STS);
+ if (link & BIT(i))
+ dsa_port_phylink_mac_change(ds, i, true);
+ else
+ dsa_port_phylink_mac_change(ds, i, false);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+int rtl930x_write_phy(u32 port, u32 page, u32 reg, u32 val)
+{
+ u32 v;
+ int err = 0;
+
+ pr_debug("%s: port %d, page: %d, reg: %x, val: %x\n", __func__, port, page, reg, val);
+
+ if (port > 63 || page > 4095 || reg > 31)
+ return -ENOTSUPP;
+
+ val &= 0xffff;
+ mutex_lock(&smi_lock);
+
+ sw_w32(BIT(port), RTL930X_SMI_ACCESS_PHY_CTRL_0);
+ sw_w32_mask(0xffff << 16, val << 16, RTL930X_SMI_ACCESS_PHY_CTRL_2);
+ v = reg << 20 | page << 3 | 0x1f << 15 | BIT(2) | BIT(0);
+ sw_w32(v, RTL930X_SMI_ACCESS_PHY_CTRL_1);
+
+ do {
+ v = sw_r32(RTL930X_SMI_ACCESS_PHY_CTRL_1);
+ } while (v & 0x1);
+
+ if (v & 0x2)
+ err = -EIO;
+
+ mutex_unlock(&smi_lock);
+
+ return err;
+}
+
+int rtl930x_read_phy(u32 port, u32 page, u32 reg, u32 *val)
+{
+ u32 v;
+ int err = 0;
+
+ if (port > 63 || page > 4095 || reg > 31)
+ return -ENOTSUPP;
+
+ mutex_lock(&smi_lock);
+
+ sw_w32_mask(0xffff << 16, port << 16, RTL930X_SMI_ACCESS_PHY_CTRL_2);
+ v = reg << 20 | page << 3 | 0x1f << 15 | 1;
+ sw_w32(v, RTL930X_SMI_ACCESS_PHY_CTRL_1);
+
+ do {
+ v = sw_r32(RTL930X_SMI_ACCESS_PHY_CTRL_1);
+ } while (v & 0x1);
+
+ if (v & BIT(25)) {
+ pr_debug("Error reading phy %d, register %d\n", port, reg);
+ err = -EIO;
+ }
+ *val = (sw_r32(RTL930X_SMI_ACCESS_PHY_CTRL_2) & 0xffff);
+
+ pr_debug("%s: port %d, page: %d, reg: %x, val: %x\n", __func__, port, page, reg, *val);
+
+ mutex_unlock(&smi_lock);
+
+ return err;
+}
+
+/*
+ * Write to an mmd register of the PHY
+ */
+int rtl930x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val)
+{
+ int err = 0;
+ u32 v;
+
+ mutex_lock(&smi_lock);
+
+ // Set PHY to access
+ sw_w32(BIT(port), RTL930X_SMI_ACCESS_PHY_CTRL_0);
+
+ // Set data to write
+ sw_w32_mask(0xffff << 16, val << 16, RTL930X_SMI_ACCESS_PHY_CTRL_2);
+
+ // Set MMD device number and register to write to
+ sw_w32(devnum << 16 | (regnum & 0xffff), RTL930X_SMI_ACCESS_PHY_CTRL_3);
+
+ v = BIT(2) | BIT(1) | BIT(0); // WRITE | MMD-access | EXEC
+ sw_w32(v, RTL930X_SMI_ACCESS_PHY_CTRL_1);
+
+ do {
+ v = sw_r32(RTL930X_SMI_ACCESS_PHY_CTRL_1);
+ } while (v & BIT(0));
+
+ pr_debug("%s: port %d, regnum: %x, val: %x (err %d)\n", __func__, port, regnum, val, err);
+ mutex_unlock(&smi_lock);
+ return err;
+}
+
+/*
+ * Read an mmd register of the PHY
+ */
+int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val)
+{
+ int err = 0;
+ u32 v;
+
+ mutex_lock(&smi_lock);
+
+ // Set PHY to access
+ sw_w32_mask(0xffff << 16, port << 16, RTL930X_SMI_ACCESS_PHY_CTRL_2);
+
+ // Set MMD device number and register to write to
+ sw_w32(devnum << 16 | (regnum & 0xffff), RTL930X_SMI_ACCESS_PHY_CTRL_3);
+
+ v = BIT(1) | BIT(0); // MMD-access | EXEC
+ sw_w32(v, RTL930X_SMI_ACCESS_PHY_CTRL_1);
+
+ do {
+ v = sw_r32(RTL930X_SMI_ACCESS_PHY_CTRL_1);
+ } while (v & BIT(0));
+ // There is no error-checking via BIT 25 of v, as it does not seem to be set correctly
+ *val = (sw_r32(RTL930X_SMI_ACCESS_PHY_CTRL_2) & 0xffff);
+ pr_debug("%s: port %d, regnum: %x, val: %x (err %d)\n", __func__, port, regnum, *val, err);
+
+ mutex_unlock(&smi_lock);
+
+ return err;
+}
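+
+/* Usage sketch (illustrative only, not part of the driver): read the standard
+ * Clause 45 EEE advertisement register (MMD 7, register 0x3c) of the PHY
+ * attached to a port.
+ */
+static int __maybe_unused rtl930x_example_read_eee_adv(u32 port, u32 *adv)
+{
+ return rtl930x_read_mmd_phy(port, 7, 0x3c, adv);
+}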
+
+/*
+ * Calculate both the block 0 and the block 1 hash, and return in
+ * lower and higher word of the return value since only 12 bit of
+ * the hash are significant
+ */
+u32 rtl930x_hash(struct rtl838x_switch_priv *priv, u64 seed)
+{
+ u32 k0, k1, h1, h2, h;
+
+ k0 = (u32) (((seed >> 55) & 0x1f) ^ ((seed >> 44) & 0x7ff)
+ ^ ((seed >> 33) & 0x7ff) ^ ((seed >> 22) & 0x7ff)
+ ^ ((seed >> 11) & 0x7ff) ^ (seed & 0x7ff));
+
+ h1 = (seed >> 11) & 0x7ff;
+ h1 = ((h1 & 0x1f) << 6) | ((h1 >> 5) & 0x3f);
+
+ h2 = (seed >> 33) & 0x7ff;
+ h2 = ((h2 & 0x3f) << 5) | ((h2 >> 6) & 0x3f);
+
+ k1 = (u32) (((seed >> 55) & 0x1f) ^ ((seed >> 44) & 0x7ff) ^ h2
+ ^ ((seed >> 22) & 0x7ff) ^ h1
+ ^ (seed & 0x7ff));
+
+ // Algorithm choice for block 0
+ if (sw_r32(RTL930X_L2_CTRL) & BIT(0))
+ h = k1;
+ else
+ h = k0;
+
+ /* Algorithm choice for block 1
+ * Since k0 and k1 are < 2048, adding 2048 will offset the hash into the second
+ * half of hash-space
+ * 2048 is the hash-table size of 16384 divided by the 4 hashes per bucket,
+ * divided again by 2 to split the hash space in two
+ */
+ if (sw_r32(RTL930X_L2_CTRL) & BIT(1))
+ h |= (k1 + 2048) << 16;
+ else
+ h |= (k0 + 2048) << 16;
+
+ return h;
+}
+
+/*
+ * Enables or disables the EEE/EEEP capability of a port
+ */
+void rtl930x_port_eee_set(struct rtl838x_switch_priv *priv, int port, bool enable)
+{
+ u32 v;
+
+ // This works only for Ethernet ports; on the RTL930X, ports 26 and above are SFP
+ if (port >= 26)
+ return;
+
+ pr_debug("In %s: setting port %d to %d\n", __func__, port, enable);
+ v = enable ? 0x3f : 0x0;
+
+ // Set EEE/EEEP state for 100, 500, 1000MBit and 2.5, 5 and 10GBit
+ sw_w32_mask(0, v << 10, rtl930x_mac_force_mode_ctrl(port));
+
+ // Set TX/RX EEE state
+ v = enable ? 0x3 : 0x0;
+ sw_w32(v, RTL930X_EEE_CTRL(port));
+
+ priv->ports[port].eee_enabled = enable;
+}
+
+/*
+ * Get EEE own capabilities and negotiation result
+ */
+int rtl930x_eee_port_ability(struct rtl838x_switch_priv *priv, struct ethtool_eee *e, int port)
+{
+ u32 link, a;
+
+ if (port >= 26)
+ return -ENOTSUPP;
+
+ pr_info("In %s, port %d\n", __func__, port);
+ link = sw_r32(RTL930X_MAC_LINK_STS);
+ link = sw_r32(RTL930X_MAC_LINK_STS);
+ if (!(link & BIT(port)))
+ return 0;
+
+ pr_info("Setting advertised\n");
+ if (sw_r32(rtl930x_mac_force_mode_ctrl(port)) & BIT(10))
+ e->advertised |= ADVERTISED_100baseT_Full;
+
+ if (sw_r32(rtl930x_mac_force_mode_ctrl(port)) & BIT(12))
+ e->advertised |= ADVERTISED_1000baseT_Full;
+
+ if (priv->ports[port].is2G5 && sw_r32(rtl930x_mac_force_mode_ctrl(port)) & BIT(13)) {
+ pr_info("ADVERTISING 2.5G EEE\n");
+ e->advertised |= ADVERTISED_2500baseX_Full;
+ }
+
+ if (priv->ports[port].is10G && sw_r32(rtl930x_mac_force_mode_ctrl(port)) & BIT(15))
+ e->advertised |= ADVERTISED_10000baseT_Full;
+
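+ // Likewise, read the latched EEE ability register twice to get the current state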
+ a = sw_r32(RTL930X_MAC_EEE_ABLTY);
+ a = sw_r32(RTL930X_MAC_EEE_ABLTY);
+ pr_info("Link partner: %08x\n", a);
+ if (a & BIT(port)) {
+ e->lp_advertised = ADVERTISED_100baseT_Full;
+ e->lp_advertised |= ADVERTISED_1000baseT_Full;
+ if (priv->ports[port].is2G5)
+ e->lp_advertised |= ADVERTISED_2500baseX_Full;
+ if (priv->ports[port].is10G)
+ e->lp_advertised |= ADVERTISED_10000baseT_Full;
+ }
+
+ // Read 2x to clear latched state
+ a = sw_r32(RTL930X_EEEP_PORT_CTRL(port));
+ a = sw_r32(RTL930X_EEEP_PORT_CTRL(port));
+ pr_info("%s RTL930X_EEEP_PORT_CTRL: %08x\n", __func__, a);
+
+ return 0;
+}
+
+static void rtl930x_init_eee(struct rtl838x_switch_priv *priv, bool enable)
+{
+ int i;
+
+ pr_info("Setting up EEE, state: %d\n", enable);
+
+ // Setup EEE on all ports
+ for (i = 0; i < priv->cpu_port; i++) {
+ if (priv->ports[i].phy)
+ rtl930x_port_eee_set(priv, i, enable);
+ }
+
+ priv->eee_enabled = enable;
+}
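+
+// Extract the len-bit wide field starting at bit lsb from val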
+#define HASH_PICK(val, lsb, len) (((val) & (((1 << (len)) - 1) << (lsb))) >> (lsb))
+
+static u32 rtl930x_l3_hash4(u32 ip, int algorithm, bool move_dip)
+{
+ u32 rows[4];
+ u32 hash;
+ u32 s0, s1, pH;
+
+ memset(rows, 0, sizeof(rows));
+
+ rows[0] = HASH_PICK(ip, 27, 5);
+ rows[1] = HASH_PICK(ip, 18, 9);
+ rows[2] = HASH_PICK(ip, 9, 9);
+
+ if (!move_dip)
+ rows[3] = HASH_PICK(ip, 0, 9);
+
+ if (!algorithm) {
+ hash = rows[0] ^ rows[1] ^ rows[2] ^ rows[3];
+ } else {
+ s0 = rows[0] + rows[1] + rows[2];
+ s1 = (s0 & 0x1ff) + ((s0 & (0x1ff << 9)) >> 9);
+ pH = (s1 & 0x1ff) + ((s1 & (0x1ff << 9)) >> 9);
+ hash = pH ^ rows[3];
+ }
+ return hash;
+}
+
+static u32 rtl930x_l3_hash6(struct in6_addr *ip6, int algorithm, bool move_dip)
+{
+ u32 rows[16];
+ u32 hash;
+ u32 s0, s1, pH;
+
+ // Rows not filled in below (e.g. when move_dip is set) must be zero
+ memset(rows, 0, sizeof(rows));
+
+ rows[0] = (HASH_PICK(ip6->s6_addr[0], 6, 2) << 0);
+ rows[1] = (HASH_PICK(ip6->s6_addr[0], 0, 6) << 3) | HASH_PICK(ip6->s6_addr[1], 5, 3);
+ rows[2] = (HASH_PICK(ip6->s6_addr[1], 0, 5) << 4) | HASH_PICK(ip6->s6_addr[2], 4, 4);
+ rows[3] = (HASH_PICK(ip6->s6_addr[2], 0, 4) << 5) | HASH_PICK(ip6->s6_addr[3], 3, 5);
+ rows[4] = (HASH_PICK(ip6->s6_addr[3], 0, 3) << 6) | HASH_PICK(ip6->s6_addr[4], 2, 6);
+ rows[5] = (HASH_PICK(ip6->s6_addr[4], 0, 2) << 7) | HASH_PICK(ip6->s6_addr[5], 1, 7);
+ rows[6] = (HASH_PICK(ip6->s6_addr[5], 0, 1) << 8) | HASH_PICK(ip6->s6_addr[6], 0, 8);
+ rows[7] = (HASH_PICK(ip6->s6_addr[7], 0, 8) << 1) | HASH_PICK(ip6->s6_addr[8], 7, 1);
+ rows[8] = (HASH_PICK(ip6->s6_addr[8], 0, 7) << 2) | HASH_PICK(ip6->s6_addr[9], 6, 2);
+ rows[9] = (HASH_PICK(ip6->s6_addr[9], 0, 6) << 3) | HASH_PICK(ip6->s6_addr[10], 5, 3);
+ rows[10] = (HASH_PICK(ip6->s6_addr[10], 0, 5) << 4) | HASH_PICK(ip6->s6_addr[11], 4, 4);
+ if (!algorithm) {
+ rows[11] = (HASH_PICK(ip6->s6_addr[11], 0, 4) << 5)
+ | (HASH_PICK(ip6->s6_addr[12], 3, 5) << 0);
+ rows[12] = (HASH_PICK(ip6->s6_addr[12], 0, 3) << 6)
+ | (HASH_PICK(ip6->s6_addr[13], 2, 6) << 0);
+ rows[13] = (HASH_PICK(ip6->s6_addr[13], 0, 2) << 7)
+ | (HASH_PICK(ip6->s6_addr[14], 1, 7) << 0);
+ if (!move_dip) {
+ rows[14] = (HASH_PICK(ip6->s6_addr[14], 0, 1) << 8)
+ | (HASH_PICK(ip6->s6_addr[15], 0, 8) << 0);
+ }
+ hash = rows[0] ^ rows[1] ^ rows[2] ^ rows[3] ^ rows[4] ^ rows[5] ^ rows[6]
+ ^ rows[7] ^ rows[8] ^ rows[9] ^ rows[10] ^ rows[11] ^ rows[12]
+ ^ rows[13] ^ rows[14];
+ } else {
+ rows[11] = (HASH_PICK(ip6->s6_addr[11], 0, 4) << 5);
+ rows[12] = (HASH_PICK(ip6->s6_addr[12], 3, 5) << 0);
+ rows[13] = (HASH_PICK(ip6->s6_addr[12], 0, 3) << 6)
+ | HASH_PICK(ip6->s6_addr[13], 2, 6);
+ rows[14] = (HASH_PICK(ip6->s6_addr[13], 0, 2) << 7)
+ | HASH_PICK(ip6->s6_addr[14], 1, 7);
+ if (!move_dip) {
+ rows[15] = (HASH_PICK(ip6->s6_addr[14], 0, 1) << 8)
+ | (HASH_PICK(ip6->s6_addr[15], 0, 8) << 0);
+ }
+ s0 = rows[12] + rows[13] + rows[14];
+ s1 = (s0 & 0x1ff) + ((s0 & (0x1ff << 9)) >> 9);
+ pH = (s1 & 0x1ff) + ((s1 & (0x1ff << 9)) >> 9);
+ hash = rows[0] ^ rows[1] ^ rows[2] ^ rows[3] ^ rows[4] ^ rows[5] ^ rows[6]
+ ^ rows[7] ^ rows[8] ^ rows[9] ^ rows[10] ^ rows[11] ^ pH ^ rows[15];
+ }
+ return hash;
+}
+
+/*
+ * Read a prefix route entry from the L3_PREFIX_ROUTE_IPUC table
+ * We currently only support IPv4 and IPv6 unicast routes
+ */
+static void rtl930x_route_read(int idx, struct rtl83xx_route *rt)
+{
+ u32 v, ip4_m;
+ bool host_route, default_route;
+ struct in6_addr ip6_m;
+
+ // Read L3_PREFIX_ROUTE_IPUC table (2) via register RTL9300_TBL_1
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 2);
+
+ rtl_table_read(r, idx);
+ // The table has a size of 11 registers
+ rt->attr.valid = !!(sw_r32(rtl_table_data(r, 0)) & BIT(31));
+ if (!rt->attr.valid)
+ goto out;
+
+ rt->attr.type = (sw_r32(rtl_table_data(r, 0)) >> 29) & 0x3;
+
+ v = sw_r32(rtl_table_data(r, 10));
+ host_route = !!(v & BIT(21));
+ default_route = !!(v & BIT(20));
+ rt->prefix_len = -1;
+ pr_info("%s: host route %d, default_route %d\n", __func__, host_route, default_route);
+
+ switch (rt->attr.type) {
+ case 0: // IPv4 Unicast route
+ rt->dst_ip = sw_r32(rtl_table_data(r, 4));
+ ip4_m = sw_r32(rtl_table_data(r, 9));
+ pr_info("%s: Read ip4 mask: %08x\n", __func__, ip4_m);
+ rt->prefix_len = host_route ? 32 : -1;
+ rt->prefix_len = (rt->prefix_len < 0 && default_route) ? 0 : rt->prefix_len;
+ if (rt->prefix_len < 0)
+ rt->prefix_len = inet_mask_len(ip4_m);
+ break;
+ case 2: // IPv6 Unicast route
+ ipv6_addr_set(&rt->dst_ip6,
+ sw_r32(rtl_table_data(r, 1)), sw_r32(rtl_table_data(r, 2)),
+ sw_r32(rtl_table_data(r, 3)), sw_r32(rtl_table_data(r, 4)));
+ ipv6_addr_set(&ip6_m,
+ sw_r32(rtl_table_data(r, 6)), sw_r32(rtl_table_data(r, 7)),
+ sw_r32(rtl_table_data(r, 8)), sw_r32(rtl_table_data(r, 9)));
+ rt->prefix_len = host_route ? 128 : -1;
+ rt->prefix_len = (rt->prefix_len < 0 && default_route) ? 0 : rt->prefix_len;
+ if (rt->prefix_len < 0)
+ rt->prefix_len = find_last_bit((unsigned long int *)&ip6_m.s6_addr32,
+ 128);
+ break;
+ case 1: // IPv4 Multicast route
+ case 3: // IPv6 Multicast route
+ pr_warn("%s: route type not supported\n", __func__);
+ goto out;
+ }
+
+ rt->attr.hit = !!(v & BIT(22));
+ rt->attr.action = (v >> 18) & 3;
+ rt->nh.id = (v >> 7) & 0x7ff;
+ rt->attr.ttl_dec = !!(v & BIT(6));
+ rt->attr.ttl_check = !!(v & BIT(5));
+ rt->attr.dst_null = !!(v & BIT(4));
+ rt->attr.qos_as = !!(v & BIT(3));
+ rt->attr.qos_prio = v & 0x7;
+ pr_info("%s: index %d is valid: %d\n", __func__, idx, rt->attr.valid);
+ pr_info("%s: next_hop: %d, hit: %d, action :%d, ttl_dec %d, ttl_check %d, dst_null %d\n",
+ __func__, rt->nh.id, rt->attr.hit, rt->attr.action,
+ rt->attr.ttl_dec, rt->attr.ttl_check, rt->attr.dst_null);
+ pr_info("%s: GW: %pI4, prefix_len: %d\n", __func__, &rt->dst_ip, rt->prefix_len);
+out:
+ rtl_table_release(r);
+}
+
+static void rtl930x_net6_mask(int prefix_len, struct in6_addr *ip6_m)
+{
+ int o, b;
+ // Define network mask
+ o = prefix_len >> 3;
+ b = prefix_len & 0x7;
+ memset(ip6_m->s6_addr, 0xff, o);
+ if (b) // a partial byte only exists when the prefix length is not a multiple of 8
+ ip6_m->s6_addr[o] |= 0xff00 >> b;
+}
+
+/*
+ * Read a host route entry from the table using its index
+ * We currently only support IPv4 and IPv6 unicast routes
+ */
+static void rtl930x_host_route_read(int idx, struct rtl83xx_route *rt)
+{
+ u32 v;
+ // Read L3_HOST_ROUTE_IPUC table (1) via register RTL9300_TBL_1
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 1);
+
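+ // Each hash bucket has 8 physical slots of which only 6 are used, convert the logical index into the physical one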
+ idx = ((idx / 6) * 8) + (idx % 6);
+
+ pr_debug("In %s, physical index %d\n", __func__, idx);
+ rtl_table_read(r, idx);
+ // The table has a size of 5 (for UC, 11 for MC) registers
+ v = sw_r32(rtl_table_data(r, 0));
+ rt->attr.valid = !!(v & BIT(31));
+ if (!rt->attr.valid)
+ goto out;
+ rt->attr.type = (v >> 29) & 0x3;
+ switch (rt->attr.type) {
+ case 0: // IPv4 Unicast route
+ rt->dst_ip = sw_r32(rtl_table_data(r, 4));
+ break;
+ case 2: // IPv6 Unicast route
+ ipv6_addr_set(&rt->dst_ip6,
+ sw_r32(rtl_table_data(r, 3)), sw_r32(rtl_table_data(r, 2)),
+ sw_r32(rtl_table_data(r, 1)), sw_r32(rtl_table_data(r, 0)));
+ break;
+ case 1: // IPv4 Multicast route
+ case 3: // IPv6 Multicast route
+ pr_warn("%s: route type not supported\n", __func__);
+ goto out;
+ }
+
+ rt->attr.hit = !!(v & BIT(20));
+ rt->attr.dst_null = !!(v & BIT(19));
+ rt->attr.action = (v >> 17) & 3;
+ rt->nh.id = (v >> 6) & 0x7ff;
+ rt->attr.ttl_dec = !!(v & BIT(5));
+ rt->attr.ttl_check = !!(v & BIT(4));
+ rt->attr.qos_as = !!(v & BIT(3));
+ rt->attr.qos_prio = v & 0x7;
+ pr_debug("%s: index %d is valid: %d\n", __func__, idx, rt->attr.valid);
+ pr_debug("%s: next_hop: %d, hit: %d, action :%d, ttl_dec %d, ttl_check %d, dst_null %d\n",
+ __func__, rt->nh.id, rt->attr.hit, rt->attr.action, rt->attr.ttl_dec, rt->attr.ttl_check,
+ rt->attr.dst_null);
+ pr_debug("%s: Destination: %pI4\n", __func__, &rt->dst_ip);
+
+out:
+ rtl_table_release(r);
+}
+
+/*
+ * Write a host route entry to the table at the given index
+ * We currently only support IPv4 and IPv6 unicast routes
+ */
+static void rtl930x_host_route_write(int idx, struct rtl83xx_route *rt)
+{
+ u32 v;
+ // Access L3_HOST_ROUTE_IPUC table (1) via register RTL9300_TBL_1
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 1);
+ // The table has a size of 5 (for UC, 11 for MC) registers
+
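+ // Convert the logical index into the physical one, see rtl930x_host_route_read()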
+ idx = ((idx / 6) * 8) + (idx % 6);
+
+ pr_debug("%s: index %d is valid: %d\n", __func__, idx, rt->attr.valid);
+ pr_debug("%s: next_hop: %d, hit: %d, action :%d, ttl_dec %d, ttl_check %d, dst_null %d\n",
+ __func__, rt->nh.id, rt->attr.hit, rt->attr.action, rt->attr.ttl_dec, rt->attr.ttl_check,
+ rt->attr.dst_null);
+ pr_debug("%s: GW: %pI4, prefix_len: %d\n", __func__, &rt->dst_ip, rt->prefix_len);
+
+ v = BIT(31); // Entry is valid
+ v |= (rt->attr.type & 0x3) << 29;
+ v |= rt->attr.hit ? BIT(20) : 0;
+ v |= rt->attr.dst_null ? BIT(19) : 0;
+ v |= (rt->attr.action & 0x3) << 17;
+ v |= (rt->nh.id & 0x7ff) << 6;
+ v |= rt->attr.ttl_dec ? BIT(5) : 0;
+ v |= rt->attr.ttl_check ? BIT(4) : 0;
+ v |= rt->attr.qos_as ? BIT(3) : 0;
+ v |= rt->attr.qos_prio & 0x7;
+
+ sw_w32(v, rtl_table_data(r, 0));
+ switch (rt->attr.type) {
+ case 0: // IPv4 Unicast route
+ sw_w32(0, rtl_table_data(r, 1));
+ sw_w32(0, rtl_table_data(r, 2));
+ sw_w32(0, rtl_table_data(r, 3));
+ sw_w32(rt->dst_ip, rtl_table_data(r, 4));
+ break;
+ case 2: // IPv6 Unicast route
+ sw_w32(rt->dst_ip6.s6_addr32[0], rtl_table_data(r, 1));
+ sw_w32(rt->dst_ip6.s6_addr32[1], rtl_table_data(r, 2));
+ sw_w32(rt->dst_ip6.s6_addr32[2], rtl_table_data(r, 3));
+ sw_w32(rt->dst_ip6.s6_addr32[3], rtl_table_data(r, 4));
+ break;
+ case 1: // IPv4 Multicast route
+ case 3: // IPv6 Multicast route
+ pr_warn("%s: route type not supported\n", __func__);
+ goto out;
+ }
+
+ rtl_table_write(r, idx);
+
+out:
+ rtl_table_release(r);
+}
+
+/*
+ * Look up the index of a prefix route in the routing table CAM for unicast IPv4/6 routes
+ * using hardware offload.
+ */
+static int rtl930x_route_lookup_hw(struct rtl83xx_route *rt)
+{
+ u32 ip4_m, v;
+ struct in6_addr ip6_m;
+ int i;
+
+ if (rt->attr.type == 1 || rt->attr.type == 3) // Hardware only supports UC routes
+ return -1;
+
+ sw_w32_mask(0x3 << 19, rt->attr.type << 19, RTL930X_L3_HW_LU_KEY_CTRL);
+ if (rt->attr.type) { // IPv6
+ rtl930x_net6_mask(rt->prefix_len, &ip6_m);
+ for (i = 0; i < 4; i++)
+ sw_w32(rt->dst_ip6.s6_addr32[i] & ip6_m.s6_addr32[i],
+ RTL930X_L3_HW_LU_KEY_IP_CTRL + (i << 2));
+ } else { // IPv4
+ ip4_m = inet_make_mask(rt->prefix_len);
+ sw_w32(0, RTL930X_L3_HW_LU_KEY_IP_CTRL);
+ sw_w32(0, RTL930X_L3_HW_LU_KEY_IP_CTRL + 4);
+ sw_w32(0, RTL930X_L3_HW_LU_KEY_IP_CTRL + 8);
+ v = rt->dst_ip & ip4_m;
+ pr_info("%s: searching for %pI4\n", __func__, &v);
+ sw_w32(v, RTL930X_L3_HW_LU_KEY_IP_CTRL + 12);
+ }
+
+ // Execute CAM lookup in SoC
+ sw_w32(BIT(15), RTL930X_L3_HW_LU_CTRL);
+
+ // Wait until execute bit clears and result is ready
+ do {
+ v = sw_r32(RTL930X_L3_HW_LU_CTRL);
+ } while (v & BIT(15));
+
+ pr_info("%s: found: %d, index: %d\n", __func__, !!(v & BIT(14)), v & 0x1ff);
+
+ // Test if search successful (BIT 14 set)
+ if (v & BIT(14))
+ return v & 0x1ff;
+
+ return -1;
+}
+
+static int rtl930x_find_l3_slot(struct rtl83xx_route *rt, bool must_exist)
+{
+ int t, s, slot_width, algorithm, addr, idx;
+ u32 hash;
+ struct rtl83xx_route route_entry;
+
+ // IPv6 entries take up 3 slots
+ slot_width = (rt->attr.type == 0) || (rt->attr.type == 2) ? 1 : 3;
+
+ for (t = 0; t < 2; t++) {
+ algorithm = (sw_r32(RTL930X_L3_HOST_TBL_CTRL) >> (2 + t)) & 0x1;
+ hash = rtl930x_l3_hash4(rt->dst_ip, algorithm, false);
+
+ pr_debug("%s: table %d, algorithm %d, hash %04x\n", __func__, t, algorithm, hash);
+
+ for (s = 0; s < 6; s += slot_width) {
+ addr = (t << 12) | ((hash & 0x1ff) << 3) | s;
+ pr_debug("%s physical address %d\n", __func__, addr);
+ idx = ((addr / 8) * 6) + (addr % 8);
+ pr_debug("%s logical address %d\n", __func__, idx);
+
+ rtl930x_host_route_read(idx, &route_entry);
+ pr_debug("%s route valid %d, route dest: %pI4, hit %d\n", __func__,
+ rt->attr.valid, &rt->dst_ip, rt->attr.hit);
+ if (!must_exist && rt->attr.valid)
+ return idx;
+ if (must_exist && route_entry.dst_ip == rt->dst_ip)
+ return idx;
+ }
+ }
+
+ return -1;
+}
+
+/*
+ * Write a prefix route into the routing table CAM at position idx
+ * Currently only IPv4 and IPv6 unicast routes are supported
+ */
+static void rtl930x_route_write(int idx, struct rtl83xx_route *rt)
+{
+ u32 v, ip4_m;
+ struct in6_addr ip6_m;
+ // Access L3_PREFIX_ROUTE_IPUC table (2) via register RTL9300_TBL_1
+ // The table has a size of 11 registers (20 for MC)
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 2);
+
+ pr_debug("%s: index %d is valid: %d\n", __func__, idx, rt->attr.valid);
+ pr_debug("%s: nexthop: %d, hit: %d, action :%d, ttl_dec %d, ttl_check %d, dst_null %d\n",
+ __func__, rt->nh.id, rt->attr.hit, rt->attr.action,
+ rt->attr.ttl_dec, rt->attr.ttl_check, rt->attr.dst_null);
+ pr_debug("%s: GW: %pI4, prefix_len: %d\n", __func__, &rt->dst_ip, rt->prefix_len);
+
+ v = rt->attr.valid ? BIT(31) : 0;
+ v |= (rt->attr.type & 0x3) << 29;
+ sw_w32(v, rtl_table_data(r, 0));
+
+ v = rt->attr.hit ? BIT(22) : 0;
+ v |= (rt->attr.action & 0x3) << 18;
+ v |= (rt->nh.id & 0x7ff) << 7;
+ v |= rt->attr.ttl_dec ? BIT(6) : 0;
+ v |= rt->attr.ttl_check ? BIT(5) : 0;
+ v |= rt->attr.dst_null ? BIT(4) : 0;
+ v |= rt->attr.qos_as ? BIT(3) : 0;
+ v |= rt->attr.qos_prio & 0x7;
+ v |= rt->prefix_len == 0 ? BIT(20) : 0; // set default route bit
+
+ // set bit mask for entry type always to 0x3
+ sw_w32(0x3 << 29, rtl_table_data(r, 5));
+
+ switch (rt->attr.type) {
+ case 0: // IPv4 Unicast route
+ sw_w32(0, rtl_table_data(r, 1));
+ sw_w32(0, rtl_table_data(r, 2));
+ sw_w32(0, rtl_table_data(r, 3));
+ sw_w32(rt->dst_ip, rtl_table_data(r, 4));
+
+ v |= rt->prefix_len == 32 ? BIT(21) : 0; // set host-route bit
+ ip4_m = inet_make_mask(rt->prefix_len);
+ sw_w32(0, rtl_table_data(r, 6));
+ sw_w32(0, rtl_table_data(r, 7));
+ sw_w32(0, rtl_table_data(r, 8));
+ sw_w32(ip4_m, rtl_table_data(r, 9));
+ break;
+ case 2: // IPv6 Unicast route
+ sw_w32(rt->dst_ip6.s6_addr32[0], rtl_table_data(r, 1));
+ sw_w32(rt->dst_ip6.s6_addr32[1], rtl_table_data(r, 2));
+ sw_w32(rt->dst_ip6.s6_addr32[2], rtl_table_data(r, 3));
+ sw_w32(rt->dst_ip6.s6_addr32[3], rtl_table_data(r, 4));
+
+ v |= rt->prefix_len == 128 ? BIT(21) : 0; // set host-route bit
+
+ rtl930x_net6_mask(rt->prefix_len, &ip6_m);
+
+ sw_w32(ip6_m.s6_addr32[0], rtl_table_data(r, 6));
+ sw_w32(ip6_m.s6_addr32[1], rtl_table_data(r, 7));
+ sw_w32(ip6_m.s6_addr32[2], rtl_table_data(r, 8));
+ sw_w32(ip6_m.s6_addr32[3], rtl_table_data(r, 9));
+ break;
+ case 1: // IPv4 Multicast route
+ case 3: // IPv6 Multicast route
+ pr_warn("%s: route type not supported\n", __func__);
+ rtl_table_release(r);
+ return;
+ }
+ sw_w32(v, rtl_table_data(r, 10));
+
+ pr_debug("%s: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", __func__,
+ sw_r32(rtl_table_data(r, 0)), sw_r32(rtl_table_data(r, 1)), sw_r32(rtl_table_data(r, 2)),
+ sw_r32(rtl_table_data(r, 3)), sw_r32(rtl_table_data(r, 4)), sw_r32(rtl_table_data(r, 5)),
+ sw_r32(rtl_table_data(r, 6)), sw_r32(rtl_table_data(r, 7)), sw_r32(rtl_table_data(r, 8)),
+ sw_r32(rtl_table_data(r, 9)), sw_r32(rtl_table_data(r, 10)));
+
+ rtl_table_write(r, idx);
+ rtl_table_release(r);
+}
+
+
+/*
+ * Get the destination MAC and L3 egress interface ID of a nexthop entry from
+ * the SoC's L3_NEXTHOP table
+ */
+static void rtl930x_get_l3_nexthop(int idx, u16 *dmac_id, u16 *interface)
+{
+ u32 v;
+ // Read L3_NEXTHOP table (3) via register RTL9300_TBL_1
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 3);
+
+ rtl_table_read(r, idx);
+ // The table has a size of 1 register
+ v = sw_r32(rtl_table_data(r, 0));
+ rtl_table_release(r);
+
+ *dmac_id = (v >> 7) & 0x7fff;
+ *interface = v & 0x7f;
+}
+
+static int rtl930x_l3_mtu_del(struct rtl838x_switch_priv *priv, int mtu)
+{
+ int i;
+
+ for (i = 0; i < MAX_INTF_MTUS; i++) {
+ if (mtu == priv->intf_mtus[i])
+ break;
+ }
+ if (i >= MAX_INTF_MTUS || !priv->intf_mtu_count[i]) {
+ pr_err("%s: No MTU slot found for MTU: %d\n", __func__, mtu);
+ return -EINVAL;
+ }
+
+ priv->intf_mtu_count[i]--;
+
+ return 0;
+}
+
+static int rtl930x_l3_mtu_add(struct rtl838x_switch_priv *priv, int mtu)
+{
+ int i, free_mtu;
+ int mtu_id;
+
+ // Try to find an existing mtu-value or a free slot
+ free_mtu = MAX_INTF_MTUS;
+ for (i = 0; i < MAX_INTF_MTUS && priv->intf_mtus[i] != mtu; i++) {
+ if ((!priv->intf_mtu_count[i]) && (free_mtu == MAX_INTF_MTUS))
+ free_mtu = i;
+ }
+ i = (i < MAX_INTF_MTUS) ? i : free_mtu;
+ if (i < MAX_INTF_MTUS) {
+ mtu_id = i;
+ } else {
+ pr_err("%s: No free MTU slot available!\n", __func__);
+ return -EINVAL;
+ }
+
+ priv->intf_mtus[i] = mtu;
+ pr_info("Writing MTU %d to slot %d\n", priv->intf_mtus[i], i);
+ // Set MTU-value of the slot TODO: distinguish between IPv4/IPv6 routes / slots
+ sw_w32_mask(0xffff << ((i % 2) * 16), priv->intf_mtus[i] << ((i % 2) * 16),
+ RTL930X_L3_IP_MTU_CTRL(i));
+ sw_w32_mask(0xffff << ((i % 2) * 16), priv->intf_mtus[i] << ((i % 2) * 16),
+ RTL930X_L3_IP6_MTU_CTRL(i));
+
+ priv->intf_mtu_count[i]++;
+
+ return mtu_id;
+}
+
+/*
+ * Creates an interface for a route by setting up the HW tables in the SoC
+ */
+static int rtl930x_l3_intf_add(struct rtl838x_switch_priv *priv, struct rtl838x_l3_intf *intf)
+{
+ int i, intf_id, mtu_id;
+ // number of MTU-values < 16384
+
+ // Use the same IPv6 mtu as the ip4 mtu for this route if unset
+ intf->ip6_mtu = intf->ip6_mtu ? intf->ip6_mtu : intf->ip4_mtu;
+
+ mtu_id = rtl930x_l3_mtu_add(priv, intf->ip4_mtu);
+ pr_info("%s: added mtu %d with mtu-id %d\n", __func__, intf->ip4_mtu, mtu_id);
+ if (mtu_id < 0)
+ return -ENOSPC;
+ intf->ip4_mtu_id = mtu_id;
+ intf->ip6_mtu_id = mtu_id;
+
+ for (i = 0; i < MAX_INTERFACES; i++) {
+ if (!priv->interfaces[i])
+ break;
+ }
+ if (i >= MAX_INTERFACES) {
+ pr_err("%s: cannot find free interface entry\n", __func__);
+ return -EINVAL;
+ }
+ intf_id = i;
+ priv->interfaces[i] = kzalloc(sizeof(struct rtl838x_l3_intf), GFP_KERNEL);
+ if (!priv->interfaces[i]) {
+ pr_err("%s: no memory to allocate new interface\n", __func__);
+ return -ENOMEM;
+ }
+
+ return intf_id;
+}
+
+/*
+ * Set the destination MAC and L3 egress interface ID for a nexthop entry in the SoC's
+ * L3_NEXTHOP table. The nexthop entry is identified by idx.
+ * dmac_id is the reference to the L2 entry in the L2 forwarding table, special values are
+ * 0x7ffe: TRAP2CPU
+ * 0x7ffd: TRAP2MASTERCPU
+ * 0x7fff: DMAC_ID_DROP
+ */
+static void rtl930x_set_l3_nexthop(int idx, u16 dmac_id, u16 interface)
+{
+ // Access L3_NEXTHOP table (3) via register RTL9300_TBL_1
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 3);
+
+ pr_info("%s: Writing to L3_NEXTHOP table, index %d, dmac_id %d, interface %d\n",
+ __func__, idx, dmac_id, interface);
+ sw_w32(((dmac_id & 0x7fff) << 7) | (interface & 0x7f), rtl_table_data(r, 0));
+
+ pr_info("%s: %08x\n", __func__, sw_r32(rtl_table_data(r,0)));
+ rtl_table_write(r, idx);
+ rtl_table_release(r);
+}
+
+static void rtl930x_pie_lookup_enable(struct rtl838x_switch_priv *priv, int index)
+{
+ int block = index / PIE_BLOCK_SIZE;
+
+ sw_w32_mask(0, BIT(block), RTL930X_PIE_BLK_LOOKUP_CTRL);
+}
+
+/*
+ * Reads the intermediate representation of the templated match-fields of the
+ * PIE rule in the pie_rule structure and fills in the raw data fields in the
+ * raw register space r[].
+ * The register space layout is identical for the RTL8380/90 and RTL9300, however
+ * the RTL9310 has 2 more registers / fields and the physical field-ids differ on
+ * all SoCs.
+ * On the RTL9300 the mask fields are not word-aligned!
+ */
+static void rtl930x_write_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[])
+{
+ int i;
+ enum template_field_id field_type;
+ u16 data, data_m;
+
+ for (i = 0; i < N_FIXED_FIELDS; i++) {
+ field_type = t[i];
+ data = data_m = 0;
+
+ switch (field_type) {
+ case TEMPLATE_FIELD_SPM0:
+ data = pr->spm;
+ data_m = pr->spm_m;
+ break;
+ case TEMPLATE_FIELD_SPM1:
+ data = pr->spm >> 16;
+ data_m = pr->spm_m >> 16;
+ break;
+ case TEMPLATE_FIELD_OTAG:
+ data = pr->otag;
+ data_m = pr->otag_m;
+ break;
+ case TEMPLATE_FIELD_SMAC0:
+ data = pr->smac[4];
+ data = (data << 8) | pr->smac[5];
+ data_m = pr->smac_m[4];
+ data_m = (data_m << 8) | pr->smac_m[5];
+ break;
+ case TEMPLATE_FIELD_SMAC1:
+ data = pr->smac[2];
+ data = (data << 8) | pr->smac[3];
+ data_m = pr->smac_m[2];
+ data_m = (data_m << 8) | pr->smac_m[3];
+ break;
+ case TEMPLATE_FIELD_SMAC2:
+ data = pr->smac[0];
+ data = (data << 8) | pr->smac[1];
+ data_m = pr->smac_m[0];
+ data_m = (data_m << 8) | pr->smac_m[1];
+ break;
+ case TEMPLATE_FIELD_DMAC0:
+ data = pr->dmac[4];
+ data = (data << 8) | pr->dmac[5];
+ data_m = pr->dmac_m[4];
+ data_m = (data_m << 8) | pr->dmac_m[5];
+ break;
+ case TEMPLATE_FIELD_DMAC1:
+ data = pr->dmac[2];
+ data = (data << 8) | pr->dmac[3];
+ data_m = pr->dmac_m[2];
+ data_m = (data_m << 8) | pr->dmac_m[3];
+ break;
+ case TEMPLATE_FIELD_DMAC2:
+ data = pr->dmac[0];
+ data = (data << 8) | pr->dmac[1];
+ data_m = pr->dmac_m[0];
+ data_m = (data_m << 8) | pr->dmac_m[1];
+ break;
+ case TEMPLATE_FIELD_ETHERTYPE:
+ data = pr->ethertype;
+ data_m = pr->ethertype_m;
+ break;
+ case TEMPLATE_FIELD_ITAG:
+ data = pr->itag;
+ data_m = pr->itag_m;
+ break;
+ case TEMPLATE_FIELD_SIP0:
+ if (pr->is_ipv6) {
+ data = pr->sip6.s6_addr16[7];
+ data_m = pr->sip6_m.s6_addr16[7];
+ } else {
+ data = pr->sip;
+ data_m = pr->sip_m;
+ }
+ break;
+ case TEMPLATE_FIELD_SIP1:
+ if (pr->is_ipv6) {
+ data = pr->sip6.s6_addr16[6];
+ data_m = pr->sip6_m.s6_addr16[6];
+ } else {
+ data = pr->sip >> 16;
+ data_m = pr->sip_m >> 16;
+ }
+ break;
+
+ case TEMPLATE_FIELD_SIP2:
+ case TEMPLATE_FIELD_SIP3:
+ case TEMPLATE_FIELD_SIP4:
+ case TEMPLATE_FIELD_SIP5:
+ case TEMPLATE_FIELD_SIP6:
+ case TEMPLATE_FIELD_SIP7:
+ data = pr->sip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)];
+ data_m = pr->sip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)];
+ break;
+
+ case TEMPLATE_FIELD_DIP0:
+ if (pr->is_ipv6) {
+ data = pr->dip6.s6_addr16[7];
+ data_m = pr->dip6_m.s6_addr16[7];
+ } else {
+ data = pr->dip;
+ data_m = pr->dip_m;
+ }
+ break;
+
+ case TEMPLATE_FIELD_DIP1:
+ if (pr->is_ipv6) {
+ data = pr->dip6.s6_addr16[6];
+ data_m = pr->dip6_m.s6_addr16[6];
+ } else {
+ data = pr->dip >> 16;
+ data_m = pr->dip_m >> 16;
+ }
+ break;
+
+ case TEMPLATE_FIELD_DIP2:
+ case TEMPLATE_FIELD_DIP3:
+ case TEMPLATE_FIELD_DIP4:
+ case TEMPLATE_FIELD_DIP5:
+ case TEMPLATE_FIELD_DIP6:
+ case TEMPLATE_FIELD_DIP7:
+ data = pr->dip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)];
+ data_m = pr->dip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)];
+ break;
+
+ case TEMPLATE_FIELD_IP_TOS_PROTO:
+ data = pr->tos_proto;
+ data_m = pr->tos_proto_m;
+ break;
+ case TEMPLATE_FIELD_L4_SPORT:
+ data = pr->sport;
+ data_m = pr->sport_m;
+ break;
+ case TEMPLATE_FIELD_L4_DPORT:
+ data = pr->dport;
+ data_m = pr->dport_m;
+ break;
+ case TEMPLATE_FIELD_DSAP_SSAP:
+ data = pr->dsap_ssap;
+ data_m = pr->dsap_ssap_m;
+ break;
+ case TEMPLATE_FIELD_TCP_INFO:
+ data = pr->tcp_info;
+ data_m = pr->tcp_info_m;
+ break;
+ case TEMPLATE_FIELD_RANGE_CHK:
+ pr_warn("Warning: TEMPLATE_FIELD_RANGE_CHK: not configured\n");
+ break;
+ default:
+ pr_info("%s: unknown field %d\n", __func__, field_type);
+ }
+
+ // On the RTL9300, the mask fields are not word aligned!
+ if (!(i % 2)) {
+ r[5 - i / 2] = data;
+ r[12 - i / 2] |= ((u32)data_m << 8);
+ } else {
+ r[5 - i / 2] |= ((u32)data) << 16;
+ r[12 - i / 2] |= ((u32)data_m) << 24;
+ r[11 - i / 2] |= ((u32)data_m) >> 8;
+ }
+ }
+}
+
+static void rtl930x_read_pie_fixed_fields(u32 r[], struct pie_rule *pr)
+{
+ pr->stacking_port = r[6] & BIT(31);
+ pr->spn = (r[6] >> 24) & 0x7f;
+ pr->mgnt_vlan = r[6] & BIT(23);
+ if (pr->phase == PHASE_IACL)
+ pr->dmac_hit_sw = r[6] & BIT(22);
+ else
+ pr->content_too_deep = r[6] & BIT(22);
+ pr->not_first_frag = r[6] & BIT(21);
+ pr->frame_type_l4 = (r[6] >> 18) & 7;
+ pr->frame_type = (r[6] >> 16) & 3;
+ pr->otag_fmt = (r[6] >> 15) & 1;
+ pr->itag_fmt = (r[6] >> 14) & 1;
+ pr->otag_exist = (r[6] >> 13) & 1;
+ pr->itag_exist = (r[6] >> 12) & 1;
+ pr->frame_type_l2 = (r[6] >> 10) & 3;
+ pr->igr_normal_port = (r[6] >> 9) & 1;
+ pr->tid = (r[6] >> 8) & 1;
+
+ pr->stacking_port_m = r[12] & BIT(7);
+ pr->spn_m = r[12] & 0x7f;
+ pr->mgnt_vlan_m = r[13] & BIT(31);
+ if (pr->phase == PHASE_IACL)
+ pr->dmac_hit_sw_m = r[13] & BIT(30);
+ else
+ pr->content_too_deep_m = r[13] & BIT(30);
+ pr->not_first_frag_m = r[13] & BIT(29);
+ pr->frame_type_l4_m = (r[13] >> 26) & 7;
+ pr->frame_type_m = (r[13] >> 24) & 3;
+ pr->otag_fmt_m = r[13] & BIT(23);
+ pr->itag_fmt_m = r[13] & BIT(22);
+ pr->otag_exist_m = r[13] & BIT(21);
+ pr->itag_exist_m = r[13] & BIT(20);
+ pr->frame_type_l2_m = (r[13] >> 18) & 3;
+ pr->igr_normal_port_m = r[13] & BIT(17);
+ pr->tid_m = (r[13] >> 16) & 1;
+
+ pr->valid = r[13] & BIT(15);
+ pr->cond_not = r[13] & BIT(14);
+ pr->cond_and1 = r[13] & BIT(13);
+ pr->cond_and2 = r[13] & BIT(12);
+}
+
+static void rtl930x_write_pie_fixed_fields(u32 r[], struct pie_rule *pr)
+{
+ r[6] = pr->stacking_port ? BIT(31) : 0;
+ r[6] |= ((u32) (pr->spn & 0x7f)) << 24;
+ r[6] |= pr->mgnt_vlan ? BIT(23) : 0;
+ if (pr->phase == PHASE_IACL)
+ r[6] |= pr->dmac_hit_sw ? BIT(22) : 0;
+ else
+ r[6] |= pr->content_too_deep ? BIT(22) : 0;
+ r[6] |= pr->not_first_frag ? BIT(21) : 0;
+ r[6] |= ((u32) (pr->frame_type_l4 & 0x7)) << 18;
+ r[6] |= ((u32) (pr->frame_type & 0x3)) << 16;
+ r[6] |= pr->otag_fmt ? BIT(15) : 0;
+ r[6] |= pr->itag_fmt ? BIT(14) : 0;
+ r[6] |= pr->otag_exist ? BIT(13) : 0;
+ r[6] |= pr->itag_exist ? BIT(12) : 0;
+ r[6] |= ((u32) (pr->frame_type_l2 & 0x3)) << 10;
+ r[6] |= pr->igr_normal_port ? BIT(9) : 0;
+ r[6] |= ((u32) (pr->tid & 0x1)) << 8;
+
+ r[12] |= pr->stacking_port_m ? BIT(7) : 0;
+ r[12] |= (u32) (pr->spn_m & 0x7f);
+ r[13] |= pr->mgnt_vlan_m ? BIT(31) : 0;
+ if (pr->phase == PHASE_IACL)
+ r[13] |= pr->dmac_hit_sw_m ? BIT(30) : 0;
+ else
+ r[13] |= pr->content_too_deep_m ? BIT(30) : 0;
+ r[13] |= pr->not_first_frag_m ? BIT(29) : 0;
+ r[13] |= ((u32) (pr->frame_type_l4_m & 0x7)) << 26;
+ r[13] |= ((u32) (pr->frame_type_m & 0x3)) << 24;
+ r[13] |= pr->otag_fmt_m ? BIT(23) : 0;
+ r[13] |= pr->itag_fmt_m ? BIT(22) : 0;
+ r[13] |= pr->otag_exist_m ? BIT(21) : 0;
+ r[13] |= pr->itag_exist_m ? BIT(20) : 0;
+ r[13] |= ((u32) (pr->frame_type_l2_m & 0x3)) << 18;
+ r[13] |= pr->igr_normal_port_m ? BIT(17) : 0;
+ r[13] |= ((u32) (pr->tid_m & 0x1)) << 16;
+
+ r[13] |= pr->valid ? BIT(15) : 0;
+ r[13] |= pr->cond_not ? BIT(14) : 0;
+ r[13] |= pr->cond_and1 ? BIT(13) : 0;
+ r[13] |= pr->cond_and2 ? BIT(12) : 0;
+}
+
+static void rtl930x_write_pie_action(u32 r[], struct pie_rule *pr)
+{
+ // Either drop or forward
+ if (pr->drop) {
+ r[14] |= BIT(24) | BIT(25) | BIT(26); // Do Green, Yellow and Red drops
+ // Actually DROP, not PERMIT in Green / Yellow / Red
+ r[14] |= BIT(23) | BIT(22) | BIT(20);
+ } else {
+ r[14] |= pr->fwd_sel ? BIT(27) : 0;
+ r[14] |= pr->fwd_act << 18;
+ r[14] |= BIT(14); // We overwrite any drop
+ }
+ if (pr->phase == PHASE_VACL)
+ r[14] |= pr->fwd_sa_lrn ? BIT(15) : 0;
+ r[13] |= pr->bypass_sel ? BIT(5) : 0;
+ r[13] |= pr->nopri_sel ? BIT(4) : 0;
+ r[13] |= pr->tagst_sel ? BIT(3) : 0;
+ r[13] |= pr->ovid_sel ? BIT(1) : 0;
+ r[14] |= pr->ivid_sel ? BIT(31) : 0;
+ r[14] |= pr->meter_sel ? BIT(30) : 0;
+ r[14] |= pr->mir_sel ? BIT(29) : 0;
+ r[14] |= pr->log_sel ? BIT(28) : 0;
+
+ r[14] |= ((u32)(pr->fwd_data & 0x3fff)) << 3;
+ r[15] |= pr->log_octets ? BIT(31) : 0;
+ r[15] |= (u32)(pr->meter_data) << 23;
+
+ r[15] |= ((u32)(pr->ivid_act) & 0x3) << 21;
+ r[15] |= ((u32)(pr->ivid_data) & 0xfff) << 9;
+ r[16] |= ((u32)(pr->ovid_act) & 0x3) << 30;
+ r[16] |= ((u32)(pr->ovid_data) & 0xfff) << 16;
+ r[16] |= (pr->mir_data & 0x3) << 6;
+ r[17] |= ((u32)(pr->tagst_data) & 0xf) << 28;
+ r[17] |= ((u32)(pr->nopri_data) & 0x7) << 25;
+ r[17] |= pr->bypass_ibc_sc ? BIT(16) : 0;
+}
+
+void rtl930x_pie_rule_dump_raw(u32 r[])
+{
+ pr_info("Raw IACL table entry:\n");
+ pr_info("r 0 - 7: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ r[0], r[1], r[2], r[3], r[4], r[5], r[6], r[7]);
+ pr_info("r 8 - 15: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ r[8], r[9], r[10], r[11], r[12], r[13], r[14], r[15]);
+ pr_info("r 16 - 18: %08x %08x %08x\n", r[16], r[17], r[18]);
+ pr_info("Match : %08x %08x %08x %08x %08x %08x\n", r[0], r[1], r[2], r[3], r[4], r[5]);
+ pr_info("Fixed : %06x\n", r[6] >> 8);
+ pr_info("Match M: %08x %08x %08x %08x %08x %08x\n",
+ (r[6] << 24) | (r[7] >> 8), (r[7] << 24) | (r[8] >> 8), (r[8] << 24) | (r[9] >> 8),
+ (r[9] << 24) | (r[10] >> 8), (r[10] << 24) | (r[11] >> 8),
+ (r[11] << 24) | (r[12] >> 8));
+ pr_info("R[13]: %08x\n", r[13]);
+ pr_info("Fixed M: %06x\n", ((r[12] << 16) | (r[13] >> 16)) & 0xffffff);
+ pr_info("Valid / not / and1 / and2 : %1x\n", (r[13] >> 12) & 0xf);
+ pr_info("r 13-16: %08x %08x %08x %08x\n", r[13], r[14], r[15], r[16]);
+}
+
+static int rtl930x_pie_rule_write(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr)
+{
+ // Access IACL table (2) via register 0
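+ // A rule occupies 19 table registers: the templated match fields, the fixed fields, their masks and the action fields (cf. rtl930x_pie_rule_dump_raw())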
+ struct table_reg *q = rtl_table_get(RTL9300_TBL_0, 2);
+ u32 r[19];
+ int i;
+ int block = idx / PIE_BLOCK_SIZE;
+ u32 t_select = sw_r32(RTL930X_PIE_BLK_TMPLTE_CTRL(block));
+
+ pr_debug("%s: %d, t_select: %08x\n", __func__, idx, t_select);
+
+ for (i = 0; i < 19; i++)
+ r[i] = 0;
+
+ if (!pr->valid) {
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+ return 0;
+ }
+ rtl930x_write_pie_fixed_fields(r, pr);
+
+ pr_debug("%s: template %d\n", __func__, (t_select >> (pr->tid * 4)) & 0xf);
+ rtl930x_write_pie_templated(r, pr, fixed_templates[(t_select >> (pr->tid * 4)) & 0xf]);
+
+ rtl930x_write_pie_action(r, pr);
+
+// rtl930x_pie_rule_dump_raw(r);
+
+ for (i = 0; i < 19; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+
+ return 0;
+}
+
+static bool rtl930x_pie_templ_has(int t, enum template_field_id field_type)
+{
+ int i;
+ enum template_field_id ft;
+
+ for (i = 0; i < N_FIXED_FIELDS; i++) {
+ ft = fixed_templates[t][i];
+ if (field_type == ft)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Verify that the rule pr is compatible with a given template t in block block
+ * Note that this function is SoC specific since the values of e.g. TEMPLATE_FIELD_SIP0
+ * depend on the SoC
+ */
+static int rtl930x_pie_verify_template(struct rtl838x_switch_priv *priv,
+ struct pie_rule *pr, int t, int block)
+{
+ int i;
+
+ if (!pr->is_ipv6 && pr->sip_m && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_SIP0))
+ return -1;
+
+ if (!pr->is_ipv6 && pr->dip_m && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_DIP0))
+ return -1;
+
+ if (pr->is_ipv6) {
+ if ((pr->sip6_m.s6_addr32[0] || pr->sip6_m.s6_addr32[1]
+ || pr->sip6_m.s6_addr32[2] || pr->sip6_m.s6_addr32[3])
+ && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_SIP2))
+ return -1;
+ if ((pr->dip6_m.s6_addr32[0] || pr->dip6_m.s6_addr32[1]
+ || pr->dip6_m.s6_addr32[2] || pr->dip6_m.s6_addr32[3])
+ && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_DIP2))
+ return -1;
+ }
+
+ if (ether_addr_to_u64(pr->smac) && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_SMAC0))
+ return -1;
+
+ if (ether_addr_to_u64(pr->dmac) && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_DMAC0))
+ return -1;
+
+ // TODO: Check more
+
+ i = find_first_zero_bit(&priv->pie_use_bm[block * 4], PIE_BLOCK_SIZE);
+
+ if (i >= PIE_BLOCK_SIZE)
+ return -1;
+
+ return i + PIE_BLOCK_SIZE * block;
+}
+
+static int rtl930x_pie_rule_add(struct rtl838x_switch_priv *priv, struct pie_rule *pr)
+{
+ int idx, block, j, t;
+ int min_block = 0;
+ int max_block = priv->n_pie_blocks / 2;
+
+ if (pr->is_egress) {
+ min_block = max_block;
+ max_block = priv->n_pie_blocks;
+ }
+ pr_debug("In %s\n", __func__);
+
+ mutex_lock(&priv->pie_mutex);
+
+ for (block = min_block; block < max_block; block++) {
+ for (j = 0; j < 2; j++) {
+ t = (sw_r32(RTL930X_PIE_BLK_TMPLTE_CTRL(block)) >> (j * 4)) & 0xf;
+ pr_debug("Testing block %d, template %d, template id %d\n", block, j, t);
+ pr_debug("%s: %08x\n",
+ __func__, sw_r32(RTL930X_PIE_BLK_TMPLTE_CTRL(block)));
+ idx = rtl930x_pie_verify_template(priv, pr, t, block);
+ if (idx >= 0)
+ break;
+ }
+ if (j < 2)
+ break;
+ }
+
+ if (block >= priv->n_pie_blocks) {
+ mutex_unlock(&priv->pie_mutex);
+ return -EOPNOTSUPP;
+ }
+
+ pr_debug("Using block: %d, index %d, template-id %d\n", block, idx, j);
+ set_bit(idx, priv->pie_use_bm);
+
+ pr->valid = true;
+ pr->tid = j; // Mapped to template number
+ pr->tid_m = 0x1;
+ pr->id = idx;
+
+ rtl930x_pie_lookup_enable(priv, idx);
+ rtl930x_pie_rule_write(priv, idx, pr);
+
+ mutex_unlock(&priv->pie_mutex);
+ return 0;
+}
+
+/*
+ * Delete a range of Packet Inspection Engine rules
+ */
+static int rtl930x_pie_rule_del(struct rtl838x_switch_priv *priv, int index_from, int index_to)
+{
+ u32 v = (index_from << 1) | (index_to << 12) | BIT(0);
+
+ pr_debug("%s: from %d to %d\n", __func__, index_from, index_to);
+ mutex_lock(&priv->reg_mutex);
+
+ // Write from-to and execute bit into control register
+ sw_w32(v, RTL930X_PIE_CLR_CTRL);
+
+ // Wait until command has completed
+ do {
+ } while (sw_r32(RTL930X_PIE_CLR_CTRL) & BIT(0));
+
+ mutex_unlock(&priv->reg_mutex);
+ return 0;
+}
+
+static void rtl930x_pie_rule_rm(struct rtl838x_switch_priv *priv, struct pie_rule *pr)
+{
+ int idx = pr->id;
+
+ rtl930x_pie_rule_del(priv, idx, idx);
+ clear_bit(idx, priv->pie_use_bm);
+}
+
+static void rtl930x_pie_init(struct rtl838x_switch_priv *priv)
+{
+ int i;
+ u32 template_selectors;
+
+ mutex_init(&priv->pie_mutex);
+
+ pr_info("%s\n", __func__);
+ // Enable ACL lookup on all ports, including CPU_PORT
+ for (i = 0; i <= priv->cpu_port; i++)
+ sw_w32(1, RTL930X_ACL_PORT_LOOKUP_CTRL(i));
+
+ // Include IPG in metering
+ sw_w32_mask(0, 1, RTL930X_METER_GLB_CTRL);
+
+ // Delete all present rules, block size is 128 on all SoC families
+ rtl930x_pie_rule_del(priv, 0, priv->n_pie_blocks * 128 - 1);
+
+ // Assign blocks 0-7 to VACL phase (bit = 0), blocks 8-15 to IACL (bit = 1)
+ sw_w32(0xff00, RTL930X_PIE_BLK_PHASE_CTRL);
+
+ // Enable predefined templates 0, 1 for first quarter of all blocks
+ template_selectors = 0 | (1 << 4);
+ for (i = 0; i < priv->n_pie_blocks / 4; i++)
+ sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i));
+
+ // Enable predefined templates 2, 3 for second quarter of all blocks
+ template_selectors = 2 | (3 << 4);
+ for (i = priv->n_pie_blocks / 4; i < priv->n_pie_blocks / 2; i++)
+ sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i));
+
+ // Enable predefined templates 0, 1 for third quarter of all blocks
+ template_selectors = 0 | (1 << 4);
+ for (i = priv->n_pie_blocks / 2; i < priv->n_pie_blocks * 3 / 4; i++)
+ sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i));
+
+ // Enable predefined templates 2, 3 for fourth quarter of all blocks
+ template_selectors = 2 | (3 << 4);
+ for (i = priv->n_pie_blocks * 3 / 4; i < priv->n_pie_blocks; i++)
+ sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i));
+}
+
+/*
+ * Sets up an egress interface for L3 actions
+ * Actions for ip4/6_icmp_redirect, ip4/6_pbr_icmp_redirect are:
+ * 0: FORWARD, 1: DROP, 2: TRAP2CPU, 3: COPY2CPU, 4: TRAP2MASTERCPU 5: COPY2MASTERCPU
+ * 6: HARDDROP
+ * idx is the index in the HW interface table: idx < 0x80
+ */
+static void rtl930x_set_l3_egress_intf(int idx, struct rtl838x_l3_intf *intf)
+{
+ u32 u, v;
+ // Read L3_EGR_INTF table (4) via register RTL9300_TBL_1
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 4);
+
+ // The table has 2 registers
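+ // First word: VID (bits 9-20), SMAC index (bits 3-8), IPv4 MTU id (bits 0-2);
+ // second word: IPv6 MTU id, TTL/HL scopes and the ICMP redirect actions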
+ u = (intf->vid & 0xfff) << 9;
+ u |= (intf->smac_idx & 0x3f) << 3;
+ u |= (intf->ip4_mtu_id & 0x7);
+
+ v = (intf->ip6_mtu_id & 0x7) << 28;
+ v |= (intf->ttl_scope & 0xff) << 20;
+ v |= (intf->hl_scope & 0xff) << 12;
+ v |= (intf->ip4_icmp_redirect & 0x7) << 9;
+ v |= (intf->ip6_icmp_redirect & 0x7) << 6;
+ v |= (intf->ip4_pbr_icmp_redirect & 0x7) << 3;
+ v |= (intf->ip6_pbr_icmp_redirect & 0x7);
+
+ sw_w32(u, rtl_table_data(r, 0));
+ sw_w32(v, rtl_table_data(r, 1));
+
+ pr_info("%s writing to index %d: %08x %08x\n", __func__, idx, u, v);
+ rtl_table_write(r, idx & 0x7f);
+ rtl_table_release(r);
+}
+
+/*
+ * Reads a MAC entry for L3 termination as entry point for routing
+ * from the hardware table
+ * idx is the index into the L3_ROUTER_MAC table
+ */
+static void rtl930x_get_l3_router_mac(u32 idx, struct rtl93xx_rt_mac *m)
+{
+ u32 v, w;
+ // Read L3_ROUTER_MAC table (0) via register RTL9300_TBL_1
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 0);
+
+ rtl_table_read(r, idx);
+ // The table has a size of 7 registers, 64 entries
+ v = sw_r32(rtl_table_data(r, 0));
+ w = sw_r32(rtl_table_data(r, 3));
+ m->valid = !!(v & BIT(20));
+ if (!m->valid)
+ goto out;
+
+ m->p_type = !!(v & BIT(19));
+ m->p_id = (v >> 13) & 0x3f; // trunk id of port
+ m->vid = v & 0xfff;
+ m->vid_mask = w & 0xfff;
+ m->action = sw_r32(rtl_table_data(r, 6)) & 0x7;
+ m->mac_mask = ((((u64)sw_r32(rtl_table_data(r, 5))) << 32) & 0xffffffffffffULL)
+ | (sw_r32(rtl_table_data(r, 4)));
+ m->mac = ((((u64)sw_r32(rtl_table_data(r, 1))) << 32) & 0xffffffffffffULL)
+ | (sw_r32(rtl_table_data(r, 2)));
+ // Bits L3_INTF and BMSK_L3_INTF are 0
+
+out:
+ rtl_table_release(r);
+}
+
+/*
+ * Writes a MAC entry for L3 termination as entry point for routing
+ * into the hardware table
+ * idx is the index into the L3_ROUTER_MAC table
+ */
+static void rtl930x_set_l3_router_mac(u32 idx, struct rtl93xx_rt_mac *m)
+{
+ u32 v, w;
+ // Read L3_ROUTER_MAC table (0) via register RTL9300_TBL_1
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 0);
+
+ // The table has a size of 7 registers, 64 entries
+ v = BIT(20); // mac entry valid, port type is 0: individual
+ v |= (m->p_id & 0x3f) << 13;
+ v |= (m->vid & 0xfff); // Set the interface_id to the vlan id
+
+ w = m->vid_mask;
+ w |= (m->p_id_mask & 0x3f) << 13;
+
+ sw_w32(v, rtl_table_data(r, 0));
+ sw_w32(w, rtl_table_data(r, 3));
+
+ // Set MAC address, L3_INTF (bit 12 in register 1) needs to be 0
+ sw_w32((u32)(m->mac), rtl_table_data(r, 2));
+ sw_w32(m->mac >> 32, rtl_table_data(r, 1));
+
+ // Set MAC address mask, BMSK_L3_INTF (bit 12 in register 5) needs to be 0
+ sw_w32((u32)(m->mac_mask >> 32), rtl_table_data(r, 4));
+ sw_w32((u32)m->mac_mask, rtl_table_data(r, 5));
+
+ sw_w32(m->action & 0x7, rtl_table_data(r, 6));
+
+ pr_debug("%s writing index %d: %08x %08x %08x %08x %08x %08x %08x\n", __func__, idx,
+ sw_r32(rtl_table_data(r, 0)), sw_r32(rtl_table_data(r, 1)), sw_r32(rtl_table_data(r, 2)),
+ sw_r32(rtl_table_data(r, 3)), sw_r32(rtl_table_data(r, 4)), sw_r32(rtl_table_data(r, 5)),
+ sw_r32(rtl_table_data(r, 6))
+ );
+ rtl_table_write(r, idx);
+ rtl_table_release(r);
+}
+
+/*
+ * Get the Destination-MAC of an L3 egress interface or the Source MAC for routed packets
+ * from the SoC's L3_EGR_INTF_MAC table
+ * Indexes 0-2047 are DMACs, 2048+ are SMACs
+ */
+static u64 rtl930x_get_l3_egress_mac(u32 idx)
+{
+ u64 mac;
+ // Read L3_EGR_INTF_MAC table (2) via register RTL9300_TBL_2
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_2, 2);
+
+ rtl_table_read(r, idx);
+ // The table has a size of 2 registers
+ mac = sw_r32(rtl_table_data(r, 0));
+ mac <<= 32;
+ mac |= sw_r32(rtl_table_data(r, 1));
+ rtl_table_release(r);
+
+ return mac;
+}
+
+/*
+ * Set the Destination-MAC of a route or the Source MAC of an L3 egress interface
+ * in the SoC's L3_EGR_INTF_MAC table
+ * Indexes 0-2047 are DMACs, 2048+ are SMACs
+ */
+static void rtl930x_set_l3_egress_mac(u32 idx, u64 mac)
+{
+ // Access L3_EGR_INTF_MAC table (2) via register RTL9300_TBL_2
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_2, 2);
+
+ // The table has a size of 2 registers
+ sw_w32(mac >> 32, rtl_table_data(r, 0));
+ sw_w32(mac, rtl_table_data(r, 1));
+
+ pr_debug("%s: setting index %d to %016llx\n", __func__, idx, mac);
+ rtl_table_write(r, idx);
+ rtl_table_release(r);
+}
+
+/*
+ * Configure L3 routing settings of the device:
+ * - MTUs
+ * - Egress interface
+ * - The router's MAC address on which routed packets are expected
+ * - MAC addresses used as source macs of routed packets
+ */
+int rtl930x_l3_setup(struct rtl838x_switch_priv *priv)
+{
+ int i;
+
+ // Setup MTU with id 0 for default interface
+ for (i = 0; i < MAX_INTF_MTUS; i++)
+ priv->intf_mtu_count[i] = priv->intf_mtus[i] = 0;
+
+ priv->intf_mtu_count[0] = 0; // Slot 0 holds the default MTU and stays reserved forever
+ priv->intf_mtus[0] = DEFAULT_MTU;
+ sw_w32_mask(0xffff, DEFAULT_MTU, RTL930X_L3_IP_MTU_CTRL(0));
+ sw_w32_mask(0xffff, DEFAULT_MTU, RTL930X_L3_IP6_MTU_CTRL(0));
+ priv->intf_mtus[1] = DEFAULT_MTU;
+ sw_w32_mask(0xffff0000, DEFAULT_MTU << 16, RTL930X_L3_IP_MTU_CTRL(0));
+ sw_w32_mask(0xffff0000, DEFAULT_MTU << 16, RTL930X_L3_IP6_MTU_CTRL(0));
+
+ sw_w32_mask(0xffff, DEFAULT_MTU, RTL930X_L3_IP_MTU_CTRL(1));
+ sw_w32_mask(0xffff, DEFAULT_MTU, RTL930X_L3_IP6_MTU_CTRL(1));
+ sw_w32_mask(0xffff0000, DEFAULT_MTU << 16, RTL930X_L3_IP_MTU_CTRL(1));
+ sw_w32_mask(0xffff0000, DEFAULT_MTU << 16, RTL930X_L3_IP6_MTU_CTRL(1));
+
+ // Clear all source port MACs
+ for (i = 0; i < MAX_SMACS; i++)
+ rtl930x_set_l3_egress_mac(L3_EGRESS_DMACS + i, 0ULL);
+
+ // Configure the default L3 hash algorithm
+ sw_w32_mask(BIT(2), 0, RTL930X_L3_HOST_TBL_CTRL); // Algorithm selection 0 = 0
+ sw_w32_mask(0, BIT(3), RTL930X_L3_HOST_TBL_CTRL); // Algorithm selection 1 = 1
+
+ pr_info("L3_IPUC_ROUTE_CTRL %08x, IPMC_ROUTE %08x, IP6UC_ROUTE %08x, IP6MC_ROUTE %08x\n",
+ sw_r32(RTL930X_L3_IPUC_ROUTE_CTRL), sw_r32(RTL930X_L3_IPMC_ROUTE_CTRL),
+ sw_r32(RTL930X_L3_IP6UC_ROUTE_CTRL), sw_r32(RTL930X_L3_IP6MC_ROUTE_CTRL));
+ sw_w32_mask(0, 1, RTL930X_L3_IPUC_ROUTE_CTRL);
+ sw_w32_mask(0, 1, RTL930X_L3_IP6UC_ROUTE_CTRL);
+ sw_w32_mask(0, 1, RTL930X_L3_IPMC_ROUTE_CTRL);
+ sw_w32_mask(0, 1, RTL930X_L3_IP6MC_ROUTE_CTRL);
+
+ sw_w32(0x00002001, RTL930X_L3_IPUC_ROUTE_CTRL);
+ sw_w32(0x00014581, RTL930X_L3_IP6UC_ROUTE_CTRL);
+ sw_w32(0x00000501, RTL930X_L3_IPMC_ROUTE_CTRL);
+ sw_w32(0x00012881, RTL930X_L3_IP6MC_ROUTE_CTRL);
+
+ pr_info("L3_IPUC_ROUTE_CTRL %08x, IPMC_ROUTE %08x, IP6UC_ROUTE %08x, IP6MC_ROUTE %08x\n",
+ sw_r32(RTL930X_L3_IPUC_ROUTE_CTRL), sw_r32(RTL930X_L3_IPMC_ROUTE_CTRL),
+ sw_r32(RTL930X_L3_IP6UC_ROUTE_CTRL), sw_r32(RTL930X_L3_IP6MC_ROUTE_CTRL));
+
+ // Trap non-ip traffic to the CPU-port (e.g. ARP so we stay reachable)
+ sw_w32_mask(0x3 << 8, 0x1 << 8, RTL930X_L3_IP_ROUTE_CTRL);
+ pr_info("L3_IP_ROUTE_CTRL %08x\n", sw_r32(RTL930X_L3_IP_ROUTE_CTRL));
+
+ // PORT_ISO_RESTRICT_ROUTE_CTRL ?
+
+ // Do not use prefix route 0 because of HW limitations
+ set_bit(0, priv->route_use_bm);
+
+ return 0;
+}
+
+static u32 rtl930x_packet_cntr_read(int counter)
+{
+ u32 v;
+
+ // Read LOG table (3) via register RTL9300_TBL_0
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 3);
+
+ pr_debug("In %s, id %d\n", __func__, counter);
+ rtl_table_read(r, counter / 2);
+
+ pr_debug("Registers: %08x %08x\n",
+ sw_r32(rtl_table_data(r, 0)), sw_r32(rtl_table_data(r, 1)));
+ // The table has a size of 2 registers
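+ // Two counters share one table entry: odd counter ids are stored in the first register, even ids in the second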
+ if (counter % 2)
+ v = sw_r32(rtl_table_data(r, 0));
+ else
+ v = sw_r32(rtl_table_data(r, 1));
+
+ rtl_table_release(r);
+
+ return v;
+}
+
+static void rtl930x_packet_cntr_clear(int counter)
+{
+ // Access LOG table (3) via register RTL9300_TBL_0
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 3);
+
+ pr_info("In %s, id %d\n", __func__, counter);
+ // The table has a size of 2 registers
+ if (counter % 2)
+ sw_w32(0, rtl_table_data(r, 0));
+ else
+ sw_w32(0, rtl_table_data(r, 1));
+
+ rtl_table_write(r, counter / 2);
+
+ rtl_table_release(r);
+}
+
+void rtl930x_vlan_port_keep_tag_set(int port, bool keep_outer, bool keep_inner)
+{
+ sw_w32(FIELD_PREP(RTL930X_VLAN_PORT_TAG_STS_CTRL_EGR_OTAG_STS_MASK,
+ keep_outer ? RTL930X_VLAN_PORT_TAG_STS_TAGGED : RTL930X_VLAN_PORT_TAG_STS_UNTAG) |
+ FIELD_PREP(RTL930X_VLAN_PORT_TAG_STS_CTRL_EGR_ITAG_STS_MASK,
+ keep_inner ? RTL930X_VLAN_PORT_TAG_STS_TAGGED : RTL930X_VLAN_PORT_TAG_STS_UNTAG),
+ RTL930X_VLAN_PORT_TAG_STS_CTRL(port));
+}
+
+void rtl930x_vlan_port_pvidmode_set(int port, enum pbvlan_type type, enum pbvlan_mode mode)
+{
+ if (type == PBVLAN_TYPE_INNER)
+ sw_w32_mask(0x3, mode, RTL930X_VLAN_PORT_PB_VLAN + (port << 2));
+ else
+ sw_w32_mask(0x3 << 14, mode << 14, RTL930X_VLAN_PORT_PB_VLAN + (port << 2));
+}
+
+void rtl930x_vlan_port_pvid_set(int port, enum pbvlan_type type, int pvid)
+{
+ if (type == PBVLAN_TYPE_INNER)
+ sw_w32_mask(0xfff << 2, pvid << 2, RTL930X_VLAN_PORT_PB_VLAN + (port << 2));
+ else
+ sw_w32_mask(0xfff << 16, pvid << 16, RTL930X_VLAN_PORT_PB_VLAN + (port << 2));
+}
+
+static int rtl930x_set_ageing_time(unsigned long msec)
+{
+ int t = sw_r32(RTL930X_L2_AGE_CTRL);
+
+ t &= 0x1FFFFF;
+ t = (t * 7) / 10;
+ pr_debug("L2 AGING time: %d sec\n", t);
+
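+ // The hardware ages in steps of roughly 0.7 seconds, round up when converting from milliseconds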
+ t = (msec / 100 + 6) / 7;
+ t = t > 0x1FFFFF ? 0x1FFFFF : t;
+ sw_w32_mask(0x1FFFFF, t, RTL930X_L2_AGE_CTRL);
+ pr_debug("Dynamic aging for ports: %x\n", sw_r32(RTL930X_L2_PORT_AGE_CTRL));
+
+ return 0;
+}
+
+static void rtl930x_set_igr_filter(int port, enum igr_filter state)
+{
+ sw_w32_mask(0x3 << ((port & 0xf)<<1), state << ((port & 0xf)<<1),
+ RTL930X_VLAN_PORT_IGR_FLTR + (((port >> 4) << 2)));
+}
+
+static void rtl930x_set_egr_filter(int port, enum egr_filter state)
+{
+ sw_w32_mask(0x1 << (port % 29), state << (port % 29),
+ RTL930X_VLAN_PORT_EGR_FLTR + (((port / 29) << 2)));
+}
+
+void rtl930x_set_distribution_algorithm(int group, int algoidx, u32 algomsk)
+{
+ u32 l3shift = 0;
+ u32 newmask = 0;
+
+ /* TODO: for now we set algoidx to 0 */
+ algoidx = 0;
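+ // If any L3 criterion is requested, the whole selector mask is written to the L3 part of the register, 4 bits above the L2 part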
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_SIP_BIT) {
+ l3shift = 4;
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SIP_BIT;
+ }
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_DIP_BIT) {
+ l3shift = 4;
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_DIP_BIT;
+ }
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT) {
+ l3shift = 4;
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SRC_L4PORT_BIT;
+ }
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT) {
+ l3shift = 4;
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_DST_L4PORT_BIT;
+ }
+
+ if (l3shift == 4) {
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_SMAC_BIT)
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SMAC_BIT;
+
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_DMAC_BIT)
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_DMAC_BIT;
+ } else {
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_SMAC_BIT)
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L2_SMAC_BIT;
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_DMAC_BIT)
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L2_DMAC_BIT;
+ }
+
+ sw_w32(newmask << l3shift, RTL930X_TRK_HASH_CTRL + (algoidx << 2));
+}
+
+static void rtl930x_led_init(struct rtl838x_switch_priv *priv)
+{
+ int i, pos;
+ u32 v, pm = 0, set;
+ u32 setlen;
+ const __be32 *led_set;
+ char set_name[9];
+ struct device_node *node;
+
+ pr_info("%s called\n", __func__);
+ node = of_find_compatible_node(NULL, NULL, "realtek,rtl9300-leds");
+ if (!node) {
+ pr_info("%s No compatible LED node found\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < priv->cpu_port; i++) {
+ pos = (i << 1) % 32;
+ sw_w32_mask(0x3 << pos, 0, RTL930X_LED_PORT_FIB_SET_SEL_CTRL(i));
+ sw_w32_mask(0x3 << pos, 0, RTL930X_LED_PORT_COPR_SET_SEL_CTRL(i));
+
+ if (!priv->ports[i].phy)
+ continue;
+
+ v = 0x1;
+ if (priv->ports[i].is10G)
+ v = 0x3;
+ if (priv->ports[i].phy_is_integrated)
+ v = 0x1;
+ sw_w32_mask(0x3 << pos, v << pos, RTL930X_LED_PORT_NUM_CTRL(i));
+
+ pm |= BIT(i);
+
+ set = priv->ports[i].led_set;
+ sw_w32_mask(0, set << pos, RTL930X_LED_PORT_COPR_SET_SEL_CTRL(i));
+ sw_w32_mask(0, set << pos, RTL930X_LED_PORT_FIB_SET_SEL_CTRL(i));
+ }
+
+ for (i = 0; i < 4; i++) {
+ sprintf(set_name, "led_set%d", i);
+ led_set = of_get_property(node, set_name, &setlen);
+ if (!led_set || setlen != 16)
+ break;
+ v = be32_to_cpup(led_set) << 16 | be32_to_cpup(led_set + 1);
+ sw_w32(v, RTL930X_LED_SET0_0_CTRL - 4 - i * 8);
+ v = be32_to_cpup(led_set + 2) << 16 | be32_to_cpup(led_set + 3);
+ sw_w32(v, RTL930X_LED_SET0_0_CTRL - i * 8);
+ }
+
+ // Set LED mode to serial (0x1)
+ sw_w32_mask(0x3, 0x1, RTL930X_LED_GLB_CTRL);
+
+ // Set port type masks
+ sw_w32(pm, RTL930X_LED_PORT_COPR_MASK_CTRL);
+ sw_w32(pm, RTL930X_LED_PORT_FIB_MASK_CTRL);
+ sw_w32(pm, RTL930X_LED_PORT_COMBO_MASK_CTRL);
+
+ for (i = 0; i < 24; i++)
+ pr_info("%s %08x: %08x\n",__func__, 0xbb00cc00 + i * 4, sw_r32(0xcc00 + i * 4));
+}
+
+const struct rtl838x_reg rtl930x_reg = {
+ .mask_port_reg_be = rtl838x_mask_port_reg,
+ .set_port_reg_be = rtl838x_set_port_reg,
+ .get_port_reg_be = rtl838x_get_port_reg,
+ .mask_port_reg_le = rtl838x_mask_port_reg,
+ .set_port_reg_le = rtl838x_set_port_reg,
+ .get_port_reg_le = rtl838x_get_port_reg,
+ .stat_port_rst = RTL930X_STAT_PORT_RST,
+ .stat_rst = RTL930X_STAT_RST,
+ .stat_port_std_mib = RTL930X_STAT_PORT_MIB_CNTR,
+ .traffic_enable = rtl930x_traffic_enable,
+ .traffic_disable = rtl930x_traffic_disable,
+ .traffic_get = rtl930x_traffic_get,
+ .traffic_set = rtl930x_traffic_set,
+ .l2_ctrl_0 = RTL930X_L2_CTRL,
+ .l2_ctrl_1 = RTL930X_L2_AGE_CTRL,
+ .l2_port_aging_out = RTL930X_L2_PORT_AGE_CTRL,
+ .set_ageing_time = rtl930x_set_ageing_time,
+ .smi_poll_ctrl = RTL930X_SMI_POLL_CTRL, // TODO: Difference to RTL9300_SMI_PRVTE_POLLING_CTRL
+ .l2_tbl_flush_ctrl = RTL930X_L2_TBL_FLUSH_CTRL,
+ .exec_tbl0_cmd = rtl930x_exec_tbl0_cmd,
+ .exec_tbl1_cmd = rtl930x_exec_tbl1_cmd,
+ .tbl_access_data_0 = rtl930x_tbl_access_data_0,
+ .isr_glb_src = RTL930X_ISR_GLB,
+ .isr_port_link_sts_chg = RTL930X_ISR_PORT_LINK_STS_CHG,
+ .imr_port_link_sts_chg = RTL930X_IMR_PORT_LINK_STS_CHG,
+ .imr_glb = RTL930X_IMR_GLB,
+ .vlan_tables_read = rtl930x_vlan_tables_read,
+ .vlan_set_tagged = rtl930x_vlan_set_tagged,
+ .vlan_set_untagged = rtl930x_vlan_set_untagged,
+ .vlan_profile_dump = rtl930x_vlan_profile_dump,
+ .vlan_profile_setup = rtl930x_vlan_profile_setup,
+ .vlan_fwd_on_inner = rtl930x_vlan_fwd_on_inner,
+ .set_vlan_igr_filter = rtl930x_set_igr_filter,
+ .set_vlan_egr_filter = rtl930x_set_egr_filter,
+ .stp_get = rtl930x_stp_get,
+ .stp_set = rtl930x_stp_set,
+ .mac_force_mode_ctrl = rtl930x_mac_force_mode_ctrl,
+ .mac_port_ctrl = rtl930x_mac_port_ctrl,
+ .l2_port_new_salrn = rtl930x_l2_port_new_salrn,
+ .l2_port_new_sa_fwd = rtl930x_l2_port_new_sa_fwd,
+ .mir_ctrl = RTL930X_MIR_CTRL,
+ .mir_dpm = RTL930X_MIR_DPM_CTRL,
+ .mir_spm = RTL930X_MIR_SPM_CTRL,
+ .mac_link_sts = RTL930X_MAC_LINK_STS,
+ .mac_link_dup_sts = RTL930X_MAC_LINK_DUP_STS,
+ .mac_link_spd_sts = rtl930x_mac_link_spd_sts,
+ .mac_rx_pause_sts = RTL930X_MAC_RX_PAUSE_STS,
+ .mac_tx_pause_sts = RTL930X_MAC_TX_PAUSE_STS,
+ .read_l2_entry_using_hash = rtl930x_read_l2_entry_using_hash,
+ .write_l2_entry_using_hash = rtl930x_write_l2_entry_using_hash,
+ .read_cam = rtl930x_read_cam,
+ .write_cam = rtl930x_write_cam,
+ .vlan_port_keep_tag_set = rtl930x_vlan_port_keep_tag_set,
+ .vlan_port_pvidmode_set = rtl930x_vlan_port_pvidmode_set,
+ .vlan_port_pvid_set = rtl930x_vlan_port_pvid_set,
+ .trk_mbr_ctr = rtl930x_trk_mbr_ctr,
+ .rma_bpdu_fld_pmask = RTL930X_RMA_BPDU_FLD_PMSK,
+ .init_eee = rtl930x_init_eee,
+ .port_eee_set = rtl930x_port_eee_set,
+ .eee_port_ability = rtl930x_eee_port_ability,
+ .l2_hash_seed = rtl930x_l2_hash_seed,
+ .l2_hash_key = rtl930x_l2_hash_key,
+ .read_mcast_pmask = rtl930x_read_mcast_pmask,
+ .write_mcast_pmask = rtl930x_write_mcast_pmask,
+ .pie_init = rtl930x_pie_init,
+ .pie_rule_write = rtl930x_pie_rule_write,
+ .pie_rule_add = rtl930x_pie_rule_add,
+ .pie_rule_rm = rtl930x_pie_rule_rm,
+ .l2_learning_setup = rtl930x_l2_learning_setup,
+ .packet_cntr_read = rtl930x_packet_cntr_read,
+ .packet_cntr_clear = rtl930x_packet_cntr_clear,
+ .route_read = rtl930x_route_read,
+ .route_write = rtl930x_route_write,
+ .host_route_write = rtl930x_host_route_write,
+ .l3_setup = rtl930x_l3_setup,
+ .set_l3_nexthop = rtl930x_set_l3_nexthop,
+ .get_l3_nexthop = rtl930x_get_l3_nexthop,
+ .get_l3_egress_mac = rtl930x_get_l3_egress_mac,
+ .set_l3_egress_mac = rtl930x_set_l3_egress_mac,
+ .find_l3_slot = rtl930x_find_l3_slot,
+ .route_lookup_hw = rtl930x_route_lookup_hw,
+ .get_l3_router_mac = rtl930x_get_l3_router_mac,
+ .set_l3_router_mac = rtl930x_set_l3_router_mac,
+ .set_l3_egress_intf = rtl930x_set_l3_egress_intf,
+ .set_distribution_algorithm = rtl930x_set_distribution_algorithm,
+ .led_init = rtl930x_led_init,
+};
diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl931x.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl931x.c
new file mode 100644
index 0000000000..ee8d6c2c73
--- /dev/null
+++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl931x.c
@@ -0,0 +1,1701 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <asm/mach-rtl838x/mach-rtl83xx.h>
+#include "rtl83xx.h"
+
+#define RTL931X_VLAN_PORT_TAG_STS_INTERNAL 0x0
+#define RTL931X_VLAN_PORT_TAG_STS_UNTAG 0x1
+#define RTL931X_VLAN_PORT_TAG_STS_TAGGED 0x2
+#define RTL931X_VLAN_PORT_TAG_STS_PRIORITY_TAGGED 0x3
+
+#define RTL931X_VLAN_PORT_TAG_CTRL_BASE 0x4860
+/* port 0-56 */
+#define RTL931X_VLAN_PORT_TAG_CTRL(port) \
+ (RTL931X_VLAN_PORT_TAG_CTRL_BASE + ((port) << 2))
+#define RTL931X_VLAN_PORT_TAG_EGR_OTAG_STS_MASK GENMASK(13,12)
+#define RTL931X_VLAN_PORT_TAG_EGR_ITAG_STS_MASK GENMASK(11,10)
+#define RTL931X_VLAN_PORT_TAG_EGR_OTAG_KEEP_MASK GENMASK(9,9)
+#define RTL931X_VLAN_PORT_TAG_EGR_ITAG_KEEP_MASK GENMASK(8,8)
+#define RTL931X_VLAN_PORT_TAG_IGR_OTAG_KEEP_MASK GENMASK(7,7)
+#define RTL931X_VLAN_PORT_TAG_IGR_ITAG_KEEP_MASK GENMASK(6,6)
+#define RTL931X_VLAN_PORT_TAG_OTPID_IDX_MASK GENMASK(5,4)
+#define RTL931X_VLAN_PORT_TAG_OTPID_KEEP_MASK GENMASK(3,3)
+#define RTL931X_VLAN_PORT_TAG_ITPID_IDX_MASK GENMASK(2,1)
+#define RTL931X_VLAN_PORT_TAG_ITPID_KEEP_MASK GENMASK(0,0)
+
+extern struct mutex smi_lock;
+extern struct rtl83xx_soc_info soc_info;
+
+/* Definition of the RTL931X-specific template field IDs as used in the PIE */
+enum template_field_id {
+ TEMPLATE_FIELD_SPM0 = 1,
+ TEMPLATE_FIELD_SPM1 = 2,
+ TEMPLATE_FIELD_SPM2 = 3,
+ TEMPLATE_FIELD_SPM3 = 4,
+ TEMPLATE_FIELD_DMAC0 = 9,
+ TEMPLATE_FIELD_DMAC1 = 10,
+ TEMPLATE_FIELD_DMAC2 = 11,
+ TEMPLATE_FIELD_SMAC0 = 12,
+ TEMPLATE_FIELD_SMAC1 = 13,
+ TEMPLATE_FIELD_SMAC2 = 14,
+ TEMPLATE_FIELD_ETHERTYPE = 15,
+ TEMPLATE_FIELD_OTAG = 16,
+ TEMPLATE_FIELD_ITAG = 17,
+ TEMPLATE_FIELD_SIP0 = 18,
+ TEMPLATE_FIELD_SIP1 = 19,
+ TEMPLATE_FIELD_DIP0 = 20,
+ TEMPLATE_FIELD_DIP1 = 21,
+ TEMPLATE_FIELD_IP_TOS_PROTO = 22,
+ TEMPLATE_FIELD_L4_SPORT = 23,
+ TEMPLATE_FIELD_L4_DPORT = 24,
+ TEMPLATE_FIELD_L34_HEADER = 25,
+ TEMPLATE_FIELD_TCP_INFO = 26,
+ TEMPLATE_FIELD_SIP2 = 34,
+ TEMPLATE_FIELD_SIP3 = 35,
+ TEMPLATE_FIELD_SIP4 = 36,
+ TEMPLATE_FIELD_SIP5 = 37,
+ TEMPLATE_FIELD_SIP6 = 38,
+ TEMPLATE_FIELD_SIP7 = 39,
+ TEMPLATE_FIELD_DIP2 = 42,
+ TEMPLATE_FIELD_DIP3 = 43,
+ TEMPLATE_FIELD_DIP4 = 44,
+ TEMPLATE_FIELD_DIP5 = 45,
+ TEMPLATE_FIELD_DIP6 = 46,
+ TEMPLATE_FIELD_DIP7 = 47,
+ TEMPLATE_FIELD_FLOW_LABEL = 49,
+ TEMPLATE_FIELD_DSAP_SSAP = 50,
+ TEMPLATE_FIELD_FWD_VID = 52,
+ TEMPLATE_FIELD_RANGE_CHK = 53,
+ TEMPLATE_FIELD_SLP = 55,
+ TEMPLATE_FIELD_DLP = 56,
+ TEMPLATE_FIELD_META_DATA = 57,
+ TEMPLATE_FIELD_FIRST_MPLS1 = 60,
+ TEMPLATE_FIELD_FIRST_MPLS2 = 61,
+ TEMPLATE_FIELD_DPM3 = 8,
+};
+
+/* The meaning of TEMPLATE_FIELD_VLAN depends on phase and the configuration in
+ * RTL931X_PIE_CTRL. We always use the same definition and map it to the inner VLAN tag:
+ */
+#define TEMPLATE_FIELD_VLAN TEMPLATE_FIELD_ITAG
+
+// Number of fixed templates predefined in the RTL931X SoC
+#define N_FIXED_TEMPLATES 5
+// RTL931x specific predefined templates
+static enum template_field_id fixed_templates[N_FIXED_TEMPLATES][N_FIXED_FIELDS_RTL931X] =
+{
+ {
+ TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2,
+ TEMPLATE_FIELD_SMAC0, TEMPLATE_FIELD_SMAC1, TEMPLATE_FIELD_SMAC2,
+ TEMPLATE_FIELD_VLAN, TEMPLATE_FIELD_IP_TOS_PROTO, TEMPLATE_FIELD_DSAP_SSAP,
+ TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1,
+ TEMPLATE_FIELD_SPM2, TEMPLATE_FIELD_SPM3
+ }, {
+ TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0,
+ TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_IP_TOS_PROTO, TEMPLATE_FIELD_TCP_INFO,
+ TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_L4_DPORT, TEMPLATE_FIELD_VLAN,
+ TEMPLATE_FIELD_RANGE_CHK, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1,
+ TEMPLATE_FIELD_SPM2, TEMPLATE_FIELD_SPM3
+ }, {
+ TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2,
+ TEMPLATE_FIELD_VLAN, TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_IP_TOS_PROTO,
+ TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0,
+ TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_L4_DPORT,
+ TEMPLATE_FIELD_META_DATA, TEMPLATE_FIELD_SLP
+ }, {
+ TEMPLATE_FIELD_DIP0, TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_DIP2,
+ TEMPLATE_FIELD_DIP3, TEMPLATE_FIELD_DIP4, TEMPLATE_FIELD_DIP5,
+ TEMPLATE_FIELD_DIP6, TEMPLATE_FIELD_DIP7, TEMPLATE_FIELD_IP_TOS_PROTO,
+ TEMPLATE_FIELD_TCP_INFO, TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_L4_DPORT,
+ TEMPLATE_FIELD_RANGE_CHK, TEMPLATE_FIELD_SLP
+ }, {
+ TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_SIP2,
+ TEMPLATE_FIELD_SIP3, TEMPLATE_FIELD_SIP4, TEMPLATE_FIELD_SIP5,
+ TEMPLATE_FIELD_SIP6, TEMPLATE_FIELD_SIP7, TEMPLATE_FIELD_META_DATA,
+ TEMPLATE_FIELD_VLAN, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1,
+ TEMPLATE_FIELD_SPM2, TEMPLATE_FIELD_SPM3
+ },
+};
+
+inline void rtl931x_exec_tbl0_cmd(u32 cmd)
+{
+ sw_w32(cmd, RTL931X_TBL_ACCESS_CTRL_0);
+ do { } while (sw_r32(RTL931X_TBL_ACCESS_CTRL_0) & (1 << 20));
+}
+
+inline void rtl931x_exec_tbl1_cmd(u32 cmd)
+{
+ sw_w32(cmd, RTL931X_TBL_ACCESS_CTRL_1);
+ do { } while (sw_r32(RTL931X_TBL_ACCESS_CTRL_1) & (1 << 17));
+}
+
+inline int rtl931x_tbl_access_data_0(int i)
+{
+ return RTL931X_TBL_ACCESS_DATA_0(i);
+}
+
+void rtl931x_vlan_profile_dump(int index)
+{
+ u64 profile[4];
+
+ if (index < 0 || index > 15)
+ return;
+
+ profile[0] = sw_r32(RTL931X_VLAN_PROFILE_SET(index));
+ profile[1] = (sw_r32(RTL931X_VLAN_PROFILE_SET(index) + 4) & 0x1FFFFFFFULL) << 32
+ | (sw_r32(RTL931X_VLAN_PROFILE_SET(index) + 8) & 0xFFFFFFFF);
+ profile[2] = (sw_r32(RTL931X_VLAN_PROFILE_SET(index) + 16) & 0x1FFFFFFFULL) << 32
+ | (sw_r32(RTL931X_VLAN_PROFILE_SET(index) + 12) & 0xFFFFFFFF);
+ profile[3] = (sw_r32(RTL931X_VLAN_PROFILE_SET(index) + 20) & 0x1FFFFFFFULL) << 32
+ | (sw_r32(RTL931X_VLAN_PROFILE_SET(index) + 24) & 0xFFFFFFFF);
+
+	pr_info("VLAN %d: L2 learning: %d, L2 Unknown MultiCast Field %llx, IPv4 Unknown MultiCast Field %llx, IPv6 Unknown MultiCast Field: %llx\n",
+		index, (u32) (profile[0] & (3 << 14)), profile[1], profile[2], profile[3]);
+}
+
+static void rtl931x_stp_get(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
+{
+ int i;
+ u32 cmd = 1 << 20 /* Execute cmd */
+ | 0 << 19 /* Read */
+ | 5 << 15 /* Table type 0b101 */
+ | (msti & 0x3fff);
+ priv->r->exec_tbl0_cmd(cmd);
+
+ for (i = 0; i < 4; i++)
+ port_state[i] = sw_r32(priv->r->tbl_access_data_0(i));
+}
+
+static void rtl931x_stp_set(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
+{
+ int i;
+ u32 cmd = 1 << 20 /* Execute cmd */
+ | 1 << 19 /* Write */
+ | 5 << 15 /* Table type 0b101 */
+ | (msti & 0x3fff);
+ for (i = 0; i < 4; i++)
+ sw_w32(port_state[i], priv->r->tbl_access_data_0(i));
+ priv->r->exec_tbl0_cmd(cmd);
+}
+
+static inline int rtl931x_trk_mbr_ctr(int group)
+{
+ return RTL931X_TRK_MBR_CTRL + (group << 2);
+}
+
+static void rtl931x_vlan_tables_read(u32 vlan, struct rtl838x_vlan_info *info)
+{
+ u32 v, w, x, y;
+ // Read VLAN table (3) via register 0
+ struct table_reg *r = rtl_table_get(RTL9310_TBL_0, 3);
+
+ rtl_table_read(r, vlan);
+ v = sw_r32(rtl_table_data(r, 0));
+ w = sw_r32(rtl_table_data(r, 1));
+ x = sw_r32(rtl_table_data(r, 2));
+ y = sw_r32(rtl_table_data(r, 3));
+ rtl_table_release(r);
+
+ pr_debug("VLAN_READ %d: %08x %08x %08x %08x\n", vlan, v, w, x, y);
+ info->tagged_ports = ((u64) v) << 25 | (w >> 7);
+ info->profile_id = (x >> 16) & 0xf;
+ info->fid = w & 0x7f; // AKA MSTI depending on context
+ info->hash_uc_fid = !!(x & BIT(31));
+ info->hash_mc_fid = !!(x & BIT(30));
+ info->if_id = (x >> 20) & 0x3ff;
+ info->multicast_grp_mask = x & 0xffff;
+ if (x & BIT(31))
+ info->l2_tunnel_list_id = y >> 18;
+ else
+ info->l2_tunnel_list_id = -1;
+ pr_debug("%s read tagged %016llx, profile-id %d, uc %d, mc %d, intf-id %d\n", __func__,
+ info->tagged_ports, info->profile_id, info->hash_uc_fid, info->hash_mc_fid,
+ info->if_id);
+
+ // Read UNTAG table via table register 3
+ r = rtl_table_get(RTL9310_TBL_3, 0);
+ rtl_table_read(r, vlan);
+ v = ((u64)sw_r32(rtl_table_data(r, 0))) << 25;
+ v |= sw_r32(rtl_table_data(r, 1)) >> 7;
+ rtl_table_release(r);
+
+ info->untagged_ports = v;
+}
+
+static void rtl931x_vlan_set_tagged(u32 vlan, struct rtl838x_vlan_info *info)
+{
+ u32 v, w, x, y;
+	// Access VLAN table (3) via register 0
+ struct table_reg *r = rtl_table_get(RTL9310_TBL_0, 3);
+
+ v = info->tagged_ports >> 25;
+ w = (info->tagged_ports & 0x1fffff) << 7;
+ w |= info->fid & 0x7f;
+ x = info->hash_uc_fid ? BIT(31) : 0;
+ x |= info->hash_mc_fid ? BIT(30) : 0;
+	x |= (info->if_id & 0x3ff) << 20;
+ x |= (info->profile_id & 0xf) << 16;
+ x |= info->multicast_grp_mask & 0xffff;
+ if (info->l2_tunnel_list_id >= 0) {
+ y = info->l2_tunnel_list_id << 18;
+ y |= BIT(31);
+ } else {
+ y = 0;
+ }
+
+ sw_w32(v, rtl_table_data(r, 0));
+ sw_w32(w, rtl_table_data(r, 1));
+ sw_w32(x, rtl_table_data(r, 2));
+ sw_w32(y, rtl_table_data(r, 3));
+
+ rtl_table_write(r, vlan);
+ rtl_table_release(r);
+}
+
+static void rtl931x_vlan_set_untagged(u32 vlan, u64 portmask)
+{
+ struct table_reg *r = rtl_table_get(RTL9310_TBL_3, 0);
+
+ rtl839x_set_port_reg_be(portmask << 7, rtl_table_data(r, 0));
+ rtl_table_write(r, vlan);
+ rtl_table_release(r);
+}
+
+static inline int rtl931x_mac_force_mode_ctrl(int p)
+{
+ return RTL931X_MAC_FORCE_MODE_CTRL + (p << 2);
+}
+
+static inline int rtl931x_mac_link_spd_sts(int p)
+{
+ return RTL931X_MAC_LINK_SPD_STS + (((p >> 3) << 2));
+}
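+/* Note (added): each 32-bit MAC_LINK_SPD_STS register appears to cover 8 ports,
+ * hence the (p >> 3) word stride above, leaving 4 bits of speed status per port.
+ */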
+
+static inline int rtl931x_mac_port_ctrl(int p)
+{
+ return RTL931X_MAC_L2_PORT_CTRL + (p << 7);
+}
+
+static inline int rtl931x_l2_port_new_salrn(int p)
+{
+ return RTL931X_L2_PORT_NEW_SALRN(p);
+}
+
+static inline int rtl931x_l2_port_new_sa_fwd(int p)
+{
+ return RTL931X_L2_PORT_NEW_SA_FWD(p);
+}
+
+irqreturn_t rtl931x_switch_irq(int irq, void *dev_id)
+{
+ struct dsa_switch *ds = dev_id;
+ u32 status = sw_r32(RTL931X_ISR_GLB_SRC);
+ u64 ports = rtl839x_get_port_reg_le(RTL931X_ISR_PORT_LINK_STS_CHG);
+ u64 link;
+ int i;
+
+ /* Clear status */
+ rtl839x_set_port_reg_le(ports, RTL931X_ISR_PORT_LINK_STS_CHG);
+ pr_debug("RTL931X Link change: status: %x, ports %016llx\n", status, ports);
+
+ link = rtl839x_get_port_reg_le(RTL931X_MAC_LINK_STS);
+ // Must re-read this to get correct status
+ link = rtl839x_get_port_reg_le(RTL931X_MAC_LINK_STS);
+ pr_debug("RTL931X Link change: status: %x, link status %016llx\n", status, link);
+
+ for (i = 0; i < 56; i++) {
+ if (ports & BIT_ULL(i)) {
+ if (link & BIT_ULL(i)) {
+ pr_info("%s port %d up\n", __func__, i);
+ dsa_port_phylink_mac_change(ds, i, true);
+ } else {
+ pr_info("%s port %d down\n", __func__, i);
+ dsa_port_phylink_mac_change(ds, i, false);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+int rtl931x_write_phy(u32 port, u32 page, u32 reg, u32 val)
+{
+ u32 v;
+ int err = 0;
+
+ val &= 0xffff;
+ if (port > 63 || page > 4095 || reg > 31)
+ return -ENOTSUPP;
+
+ mutex_lock(&smi_lock);
+ pr_debug("%s: writing to phy %d %d %d %d\n", __func__, port, page, reg, val);
+ /* Clear both port registers */
+ sw_w32(0, RTL931X_SMI_INDRT_ACCESS_CTRL_2);
+ sw_w32(0, RTL931X_SMI_INDRT_ACCESS_CTRL_2 + 4);
+ sw_w32_mask(0, BIT(port % 32), RTL931X_SMI_INDRT_ACCESS_CTRL_2 + (port / 32) * 4);
+
+ sw_w32_mask(0xffff, val, RTL931X_SMI_INDRT_ACCESS_CTRL_3);
+
+	v = reg << 6 | page << 11;
+ sw_w32(v, RTL931X_SMI_INDRT_ACCESS_CTRL_0);
+
+ sw_w32(0x1ff, RTL931X_SMI_INDRT_ACCESS_CTRL_1);
+
+ v |= BIT(4) | 1; /* Write operation and execute */
+ sw_w32(v, RTL931X_SMI_INDRT_ACCESS_CTRL_0);
+
+ do {
+ } while (sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_0) & 0x1);
+
+ if (sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_0) & 0x2)
+ err = -EIO;
+
+ mutex_unlock(&smi_lock);
+ return err;
+}
+
+int rtl931x_read_phy(u32 port, u32 page, u32 reg, u32 *val)
+{
+ u32 v;
+
+ if (port > 63 || page > 4095 || reg > 31)
+ return -ENOTSUPP;
+
+ mutex_lock(&smi_lock);
+
+ sw_w32(port << 5, RTL931X_SMI_INDRT_ACCESS_BC_PHYID_CTRL);
+
+ v = reg << 6 | page << 11 | 1;
+ sw_w32(v, RTL931X_SMI_INDRT_ACCESS_CTRL_0);
+
+ do {
+ } while (sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_0) & 0x1);
+
+ v = sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_0);
+ *val = sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_3);
+ *val = (*val & 0xffff0000) >> 16;
+
+ pr_debug("%s: port %d, page: %d, reg: %x, val: %x, v: %08x\n",
+ __func__, port, page, reg, *val, v);
+
+ mutex_unlock(&smi_lock);
+ return 0;
+}
+
+/*
+ * Read an mmd register of the PHY
+ */
+int rtl931x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val)
+{
+ int err = 0;
+ u32 v;
+ /* Select PHY register type
+	 * If a 1G/10G MMD register type is selected, the EXT_PAGE, MAIN_PAGE and REG settings are ignored (don't care).
+ * 0x0 Normal register (Clause 22)
+ * 0x1: 1G MMD register (MMD via Clause 22 registers 13 and 14)
+ * 0x2: 10G MMD register (MMD via Clause 45)
+ */
+	int type = (regnum & MII_ADDR_C45) ? 2 : 1;
+
+ mutex_lock(&smi_lock);
+
+ // Set PHY to access via port-number
+ sw_w32(port << 5, RTL931X_SMI_INDRT_ACCESS_BC_PHYID_CTRL);
+
+ // Set MMD device number and register to write to
+ sw_w32(devnum << 16 | mdiobus_c45_regad(regnum), RTL931X_SMI_INDRT_ACCESS_MMD_CTRL);
+
+ v = type << 2 | BIT(0); // MMD-access-type | EXEC
+ sw_w32(v, RTL931X_SMI_INDRT_ACCESS_CTRL_0);
+
+ do {
+ v = sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_0);
+ } while (v & BIT(0));
+
+ // Check for error condition
+ if (v & BIT(1))
+ err = -EIO;
+
+ *val = sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_3) >> 16;
+
+ pr_debug("%s: port %d, dev: %x, regnum: %x, val: %x (err %d)\n", __func__,
+ port, devnum, mdiobus_c45_regad(regnum), *val, err);
+
+ mutex_unlock(&smi_lock);
+
+ return err;
+}
+
+/*
+ * Write to an mmd register of the PHY
+ */
+int rtl931x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val)
+{
+ int err = 0;
+ u32 v;
+	int type = (regnum & MII_ADDR_C45) ? 2 : 1;
+ u64 pm;
+
+ mutex_lock(&smi_lock);
+
+ // Set PHY to access via port-mask
+ pm = (u64)1 << port;
+ sw_w32((u32)pm, RTL931X_SMI_INDRT_ACCESS_CTRL_2);
+ sw_w32((u32)(pm >> 32), RTL931X_SMI_INDRT_ACCESS_CTRL_2 + 4);
+
+ // Set data to write
+ sw_w32_mask(0xffff, val, RTL931X_SMI_INDRT_ACCESS_CTRL_3);
+
+ // Set MMD device number and register to write to
+ sw_w32(devnum << 16 | mdiobus_c45_regad(regnum), RTL931X_SMI_INDRT_ACCESS_MMD_CTRL);
+
+ v = BIT(4) | type << 2 | BIT(0); // WRITE | MMD-access-type | EXEC
+ sw_w32(v, RTL931X_SMI_INDRT_ACCESS_CTRL_0);
+
+ do {
+ v = sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_0);
+ } while (v & BIT(0));
+
+ pr_debug("%s: port %d, dev: %x, regnum: %x, val: %x (err %d)\n", __func__,
+ port, devnum, mdiobus_c45_regad(regnum), val, err);
+ mutex_unlock(&smi_lock);
+ return err;
+}
+
+void rtl931x_print_matrix(void)
+{
+ volatile u64 *ptr = RTL838X_SW_BASE + RTL839X_PORT_ISO_CTRL(0);
+ int i;
+
+ for (i = 0; i < 52; i += 4)
+ pr_info("> %16llx %16llx %16llx %16llx\n",
+ ptr[i + 0], ptr[i + 1], ptr[i + 2], ptr[i + 3]);
+ pr_info("CPU_PORT> %16llx\n", ptr[52]);
+}
+
+void rtl931x_set_receive_management_action(int port, rma_ctrl_t type, action_type_t action)
+{
+ u32 value = 0;
+
+ /* hack for value mapping */
+ if (type == GRATARP && action == COPY2CPU)
+ action = TRAP2MASTERCPU;
+
+ switch(action) {
+ case FORWARD:
+ value = 0;
+ break;
+ case DROP:
+ value = 1;
+ break;
+ case TRAP2CPU:
+ value = 2;
+ break;
+ case TRAP2MASTERCPU:
+ value = 3;
+ break;
+ case FLOODALL:
+ value = 4;
+ break;
+ default:
+ break;
+ }
+
+ switch(type) {
+ case BPDU:
+ sw_w32_mask(7 << ((port % 10) * 3), value << ((port % 10) * 3), RTL931X_RMA_BPDU_CTRL + ((port / 10) << 2));
+ break;
+ case PTP:
+ //udp
+ sw_w32_mask(3 << 2, value << 2, RTL931X_RMA_PTP_CTRL + (port << 2));
+ //eth2
+ sw_w32_mask(3, value, RTL931X_RMA_PTP_CTRL + (port << 2));
+ break;
+ case PTP_UDP:
+ sw_w32_mask(3 << 2, value << 2, RTL931X_RMA_PTP_CTRL + (port << 2));
+ break;
+ case PTP_ETH2:
+ sw_w32_mask(3, value, RTL931X_RMA_PTP_CTRL + (port << 2));
+ break;
+ case LLTP:
+ sw_w32_mask(7 << ((port % 10) * 3), value << ((port % 10) * 3), RTL931X_RMA_LLTP_CTRL + ((port / 10) << 2));
+ break;
+ case EAPOL:
+ sw_w32_mask(7 << ((port % 10) * 3), value << ((port % 10) * 3), RTL931X_RMA_EAPOL_CTRL + ((port / 10) << 2));
+ break;
+ case GRATARP:
+ sw_w32_mask(3 << ((port & 0xf) << 1), value << ((port & 0xf) << 1), RTL931X_TRAP_ARP_GRAT_PORT_ACT + ((port >> 4) << 2));
+ break;
+ }
+}
+
+u64 rtl931x_traffic_get(int source)
+{
+ u32 v;
+ struct table_reg *r = rtl_table_get(RTL9310_TBL_0, 6);
+
+ rtl_table_read(r, source);
+ v = sw_r32(rtl_table_data(r, 0));
+ rtl_table_release(r);
+ return v >> 3;
+}
+
+/*
+ * Enable traffic between a source port and a destination port matrix
+ */
+void rtl931x_traffic_set(int source, u64 dest_matrix)
+{
+ struct table_reg *r = rtl_table_get(RTL9310_TBL_0, 6);
+
+ sw_w32((dest_matrix << 3), rtl_table_data(r, 0));
+ rtl_table_write(r, source);
+ rtl_table_release(r);
+}
+
+void rtl931x_traffic_enable(int source, int dest)
+{
+ struct table_reg *r = rtl_table_get(RTL9310_TBL_0, 6);
+ rtl_table_read(r, source);
+ sw_w32_mask(0, BIT(dest + 3), rtl_table_data(r, 0));
+ rtl_table_write(r, source);
+ rtl_table_release(r);
+}
+
+void rtl931x_traffic_disable(int source, int dest)
+{
+ struct table_reg *r = rtl_table_get(RTL9310_TBL_0, 6);
+ rtl_table_read(r, source);
+ sw_w32_mask(BIT(dest + 3), 0, rtl_table_data(r, 0));
+ rtl_table_write(r, source);
+ rtl_table_release(r);
+}
+
+static u64 rtl931x_l2_hash_seed(u64 mac, u32 vid)
+{
+ u64 v = vid;
+
+ v <<= 48;
+ v |= mac;
+
+ return v;
+}
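+/* Illustration (added): for MAC 00:11:22:33:44:55 and VID 1 the seed is
+ * 0x0001001122334455, i.e. the 12-bit VID occupies the bits above the
+ * 48-bit MAC address.
+ */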
+
+/*
+ * Calculate both the block 0 and the block 1 hash by applying the same hash
+ * algorithm as the one currently used by the ASIC to the seed, and return
+ * both hashes in the lower and higher word of the return value, since only
+ * 12 bits of the hash are significant.
+ */
+static u32 rtl931x_l2_hash_key(struct rtl838x_switch_priv *priv, u64 seed)
+{
+ u32 h, h0, h1, h2, h3, h4, k0, k1;
+
+ h0 = seed & 0xfff;
+ h1 = (seed >> 12) & 0xfff;
+ h2 = (seed >> 24) & 0xfff;
+ h3 = (seed >> 36) & 0xfff;
+ h4 = (seed >> 48) & 0xfff;
+ h4 = ((h4 & 0x7) << 9) | ((h4 >> 3) & 0x1ff);
+ k0 = h0 ^ h1 ^ h2 ^ h3 ^ h4;
+
+ h0 = seed & 0xfff;
+ h0 = ((h0 & 0x1ff) << 3) | ((h0 >> 9) & 0x7);
+ h1 = (seed >> 12) & 0xfff;
+ h1 = ((h1 & 0x3f) << 6) | ((h1 >> 6) & 0x3f);
+ h2 = (seed >> 24) & 0xfff;
+ h3 = (seed >> 36) & 0xfff;
+ h3 = ((h3 & 0x3f) << 6) | ((h3 >> 6) & 0x3f);
+ h4 = (seed >> 48) & 0xfff;
+ k1 = h0 ^ h1 ^ h2 ^ h3 ^ h4;
+
+ // Algorithm choice for block 0
+ if (sw_r32(RTL931X_L2_CTRL) & BIT(0))
+ h = k1;
+ else
+ h = k0;
+
+ /* Algorithm choice for block 1
+ * Since k0 and k1 are < 4096, adding 4096 will offset the hash into the second
+ * half of hash-space
+ * 4096 is in fact the hash-table size 32768 divided by 4 hashes per bucket
+ * divided by 2 to divide the hash space in 2
+ */
+ if (sw_r32(RTL931X_L2_CTRL) & BIT(1))
+ h |= (k1 + 4096) << 16;
+ else
+ h |= (k0 + 4096) << 16;
+
+ return h;
+}
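+/* Note (added): callers such as rtl931x_read_l2_entry_using_hash() below use
+ * the lower 16 bits of the returned value as the block 0 bucket index and the
+ * upper 16 bits as the block 1 bucket index (already offset by 4096 into the
+ * second half of the hash space).
+ */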
+
+/*
+ * Fills an L2 entry structure from the SoC registers
+ */
+static void rtl931x_fill_l2_entry(u32 r[], struct rtl838x_l2_entry *e)
+{
+ pr_debug("In %s valid?\n", __func__);
+ e->valid = !!(r[0] & BIT(31));
+ if (!e->valid)
+ return;
+
+ pr_debug("%s: entry valid, raw: %08x %08x %08x %08x\n", __func__, r[0], r[1], r[2], r[3]);
+ e->is_ip_mc = false;
+ e->is_ipv6_mc = false;
+
+ e->mac[0] = r[0] >> 8;
+ e->mac[1] = r[0];
+ e->mac[2] = r[1] >> 24;
+ e->mac[3] = r[1] >> 16;
+ e->mac[4] = r[1] >> 8;
+ e->mac[5] = r[1];
+
+ e->is_open_flow = !!(r[0] & BIT(30));
+ e->is_pe_forward = !!(r[0] & BIT(29));
+ e->next_hop = !!(r[2] & BIT(30));
+ e->rvid = (r[0] >> 16) & 0xfff;
+
+ /* Is it a unicast entry? check multicast bit */
+ if (!(e->mac[0] & 1)) {
+ e->type = L2_UNICAST;
+ e->is_l2_tunnel = !!(r[2] & BIT(31));
+ e->is_static = !!(r[2] & BIT(13));
+ e->port = (r[2] >> 19) & 0x3ff;
+ // Check for trunk port
+ if (r[2] & BIT(29)) {
+ e->is_trunk = true;
+ e->stack_dev = (e->port >> 9) & 1;
+ e->trunk = e->port & 0x3f;
+ } else {
+ e->is_trunk = false;
+ e->stack_dev = (e->port >> 6) & 0xf;
+ e->port = e->port & 0x3f;
+ }
+
+ e->block_da = !!(r[2] & BIT(14));
+ e->block_sa = !!(r[2] & BIT(15));
+ e->suspended = !!(r[2] & BIT(12));
+ e->age = (r[2] >> 16) & 3;
+
+ // the UC_VID field in hardware is used for the VID or for the route id
+ if (e->next_hop) {
+ e->nh_route_id = r[2] & 0x7ff;
+ e->vid = 0;
+ } else {
+ e->vid = r[2] & 0xfff;
+ e->nh_route_id = 0;
+ }
+ if (e->is_l2_tunnel)
+ e->l2_tunnel_id = ((r[2] & 0xff) << 4) | (r[3] >> 28);
+ // TODO: Implement VLAN conversion
+ } else {
+ e->type = L2_MULTICAST;
+ e->is_local_forward = !!(r[2] & BIT(31));
+ e->is_remote_forward = !!(r[2] & BIT(17));
+ e->mc_portmask_index = (r[2] >> 18) & 0xfff;
+ e->l2_tunnel_list_id = (r[2] >> 4) & 0x1fff;
+ }
+}
+
+/*
+ * Fills the 3 SoC table registers r[] with the information of in the rtl838x_l2_entry
+ */
+static void rtl931x_fill_l2_row(u32 r[], struct rtl838x_l2_entry *e)
+{
+ u32 port;
+
+ if (!e->valid) {
+ r[0] = r[1] = r[2] = 0;
+ return;
+ }
+
+ r[2] = BIT(31); // Set valid bit
+
+ r[0] = ((u32)e->mac[0]) << 24 | ((u32)e->mac[1]) << 16
+ | ((u32)e->mac[2]) << 8 | ((u32)e->mac[3]);
+ r[1] = ((u32)e->mac[4]) << 24 | ((u32)e->mac[5]) << 16;
+
+ r[2] |= e->next_hop ? BIT(12) : 0;
+
+ if (e->type == L2_UNICAST) {
+ r[2] |= e->is_static ? BIT(14) : 0;
+ r[1] |= e->rvid & 0xfff;
+ r[2] |= (e->port & 0x3ff) << 20;
+ if (e->is_trunk) {
+ r[2] |= BIT(30);
+ port = e->stack_dev << 9 | (e->port & 0x3f);
+ } else {
+ port = (e->stack_dev & 0xf) << 6;
+ port |= e->port & 0x3f;
+ }
+ r[2] |= port << 20;
+ r[2] |= e->block_da ? BIT(15) : 0;
+ r[2] |= e->block_sa ? BIT(17) : 0;
+ r[2] |= e->suspended ? BIT(13) : 0;
+ r[2] |= (e->age & 0x3) << 17;
+ // the UC_VID field in hardware is used for the VID or for the route id
+ if (e->next_hop)
+ r[2] |= e->nh_route_id & 0x7ff;
+ else
+ r[2] |= e->vid & 0xfff;
+ } else { // L2_MULTICAST
+ r[2] |= (e->mc_portmask_index & 0x3ff) << 16;
+ r[2] |= e->mc_mac_index & 0x7ff;
+ }
+}
+
+/*
+ * Read an L2 UC or MC entry out of a hash bucket of the L2 forwarding table
+ * hash is the id of the bucket and pos is the position of the entry in that bucket
+ * The data read from the SoC is filled into rtl838x_l2_entry
+ */
+static u64 rtl931x_read_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e)
+{
+ u32 r[4];
+ struct table_reg *q = rtl_table_get(RTL9310_TBL_0, 0);
+ u32 idx;
+ int i;
+ u64 mac;
+ u64 seed;
+
+ pr_debug("%s: hash %08x, pos: %d\n", __func__, hash, pos);
+
+ /* On the RTL93xx, 2 different hash algorithms are used making it a total of
+ * 8 buckets that need to be searched, 4 for each hash-half
+ * Use second hash space when bucket is between 4 and 8 */
+ if (pos >= 4) {
+ pos -= 4;
+ hash >>= 16;
+ } else {
+ hash &= 0xffff;
+ }
+
+ idx = (0 << 14) | (hash << 2) | pos; // Search SRAM, with hash and at pos in bucket
+ pr_debug("%s: NOW hash %08x, pos: %d\n", __func__, hash, pos);
+
+ rtl_table_read(q, idx);
+ for (i = 0; i < 4; i++)
+ r[i] = sw_r32(rtl_table_data(q, i));
+
+ rtl_table_release(q);
+
+ rtl931x_fill_l2_entry(r, e);
+
+ pr_debug("%s: valid: %d, nh: %d\n", __func__, e->valid, e->next_hop);
+ if (!e->valid)
+ return 0;
+
+ mac = ((u64)e->mac[0]) << 40 | ((u64)e->mac[1]) << 32 | ((u64)e->mac[2]) << 24
+ | ((u64)e->mac[3]) << 16 | ((u64)e->mac[4]) << 8 | ((u64)e->mac[5]);
+
+ seed = rtl931x_l2_hash_seed(mac, e->rvid);
+ pr_debug("%s: mac %016llx, seed %016llx\n", __func__, mac, seed);
+ // return vid with concatenated mac as unique id
+ return seed;
+}
+
+static u64 rtl931x_read_cam(int idx, struct rtl838x_l2_entry *e)
+{
+ return 0;
+}
+
+static void rtl931x_write_cam(int idx, struct rtl838x_l2_entry *e)
+{
+}
+
+static void rtl931x_write_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e)
+{
+ u32 r[4];
+ struct table_reg *q = rtl_table_get(RTL9310_TBL_0, 0);
+ u32 idx = (0 << 14) | (hash << 2) | pos; // Access SRAM, with hash and at pos in bucket
+ int i;
+
+ pr_info("%s: hash %d, pos %d\n", __func__, hash, pos);
+ pr_info("%s: index %d -> mac %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, idx,
+		e->mac[0], e->mac[1], e->mac[2], e->mac[3], e->mac[4], e->mac[5]);
+
+ rtl931x_fill_l2_row(r, e);
+ pr_info("%s: %d: %08x %08x %08x\n", __func__, idx, r[0], r[1], r[2]);
+
+	for (i = 0; i < 4; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+}
+
+static void rtl931x_vlan_fwd_on_inner(int port, bool is_set)
+{
+ // Always set all tag modes to fwd based on either inner or outer tag
+ if (is_set)
+ sw_w32_mask(0, 0xf, RTL931X_VLAN_PORT_FWD + (port << 2));
+ else
+ sw_w32_mask(0xf, 0, RTL931X_VLAN_PORT_FWD + (port << 2));
+}
+
+static void rtl931x_vlan_profile_setup(int profile)
+{
+ u32 p[7];
+ int i;
+
+ pr_info("In %s\n", __func__);
+
+ if (profile > 15)
+ return;
+
+ p[0] = sw_r32(RTL931X_VLAN_PROFILE_SET(profile));
+
+	// Enable routing of IPv4/6 Unicast and IPv4/6 Multicast traffic
+ //p[0] |= BIT(17) | BIT(16) | BIT(13) | BIT(12);
+ p[0] |= 0x3 << 11; // COPY2CPU
+
+	p[1] = 0x1FFFFFF; // L2 unknown MC flooding portmask all ports, including the CPU-port
+ p[2] = 0xFFFFFFFF;
+	p[3] = 0x1FFFFFF; // IPv4 unknown MC flooding portmask
+ p[4] = 0xFFFFFFFF;
+	p[5] = 0x1FFFFFF; // IPv6 unknown MC flooding portmask
+ p[6] = 0xFFFFFFFF;
+
+ for (i = 0; i < 7; i++)
+ sw_w32(p[i], RTL931X_VLAN_PROFILE_SET(profile) + i * 4);
+ pr_info("Leaving %s\n", __func__);
+}
+
+static void rtl931x_l2_learning_setup(void)
+{
+ // Portmask for flooding broadcast traffic
+ rtl839x_set_port_reg_be(0x1FFFFFFFFFFFFFF, RTL931X_L2_BC_FLD_PMSK);
+
+ // Portmask for flooding unicast traffic with unknown destination
+ rtl839x_set_port_reg_be(0x1FFFFFFFFFFFFFF, RTL931X_L2_UNKN_UC_FLD_PMSK);
+
+ // Limit learning to maximum: 64k entries, after that just flood (bits 0-2)
+ sw_w32((0xffff << 3) | FORWARD, RTL931X_L2_LRN_CONSTRT_CTRL);
+}
+
+static u64 rtl931x_read_mcast_pmask(int idx)
+{
+ u64 portmask;
+ // Read MC_PMSK (2) via register RTL9310_TBL_0
+ struct table_reg *q = rtl_table_get(RTL9310_TBL_0, 2);
+
+ rtl_table_read(q, idx);
+ portmask = sw_r32(rtl_table_data(q, 0));
+ portmask <<= 32;
+ portmask |= sw_r32(rtl_table_data(q, 1));
+ portmask >>= 7;
+ rtl_table_release(q);
+
+ pr_debug("%s: Index idx %d has portmask %016llx\n", __func__, idx, portmask);
+ return portmask;
+}
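+/* Note (added): as in the VLAN tables above, the port mask appears to be stored
+ * shifted up by 7 bits within the two 32-bit table words, hence the shift by 7
+ * on read here and on write below.
+ */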
+
+static void rtl931x_write_mcast_pmask(int idx, u64 portmask)
+{
+ u64 pm = portmask;
+
+ // Access MC_PMSK (2) via register RTL9310_TBL_0
+ struct table_reg *q = rtl_table_get(RTL9310_TBL_0, 2);
+
+ pr_debug("%s: Index idx %d has portmask %016llx\n", __func__, idx, pm);
+ pm <<= 7;
+ sw_w32((u32)(pm >> 32), rtl_table_data(q, 0));
+ sw_w32((u32)pm, rtl_table_data(q, 1));
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+}
+
+
+static int rtl931x_set_ageing_time(unsigned long msec)
+{
+ int t = sw_r32(RTL931X_L2_AGE_CTRL);
+
+ t &= 0x1FFFFF;
+ t = (t * 8) / 10;
+ pr_debug("L2 AGING time: %d sec\n", t);
+
+ t = (msec / 100 + 7) / 8;
+ t = t > 0x1FFFFF ? 0x1FFFFF : t;
+ sw_w32_mask(0x1FFFFF, t, RTL931X_L2_AGE_CTRL);
+ pr_debug("Dynamic aging for ports: %x\n", sw_r32(RTL931X_L2_PORT_AGE_CTRL));
+ return 0;
+}
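+/* Worked example for rtl931x_set_ageing_time() (added): msec = 300000
+ * (5 minutes) gives t = (300000 / 100 + 7) / 8 = 375; reading the register back
+ * then reports roughly 375 * 8 / 10 = 300 seconds.
+ */
+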
+void rtl931x_sw_init(struct rtl838x_switch_priv *priv)
+{
+// rtl931x_sds_init(priv);
+}
+
+static void rtl931x_pie_lookup_enable(struct rtl838x_switch_priv *priv, int index)
+{
+ int block = index / PIE_BLOCK_SIZE;
+
+ sw_w32_mask(0, BIT(block), RTL931X_PIE_BLK_LOOKUP_CTRL);
+}
+
+/*
+ * Fills the data in the intermediate representation in the pie_rule structure
+ * into a data field for a given template field field_type
+ * TODO: This function looks very similar to its RTL9300 counterpart, but
+ * since it uses the physical template_field_id values, which differ for each
+ * SoC, and there are additional field types, it is actually not. If we also used
+ * an intermediate representation for the field types, we could have one
+ * pie_data_fill function for all SoCs, provided we also had, for each SoC, a
+ * function to map between physical and intermediate field types.
+ */
+int rtl931x_pie_data_fill(enum template_field_id field_type, struct pie_rule *pr, u16 *data, u16 *data_m)
+{
+ *data = *data_m = 0;
+
+ switch (field_type) {
+ case TEMPLATE_FIELD_SPM0:
+ *data = pr->spm;
+ *data_m = pr->spm_m;
+ break;
+ case TEMPLATE_FIELD_SPM1:
+ *data = pr->spm >> 16;
+ *data_m = pr->spm_m >> 16;
+ break;
+ case TEMPLATE_FIELD_OTAG:
+ *data = pr->otag;
+ *data_m = pr->otag_m;
+ break;
+ case TEMPLATE_FIELD_SMAC0:
+ *data = pr->smac[4];
+ *data = (*data << 8) | pr->smac[5];
+ *data_m = pr->smac_m[4];
+ *data_m = (*data_m << 8) | pr->smac_m[5];
+ break;
+ case TEMPLATE_FIELD_SMAC1:
+ *data = pr->smac[2];
+ *data = (*data << 8) | pr->smac[3];
+ *data_m = pr->smac_m[2];
+ *data_m = (*data_m << 8) | pr->smac_m[3];
+ break;
+ case TEMPLATE_FIELD_SMAC2:
+ *data = pr->smac[0];
+ *data = (*data << 8) | pr->smac[1];
+ *data_m = pr->smac_m[0];
+ *data_m = (*data_m << 8) | pr->smac_m[1];
+ break;
+ case TEMPLATE_FIELD_DMAC0:
+ *data = pr->dmac[4];
+ *data = (*data << 8) | pr->dmac[5];
+ *data_m = pr->dmac_m[4];
+ *data_m = (*data_m << 8) | pr->dmac_m[5];
+ break;
+ case TEMPLATE_FIELD_DMAC1:
+ *data = pr->dmac[2];
+ *data = (*data << 8) | pr->dmac[3];
+ *data_m = pr->dmac_m[2];
+ *data_m = (*data_m << 8) | pr->dmac_m[3];
+ break;
+ case TEMPLATE_FIELD_DMAC2:
+ *data = pr->dmac[0];
+ *data = (*data << 8) | pr->dmac[1];
+ *data_m = pr->dmac_m[0];
+ *data_m = (*data_m << 8) | pr->dmac_m[1];
+ break;
+ case TEMPLATE_FIELD_ETHERTYPE:
+ *data = pr->ethertype;
+ *data_m = pr->ethertype_m;
+ break;
+ case TEMPLATE_FIELD_ITAG:
+ *data = pr->itag;
+ *data_m = pr->itag_m;
+ break;
+ case TEMPLATE_FIELD_SIP0:
+ if (pr->is_ipv6) {
+ *data = pr->sip6.s6_addr16[7];
+ *data_m = pr->sip6_m.s6_addr16[7];
+ } else {
+ *data = pr->sip;
+ *data_m = pr->sip_m;
+ }
+ break;
+ case TEMPLATE_FIELD_SIP1:
+ if (pr->is_ipv6) {
+ *data = pr->sip6.s6_addr16[6];
+ *data_m = pr->sip6_m.s6_addr16[6];
+ } else {
+ *data = pr->sip >> 16;
+ *data_m = pr->sip_m >> 16;
+ }
+ break;
+ case TEMPLATE_FIELD_SIP2:
+ case TEMPLATE_FIELD_SIP3:
+ case TEMPLATE_FIELD_SIP4:
+ case TEMPLATE_FIELD_SIP5:
+ case TEMPLATE_FIELD_SIP6:
+ case TEMPLATE_FIELD_SIP7:
+ *data = pr->sip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)];
+ *data_m = pr->sip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)];
+ break;
+
+ case TEMPLATE_FIELD_DIP0:
+ if (pr->is_ipv6) {
+ *data = pr->dip6.s6_addr16[7];
+ *data_m = pr->dip6_m.s6_addr16[7];
+ } else {
+ *data = pr->dip;
+ *data_m = pr->dip_m;
+ }
+ break;
+ case TEMPLATE_FIELD_DIP1:
+ if (pr->is_ipv6) {
+ *data = pr->dip6.s6_addr16[6];
+ *data_m = pr->dip6_m.s6_addr16[6];
+ } else {
+ *data = pr->dip >> 16;
+ *data_m = pr->dip_m >> 16;
+ }
+ break;
+
+ case TEMPLATE_FIELD_DIP2:
+ case TEMPLATE_FIELD_DIP3:
+ case TEMPLATE_FIELD_DIP4:
+ case TEMPLATE_FIELD_DIP5:
+ case TEMPLATE_FIELD_DIP6:
+ case TEMPLATE_FIELD_DIP7:
+ *data = pr->dip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)];
+ *data_m = pr->dip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)];
+ break;
+
+ case TEMPLATE_FIELD_IP_TOS_PROTO:
+ *data = pr->tos_proto;
+ *data_m = pr->tos_proto_m;
+ break;
+ case TEMPLATE_FIELD_L4_SPORT:
+ *data = pr->sport;
+ *data_m = pr->sport_m;
+ break;
+ case TEMPLATE_FIELD_L4_DPORT:
+ *data = pr->dport;
+ *data_m = pr->dport_m;
+ break;
+ case TEMPLATE_FIELD_DSAP_SSAP:
+ *data = pr->dsap_ssap;
+ *data_m = pr->dsap_ssap_m;
+ break;
+ case TEMPLATE_FIELD_TCP_INFO:
+ *data = pr->tcp_info;
+ *data_m = pr->tcp_info_m;
+ break;
+ case TEMPLATE_FIELD_RANGE_CHK:
+ pr_info("TEMPLATE_FIELD_RANGE_CHK: not configured\n");
+ break;
+ default:
+ pr_info("%s: unknown field %d\n", __func__, field_type);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Reads the intermediate representation of the templated match-fields of the
+ * PIE rule in the pie_rule structure and fills in the raw data fields in the
+ * raw register space r[].
+ * The register space configuration size is identical for the RTL8380/90 and RTL9300,
+ * however the RTL931X has 2 more registers / fields and the physical field-ids are different
+ * on all SoCs
+ * On the RTL9300 the mask fields are not word-aligned!
+ */
+static void rtl931x_write_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[])
+{
+ int i;
+ u16 data, data_m;
+
+ for (i = 0; i < N_FIXED_FIELDS; i++) {
+ rtl931x_pie_data_fill(t[i], pr, &data, &data_m);
+
+ // On the RTL9300, the mask fields are not word aligned!
+ if (!(i % 2)) {
+ r[5 - i / 2] = data;
+ r[12 - i / 2] |= ((u32)data_m << 8);
+ } else {
+ r[5 - i / 2] |= ((u32)data) << 16;
+ r[12 - i / 2] |= ((u32)data_m) << 24;
+ r[11 - i / 2] |= ((u32)data_m) >> 8;
+ }
+ }
+}
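+/* Layout note (added, derived from the loop above): for an even field index i
+ * the 16-bit data word is placed in bits 0-15 of r[5 - i/2] and its mask in
+ * bits 8-23 of r[12 - i/2]; for an odd i the data goes into bits 16-31 of
+ * r[5 - i/2], while the mask is split across bits 24-31 of r[12 - i/2] and
+ * bits 0-7 of r[11 - i/2].
+ */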
+
+static void rtl931x_read_pie_fixed_fields(u32 r[], struct pie_rule *pr)
+{
+ pr->mgnt_vlan = r[7] & BIT(31);
+ if (pr->phase == PHASE_IACL)
+ pr->dmac_hit_sw = r[7] & BIT(30);
+ else // TODO: EACL/VACL phase handling
+ pr->content_too_deep = r[7] & BIT(30);
+ pr->not_first_frag = r[7] & BIT(29);
+ pr->frame_type_l4 = (r[7] >> 26) & 7;
+ pr->frame_type = (r[7] >> 24) & 3;
+ pr->otag_fmt = (r[7] >> 23) & 1;
+ pr->itag_fmt = (r[7] >> 22) & 1;
+ pr->otag_exist = (r[7] >> 21) & 1;
+ pr->itag_exist = (r[7] >> 20) & 1;
+ pr->frame_type_l2 = (r[7] >> 18) & 3;
+ pr->igr_normal_port = (r[7] >> 17) & 1;
+ pr->tid = (r[7] >> 16) & 1;
+
+ pr->mgnt_vlan_m = r[14] & BIT(15);
+ if (pr->phase == PHASE_IACL)
+ pr->dmac_hit_sw_m = r[14] & BIT(14);
+ else
+ pr->content_too_deep_m = r[14] & BIT(14);
+ pr->not_first_frag_m = r[14] & BIT(13);
+ pr->frame_type_l4_m = (r[14] >> 10) & 7;
+ pr->frame_type_m = (r[14] >> 8) & 3;
+ pr->otag_fmt_m = r[14] & BIT(7);
+ pr->itag_fmt_m = r[14] & BIT(6);
+ pr->otag_exist_m = r[14] & BIT(5);
+	pr->itag_exist_m = r[14] & BIT(4);
+ pr->frame_type_l2_m = (r[14] >> 2) & 3;
+ pr->igr_normal_port_m = r[14] & BIT(1);
+ pr->tid_m = r[14] & 1;
+
+ pr->valid = r[15] & BIT(31);
+ pr->cond_not = r[15] & BIT(30);
+ pr->cond_and1 = r[15] & BIT(29);
+ pr->cond_and2 = r[15] & BIT(28);
+}
+
+static void rtl931x_write_pie_fixed_fields(u32 r[], struct pie_rule *pr)
+{
+ r[7] |= pr->mgnt_vlan ? BIT(31) : 0;
+ if (pr->phase == PHASE_IACL)
+ r[7] |= pr->dmac_hit_sw ? BIT(30) : 0;
+ else
+ r[7] |= pr->content_too_deep ? BIT(30) : 0;
+ r[7] |= pr->not_first_frag ? BIT(29) : 0;
+ r[7] |= ((u32) (pr->frame_type_l4 & 0x7)) << 26;
+ r[7] |= ((u32) (pr->frame_type & 0x3)) << 24;
+ r[7] |= pr->otag_fmt ? BIT(23) : 0;
+ r[7] |= pr->itag_fmt ? BIT(22) : 0;
+ r[7] |= pr->otag_exist ? BIT(21) : 0;
+ r[7] |= pr->itag_exist ? BIT(20) : 0;
+ r[7] |= ((u32) (pr->frame_type_l2 & 0x3)) << 18;
+ r[7] |= pr->igr_normal_port ? BIT(17) : 0;
+ r[7] |= ((u32) (pr->tid & 0x1)) << 16;
+
+ r[14] |= pr->mgnt_vlan_m ? BIT(15) : 0;
+ if (pr->phase == PHASE_IACL)
+ r[14] |= pr->dmac_hit_sw_m ? BIT(14) : 0;
+ else
+ r[14] |= pr->content_too_deep_m ? BIT(14) : 0;
+ r[14] |= pr->not_first_frag_m ? BIT(13) : 0;
+ r[14] |= ((u32) (pr->frame_type_l4_m & 0x7)) << 10;
+ r[14] |= ((u32) (pr->frame_type_m & 0x3)) << 8;
+ r[14] |= pr->otag_fmt_m ? BIT(7) : 0;
+ r[14] |= pr->itag_fmt_m ? BIT(6) : 0;
+ r[14] |= pr->otag_exist_m ? BIT(5) : 0;
+ r[14] |= pr->itag_exist_m ? BIT(4) : 0;
+ r[14] |= ((u32) (pr->frame_type_l2_m & 0x3)) << 2;
+ r[14] |= pr->igr_normal_port_m ? BIT(1) : 0;
+ r[14] |= (u32) (pr->tid_m & 0x1);
+
+ r[15] |= pr->valid ? BIT(31) : 0;
+ r[15] |= pr->cond_not ? BIT(30) : 0;
+ r[15] |= pr->cond_and1 ? BIT(29) : 0;
+ r[15] |= pr->cond_and2 ? BIT(28) : 0;
+}
+
+static void rtl931x_write_pie_action(u32 r[], struct pie_rule *pr)
+{
+ // Either drop or forward
+ if (pr->drop) {
+ r[15] |= BIT(11) | BIT(12) | BIT(13); // Do Green, Yellow and Red drops
+ // Actually DROP, not PERMIT in Green / Yellow / Red
+ r[16] |= BIT(27) | BIT(28) | BIT(29);
+ } else {
+ r[15] |= pr->fwd_sel ? BIT(14) : 0;
+ r[16] |= pr->fwd_act << 24;
+ r[16] |= BIT(21); // We overwrite any drop
+ }
+ if (pr->phase == PHASE_VACL)
+ r[16] |= pr->fwd_sa_lrn ? BIT(22) : 0;
+ r[15] |= pr->bypass_sel ? BIT(10) : 0;
+ r[15] |= pr->nopri_sel ? BIT(21) : 0;
+ r[15] |= pr->tagst_sel ? BIT(20) : 0;
+ r[15] |= pr->ovid_sel ? BIT(18) : 0;
+ r[15] |= pr->ivid_sel ? BIT(16) : 0;
+ r[15] |= pr->meter_sel ? BIT(27) : 0;
+ r[15] |= pr->mir_sel ? BIT(15) : 0;
+ r[15] |= pr->log_sel ? BIT(26) : 0;
+
+ r[16] |= ((u32)(pr->fwd_data & 0xfff)) << 9;
+// r[15] |= pr->log_octets ? BIT(31) : 0;
+ r[15] |= (u32)(pr->meter_data) >> 2;
+ r[16] |= (((u32)(pr->meter_data) >> 7) & 0x3) << 29;
+
+ r[16] |= ((u32)(pr->ivid_act & 0x3)) << 21;
+ r[15] |= ((u32)(pr->ivid_data & 0xfff)) << 9;
+ r[16] |= ((u32)(pr->ovid_act & 0x3)) << 30;
+ r[16] |= ((u32)(pr->ovid_data & 0xfff)) << 16;
+ r[16] |= ((u32)(pr->mir_data & 0x3)) << 6;
+ r[17] |= ((u32)(pr->tagst_data & 0xf)) << 28;
+ r[17] |= ((u32)(pr->nopri_data & 0x7)) << 25;
+ r[17] |= pr->bypass_ibc_sc ? BIT(16) : 0;
+}
+
+void rtl931x_pie_rule_dump_raw(u32 r[])
+{
+ pr_info("Raw IACL table entry:\n");
+ pr_info("r 0 - 7: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ r[0], r[1], r[2], r[3], r[4], r[5], r[6], r[7]);
+ pr_info("r 8 - 15: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ r[8], r[9], r[10], r[11], r[12], r[13], r[14], r[15]);
+ pr_info("r 16 - 18: %08x %08x %08x\n", r[16], r[17], r[18]);
+ pr_info("Match : %08x %08x %08x %08x %08x %08x\n", r[0], r[1], r[2], r[3], r[4], r[5]);
+ pr_info("Fixed : %06x\n", r[6] >> 8);
+ pr_info("Match M: %08x %08x %08x %08x %08x %08x\n",
+ (r[6] << 24) | (r[7] >> 8), (r[7] << 24) | (r[8] >> 8), (r[8] << 24) | (r[9] >> 8),
+ (r[9] << 24) | (r[10] >> 8), (r[10] << 24) | (r[11] >> 8),
+ (r[11] << 24) | (r[12] >> 8));
+ pr_info("R[13]: %08x\n", r[13]);
+ pr_info("Fixed M: %06x\n", ((r[12] << 16) | (r[13] >> 16)) & 0xffffff);
+ pr_info("Valid / not / and1 / and2 : %1x\n", (r[13] >> 12) & 0xf);
+ pr_info("r 13-16: %08x %08x %08x %08x\n", r[13], r[14], r[15], r[16]);
+}
+
+static int rtl931x_pie_rule_write(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr)
+{
+ // Access IACL table (0) via register 1, the table size is 4096
+ struct table_reg *q = rtl_table_get(RTL9310_TBL_1, 0);
+ u32 r[22];
+ int i;
+ int block = idx / PIE_BLOCK_SIZE;
+ u32 t_select = sw_r32(RTL931X_PIE_BLK_TMPLTE_CTRL(block));
+
+ pr_info("%s: %d, t_select: %08x\n", __func__, idx, t_select);
+
+ for (i = 0; i < 22; i++)
+ r[i] = 0;
+
+ if (!pr->valid) {
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+ return 0;
+ }
+ rtl931x_write_pie_fixed_fields(r, pr);
+
+ pr_info("%s: template %d\n", __func__, (t_select >> (pr->tid * 4)) & 0xf);
+ rtl931x_write_pie_templated(r, pr, fixed_templates[(t_select >> (pr->tid * 4)) & 0xf]);
+
+ rtl931x_write_pie_action(r, pr);
+
+ rtl931x_pie_rule_dump_raw(r);
+
+ for (i = 0; i < 22; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+
+ return 0;
+}
+
+static bool rtl931x_pie_templ_has(int t, enum template_field_id field_type)
+{
+ int i;
+ enum template_field_id ft;
+
+ for (i = 0; i < N_FIXED_FIELDS_RTL931X; i++) {
+ ft = fixed_templates[t][i];
+ if (field_type == ft)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Verify that the rule pr is compatible with a given template t in block block
+ * Note that this function is SoC specific since the values of e.g. TEMPLATE_FIELD_SIP0
+ * depend on the SoC
+ */
+static int rtl931x_pie_verify_template(struct rtl838x_switch_priv *priv,
+ struct pie_rule *pr, int t, int block)
+{
+ int i;
+
+ if (!pr->is_ipv6 && pr->sip_m && !rtl931x_pie_templ_has(t, TEMPLATE_FIELD_SIP0))
+ return -1;
+
+ if (!pr->is_ipv6 && pr->dip_m && !rtl931x_pie_templ_has(t, TEMPLATE_FIELD_DIP0))
+ return -1;
+
+ if (pr->is_ipv6) {
+ if ((pr->sip6_m.s6_addr32[0] || pr->sip6_m.s6_addr32[1]
+ || pr->sip6_m.s6_addr32[2] || pr->sip6_m.s6_addr32[3])
+ && !rtl931x_pie_templ_has(t, TEMPLATE_FIELD_SIP2))
+ return -1;
+ if ((pr->dip6_m.s6_addr32[0] || pr->dip6_m.s6_addr32[1]
+ || pr->dip6_m.s6_addr32[2] || pr->dip6_m.s6_addr32[3])
+ && !rtl931x_pie_templ_has(t, TEMPLATE_FIELD_DIP2))
+ return -1;
+ }
+
+ if (ether_addr_to_u64(pr->smac) && !rtl931x_pie_templ_has(t, TEMPLATE_FIELD_SMAC0))
+ return -1;
+
+ if (ether_addr_to_u64(pr->dmac) && !rtl931x_pie_templ_has(t, TEMPLATE_FIELD_DMAC0))
+ return -1;
+
+ // TODO: Check more
+
+ i = find_first_zero_bit(&priv->pie_use_bm[block * 4], PIE_BLOCK_SIZE);
+
+ if (i >= PIE_BLOCK_SIZE)
+ return -1;
+
+ return i + PIE_BLOCK_SIZE * block;
+}
+
+static int rtl931x_pie_rule_add(struct rtl838x_switch_priv *priv, struct pie_rule *pr)
+{
+ int idx, block, j, t;
+ int min_block = 0;
+ int max_block = priv->n_pie_blocks / 2;
+
+ if (pr->is_egress) {
+ min_block = max_block;
+ max_block = priv->n_pie_blocks;
+ }
+ pr_info("In %s\n", __func__);
+
+ mutex_lock(&priv->pie_mutex);
+
+ for (block = min_block; block < max_block; block++) {
+ for (j = 0; j < 2; j++) {
+ t = (sw_r32(RTL931X_PIE_BLK_TMPLTE_CTRL(block)) >> (j * 4)) & 0xf;
+ pr_info("Testing block %d, template %d, template id %d\n", block, j, t);
+ pr_info("%s: %08x\n",
+ __func__, sw_r32(RTL931X_PIE_BLK_TMPLTE_CTRL(block)));
+ idx = rtl931x_pie_verify_template(priv, pr, t, block);
+ if (idx >= 0)
+ break;
+ }
+ if (j < 2)
+ break;
+ }
+
+ if (block >= priv->n_pie_blocks) {
+ mutex_unlock(&priv->pie_mutex);
+ return -EOPNOTSUPP;
+ }
+
+ pr_info("Using block: %d, index %d, template-id %d\n", block, idx, j);
+ set_bit(idx, priv->pie_use_bm);
+
+ pr->valid = true;
+ pr->tid = j; // Mapped to template number
+ pr->tid_m = 0x1;
+ pr->id = idx;
+
+ rtl931x_pie_lookup_enable(priv, idx);
+ rtl931x_pie_rule_write(priv, idx, pr);
+
+ mutex_unlock(&priv->pie_mutex);
+ return 0;
+}
+
+/*
+ * Delete a range of Packet Inspection Engine rules
+ */
+static int rtl931x_pie_rule_del(struct rtl838x_switch_priv *priv, int index_from, int index_to)
+{
+	u32 v = (index_from << 1) | (index_to << 13) | BIT(0);
+
+ pr_info("%s: from %d to %d\n", __func__, index_from, index_to);
+ mutex_lock(&priv->reg_mutex);
+
+ // Write from-to and execute bit into control register
+ sw_w32(v, RTL931X_PIE_CLR_CTRL);
+
+ // Wait until command has completed
+ do {
+ } while (sw_r32(RTL931X_PIE_CLR_CTRL) & BIT(0));
+
+ mutex_unlock(&priv->reg_mutex);
+ return 0;
+}
+
+static void rtl931x_pie_rule_rm(struct rtl838x_switch_priv *priv, struct pie_rule *pr)
+{
+ int idx = pr->id;
+
+ rtl931x_pie_rule_del(priv, idx, idx);
+ clear_bit(idx, priv->pie_use_bm);
+}
+
+static void rtl931x_pie_init(struct rtl838x_switch_priv *priv)
+{
+ int i;
+ u32 template_selectors;
+
+ mutex_init(&priv->pie_mutex);
+
+ pr_info("%s\n", __func__);
+ // Enable ACL lookup on all ports, including CPU_PORT
+ for (i = 0; i <= priv->cpu_port; i++)
+ sw_w32(1, RTL931X_ACL_PORT_LOOKUP_CTRL(i));
+
+ // Include IPG in metering
+ sw_w32_mask(0, 1, RTL931X_METER_GLB_CTRL);
+
+ // Delete all present rules, block size is 128 on all SoC families
+ rtl931x_pie_rule_del(priv, 0, priv->n_pie_blocks * 128 - 1);
+
+ // Assign first half blocks 0-7 to VACL phase, second half to IACL
+ // 3 bits are used for each block, values for PIE blocks are
+ // 6: Disabled, 0: VACL, 1: IACL, 2: EACL
+ // And for OpenFlow Flow blocks: 3: Ingress Flow table 0,
+ // 4: Ingress Flow Table 3, 5: Egress flow table 0
+ for (i = 0; i < priv->n_pie_blocks; i++) {
+ int pos = (i % 10) * 3;
+ u32 r = RTL931X_PIE_BLK_PHASE_CTRL + 4 * (i / 10);
+
+ if (i < priv->n_pie_blocks / 2)
+ sw_w32_mask(0x7 << pos, 0, r);
+ else
+ sw_w32_mask(0x7 << pos, 1 << pos, r);
+ }
+
+ // Enable predefined templates 0, 1 for first quarter of all blocks
+ template_selectors = 0 | (1 << 4);
+ for (i = 0; i < priv->n_pie_blocks / 4; i++)
+ sw_w32(template_selectors, RTL931X_PIE_BLK_TMPLTE_CTRL(i));
+
+ // Enable predefined templates 2, 3 for second quarter of all blocks
+ template_selectors = 2 | (3 << 4);
+ for (i = priv->n_pie_blocks / 4; i < priv->n_pie_blocks / 2; i++)
+ sw_w32(template_selectors, RTL931X_PIE_BLK_TMPLTE_CTRL(i));
+
+	// Enable predefined templates 0, 1 for third quarter of all blocks
+ template_selectors = 0 | (1 << 4);
+ for (i = priv->n_pie_blocks / 2; i < priv->n_pie_blocks * 3 / 4; i++)
+ sw_w32(template_selectors, RTL931X_PIE_BLK_TMPLTE_CTRL(i));
+
+	// Enable predefined templates 2, 3 for fourth quarter of all blocks
+ template_selectors = 2 | (3 << 4);
+ for (i = priv->n_pie_blocks * 3 / 4; i < priv->n_pie_blocks; i++)
+ sw_w32(template_selectors, RTL931X_PIE_BLK_TMPLTE_CTRL(i));
+}
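+/* Example (added): with the 3-bit phase fields above, block 12 is configured
+ * via bits 6-8 of RTL931X_PIE_BLK_PHASE_CTRL + 4, since pos = (12 % 10) * 3 = 6
+ * and the register stride is one 32-bit word per 10 blocks.
+ */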
+
+int rtl931x_l3_setup(struct rtl838x_switch_priv *priv)
+{
+ return 0;
+}
+
+void rtl931x_vlan_port_keep_tag_set(int port, bool keep_outer, bool keep_inner)
+{
+ sw_w32(FIELD_PREP(RTL931X_VLAN_PORT_TAG_EGR_OTAG_STS_MASK,
+ keep_outer ? RTL931X_VLAN_PORT_TAG_STS_TAGGED : RTL931X_VLAN_PORT_TAG_STS_UNTAG) |
+ FIELD_PREP(RTL931X_VLAN_PORT_TAG_EGR_ITAG_STS_MASK,
+ keep_inner ? RTL931X_VLAN_PORT_TAG_STS_TAGGED : RTL931X_VLAN_PORT_TAG_STS_UNTAG),
+ RTL931X_VLAN_PORT_TAG_CTRL(port));
+}
+
+void rtl931x_vlan_port_pvidmode_set(int port, enum pbvlan_type type, enum pbvlan_mode mode)
+{
+ if (type == PBVLAN_TYPE_INNER)
+ sw_w32_mask(0x3 << 12, mode << 12, RTL931X_VLAN_PORT_IGR_CTRL + (port << 2));
+ else
+ sw_w32_mask(0x3 << 26, mode << 26, RTL931X_VLAN_PORT_IGR_CTRL + (port << 2));
+}
+
+void rtl931x_vlan_port_pvid_set(int port, enum pbvlan_type type, int pvid)
+{
+ if (type == PBVLAN_TYPE_INNER)
+ sw_w32_mask(0xfff, pvid, RTL931X_VLAN_PORT_IGR_CTRL + (port << 2));
+ else
+ sw_w32_mask(0xfff << 14, pvid << 14, RTL931X_VLAN_PORT_IGR_CTRL + (port << 2));
+}
+
+static void rtl931x_set_igr_filter(int port, enum igr_filter state)
+{
+ sw_w32_mask(0x3 << ((port & 0xf)<<1), state << ((port & 0xf)<<1),
+ RTL931X_VLAN_PORT_IGR_FLTR + (((port >> 4) << 2)));
+}
+
+static void rtl931x_set_egr_filter(int port, enum egr_filter state)
+{
+ sw_w32_mask(0x1 << (port % 0x20), state << (port % 0x20),
+ RTL931X_VLAN_PORT_EGR_FLTR + (((port >> 5) << 2)));
+}
+
+void rtl931x_set_distribution_algorithm(int group, int algoidx, u32 algomsk)
+{
+ u32 l3shift = 0;
+ u32 newmask = 0;
+
+ /* TODO: for now we set algoidx to 0 */
+	algoidx = 0;
+
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_SIP_BIT) {
+ l3shift = 4;
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SIP_BIT;
+ }
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_DIP_BIT) {
+ l3shift = 4;
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_DIP_BIT;
+ }
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT) {
+ l3shift = 4;
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SRC_L4PORT_BIT;
+ }
+	if (algomsk & TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT) {
+		l3shift = 4;
+		newmask |= TRUNK_DISTRIBUTION_ALGO_L3_DST_L4PORT_BIT;
+	}
+
+ if (l3shift == 4) {
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_SMAC_BIT)
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SMAC_BIT;
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_DMAC_BIT)
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_DMAC_BIT;
+ } else {
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_SMAC_BIT)
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L2_SMAC_BIT;
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_DMAC_BIT)
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L2_DMAC_BIT;
+ }
+
+ sw_w32(newmask << l3shift, RTL931X_TRK_HASH_CTRL + (algoidx << 2));
+}
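+/* Example (added): an algomsk with only the SIP and DIP bits set selects the
+ * L3 variants, so newmask holds the L3 SIP/DIP bits and is written shifted left
+ * by l3shift = 4 into RTL931X_TRK_HASH_CTRL (algoidx is currently forced to 0).
+ */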
+
+static void rtl931x_led_init(struct rtl838x_switch_priv *priv)
+{
+ int i, pos;
+ u32 v, set;
+ u64 pm_copper = 0, pm_fiber = 0;
+ u32 setlen;
+ const __be32 *led_set;
+ char set_name[9];
+ struct device_node *node;
+
+ pr_info("%s called\n", __func__);
+ node = of_find_compatible_node(NULL, NULL, "realtek,rtl9300-leds");
+ if (!node) {
+ pr_info("%s No compatible LED node found\n", __func__);
+ return;
+ }
+
+	for (i = 0; i < priv->cpu_port; i++) {
+ pos = (i << 1) % 32;
+ sw_w32_mask(0x3 << pos, 0, RTL931X_LED_PORT_FIB_SET_SEL_CTRL(i));
+ sw_w32_mask(0x3 << pos, 0, RTL931X_LED_PORT_COPR_SET_SEL_CTRL(i));
+
+ if (!priv->ports[i].phy)
+ continue;
+
+ v = 0x1; // Found on the EdgeCore, but we do not have any HW description
+ sw_w32_mask(0x3 << pos, v << pos, RTL931X_LED_PORT_NUM_CTRL(i));
+
+ if (priv->ports[i].phy_is_integrated)
+ pm_fiber |= BIT_ULL(i);
+ else
+ pm_copper |= BIT_ULL(i);
+
+ set = priv->ports[i].led_set;
+ sw_w32_mask(0, set << pos, RTL931X_LED_PORT_COPR_SET_SEL_CTRL(i));
+ sw_w32_mask(0, set << pos, RTL931X_LED_PORT_FIB_SET_SEL_CTRL(i));
+ }
+
+ for (i = 0; i < 4; i++) {
+ sprintf(set_name, "led_set%d", i);
+ pr_info(">%s<\n", set_name);
+ led_set = of_get_property(node, set_name, &setlen);
+ if (!led_set || setlen != 16)
+ break;
+ v = be32_to_cpup(led_set) << 16 | be32_to_cpup(led_set + 1);
+ sw_w32(v, RTL931X_LED_SET0_0_CTRL - 4 - i * 8);
+ v = be32_to_cpup(led_set + 2) << 16 | be32_to_cpup(led_set + 3);
+ sw_w32(v, RTL931X_LED_SET0_0_CTRL - i * 8);
+ }
+
+ // Set LED mode to serial (0x1)
+ sw_w32_mask(0x3, 0x1, RTL931X_LED_GLB_CTRL);
+
+ rtl839x_set_port_reg_le(pm_copper, RTL931X_LED_PORT_COPR_MASK_CTRL);
+ rtl839x_set_port_reg_le(pm_fiber, RTL931X_LED_PORT_FIB_MASK_CTRL);
+ rtl839x_set_port_reg_le(pm_copper | pm_fiber, RTL931X_LED_PORT_COMBO_MASK_CTRL);
+
+ for (i = 0; i < 32; i++)
+ pr_info("%s %08x: %08x\n",__func__, 0xbb000600 + i * 4, sw_r32(0x0600 + i * 4));
+}
+
+const struct rtl838x_reg rtl931x_reg = {
+ .mask_port_reg_be = rtl839x_mask_port_reg_be,
+ .set_port_reg_be = rtl839x_set_port_reg_be,
+ .get_port_reg_be = rtl839x_get_port_reg_be,
+ .mask_port_reg_le = rtl839x_mask_port_reg_le,
+ .set_port_reg_le = rtl839x_set_port_reg_le,
+ .get_port_reg_le = rtl839x_get_port_reg_le,
+ .stat_port_rst = RTL931X_STAT_PORT_RST,
+ .stat_rst = RTL931X_STAT_RST,
+ .stat_port_std_mib = 0, // Not defined
+ .traffic_enable = rtl931x_traffic_enable,
+ .traffic_disable = rtl931x_traffic_disable,
+ .traffic_get = rtl931x_traffic_get,
+ .traffic_set = rtl931x_traffic_set,
+ .l2_ctrl_0 = RTL931X_L2_CTRL,
+ .l2_ctrl_1 = RTL931X_L2_AGE_CTRL,
+ .l2_port_aging_out = RTL931X_L2_PORT_AGE_CTRL,
+ .set_ageing_time = rtl931x_set_ageing_time,
+ // .smi_poll_ctrl does not exist
+ .l2_tbl_flush_ctrl = RTL931X_L2_TBL_FLUSH_CTRL,
+ .exec_tbl0_cmd = rtl931x_exec_tbl0_cmd,
+ .exec_tbl1_cmd = rtl931x_exec_tbl1_cmd,
+ .tbl_access_data_0 = rtl931x_tbl_access_data_0,
+ .isr_glb_src = RTL931X_ISR_GLB_SRC,
+ .isr_port_link_sts_chg = RTL931X_ISR_PORT_LINK_STS_CHG,
+ .imr_port_link_sts_chg = RTL931X_IMR_PORT_LINK_STS_CHG,
+ // imr_glb does not exist on RTL931X
+ .vlan_tables_read = rtl931x_vlan_tables_read,
+ .vlan_set_tagged = rtl931x_vlan_set_tagged,
+ .vlan_set_untagged = rtl931x_vlan_set_untagged,
+ .vlan_profile_dump = rtl931x_vlan_profile_dump,
+ .vlan_profile_setup = rtl931x_vlan_profile_setup,
+ .vlan_fwd_on_inner = rtl931x_vlan_fwd_on_inner,
+ .stp_get = rtl931x_stp_get,
+ .stp_set = rtl931x_stp_set,
+ .mac_force_mode_ctrl = rtl931x_mac_force_mode_ctrl,
+ .mac_port_ctrl = rtl931x_mac_port_ctrl,
+ .l2_port_new_salrn = rtl931x_l2_port_new_salrn,
+ .l2_port_new_sa_fwd = rtl931x_l2_port_new_sa_fwd,
+ .mir_ctrl = RTL931X_MIR_CTRL,
+ .mir_dpm = RTL931X_MIR_DPM_CTRL,
+ .mir_spm = RTL931X_MIR_SPM_CTRL,
+ .mac_link_sts = RTL931X_MAC_LINK_STS,
+ .mac_link_dup_sts = RTL931X_MAC_LINK_DUP_STS,
+ .mac_link_spd_sts = rtl931x_mac_link_spd_sts,
+ .mac_rx_pause_sts = RTL931X_MAC_RX_PAUSE_STS,
+ .mac_tx_pause_sts = RTL931X_MAC_TX_PAUSE_STS,
+ .read_l2_entry_using_hash = rtl931x_read_l2_entry_using_hash,
+ .write_l2_entry_using_hash = rtl931x_write_l2_entry_using_hash,
+ .read_cam = rtl931x_read_cam,
+ .write_cam = rtl931x_write_cam,
+ .vlan_port_keep_tag_set = rtl931x_vlan_port_keep_tag_set,
+ .vlan_port_pvidmode_set = rtl931x_vlan_port_pvidmode_set,
+ .vlan_port_pvid_set = rtl931x_vlan_port_pvid_set,
+ .trk_mbr_ctr = rtl931x_trk_mbr_ctr,
+ .set_vlan_igr_filter = rtl931x_set_igr_filter,
+ .set_vlan_egr_filter = rtl931x_set_egr_filter,
+ .set_distribution_algorithm = rtl931x_set_distribution_algorithm,
+ .l2_hash_key = rtl931x_l2_hash_key,
+ .read_mcast_pmask = rtl931x_read_mcast_pmask,
+ .write_mcast_pmask = rtl931x_write_mcast_pmask,
+ .pie_init = rtl931x_pie_init,
+ .pie_rule_write = rtl931x_pie_rule_write,
+ .pie_rule_add = rtl931x_pie_rule_add,
+ .pie_rule_rm = rtl931x_pie_rule_rm,
+ .l2_learning_setup = rtl931x_l2_learning_setup,
+ .l3_setup = rtl931x_l3_setup,
+ .led_init = rtl931x_led_init,
+};
+
diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/tc.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/tc.c
new file mode 100644
index 0000000000..d0a8ee8cfe
--- /dev/null
+++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/tc.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <net/dsa.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <net/flow_offload.h>
+#include <linux/rhashtable.h>
+
+#include <asm/mach-rtl838x/mach-rtl83xx.h>
+#include "rtl83xx.h"
+#include "rtl838x.h"
+
+/*
+ * Parse the flow rule for the matching conditions
+ */
+static int rtl83xx_parse_flow_rule(struct rtl838x_switch_priv *priv,
+ struct flow_rule *rule, struct rtl83xx_flow *flow)
+{
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ pr_debug("In %s\n", __func__);
+ /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
+ if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
+ (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
+ pr_err("Cannot form TC key: used_keys = 0x%x\n", dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ pr_debug("%s: BASIC\n", __func__);
+ flow_rule_match_basic(rule, &match);
+ if (match.key->n_proto == htons(ETH_P_ARP))
+ flow->rule.frame_type = 0;
+ if (match.key->n_proto == htons(ETH_P_IP))
+ flow->rule.frame_type = 2;
+ if (match.key->n_proto == htons(ETH_P_IPV6))
+ flow->rule.frame_type = 3;
+ if ((match.key->n_proto == htons(ETH_P_ARP)) || flow->rule.frame_type)
+ flow->rule.frame_type_m = 3;
+ if (flow->rule.frame_type >= 2) {
+ if (match.key->ip_proto == IPPROTO_UDP)
+ flow->rule.frame_type_l4 = 0;
+ if (match.key->ip_proto == IPPROTO_TCP)
+ flow->rule.frame_type_l4 = 1;
+ if (match.key->ip_proto == IPPROTO_ICMP
+			    || match.key->ip_proto == IPPROTO_ICMPV6)
+ flow->rule.frame_type_l4 = 2;
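+			/* Note (added): the following check duplicates the TCP case above
+			 * and overrides frame_type_l4 = 1; it may have been intended for a
+			 * different L4 protocol.
+			 */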
+ if (match.key->ip_proto == IPPROTO_TCP)
+ flow->rule.frame_type_l4 = 3;
+ if ((match.key->ip_proto == IPPROTO_UDP) || flow->rule.frame_type_l4)
+ flow->rule.frame_type_l4_m = 7;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ pr_debug("%s: ETH_ADDR\n", __func__);
+ flow_rule_match_eth_addrs(rule, &match);
+ ether_addr_copy(flow->rule.dmac, match.key->dst);
+ ether_addr_copy(flow->rule.dmac_m, match.mask->dst);
+ ether_addr_copy(flow->rule.smac, match.key->src);
+ ether_addr_copy(flow->rule.smac_m, match.mask->src);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ pr_debug("%s: VLAN\n", __func__);
+ flow_rule_match_vlan(rule, &match);
+ flow->rule.itag = match.key->vlan_id;
+ flow->rule.itag_m = match.mask->vlan_id;
+ // TODO: What about match.key->vlan_priority ?
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ pr_debug("%s: IPV4\n", __func__);
+ flow_rule_match_ipv4_addrs(rule, &match);
+ flow->rule.is_ipv6 = false;
+ flow->rule.dip = match.key->dst;
+ flow->rule.dip_m = match.mask->dst;
+ flow->rule.sip = match.key->src;
+ flow->rule.sip_m = match.mask->src;
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ pr_debug("%s: IPV6\n", __func__);
+ flow->rule.is_ipv6 = true;
+ flow_rule_match_ipv6_addrs(rule, &match);
+ flow->rule.dip6 = match.key->dst;
+ flow->rule.dip6_m = match.mask->dst;
+ flow->rule.sip6 = match.key->src;
+ flow->rule.sip6_m = match.mask->src;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ pr_debug("%s: PORTS\n", __func__);
+ flow_rule_match_ports(rule, &match);
+ flow->rule.dport = match.key->dst;
+ flow->rule.dport_m = match.mask->dst;
+ flow->rule.sport = match.key->src;
+ flow->rule.sport_m = match.mask->src;
+ }
+
+ // TODO: ICMP
+ return 0;
+}
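+/* Usage sketch (added, interface name illustrative): a flower rule such as
+ *   tc filter add dev lan1 ingress protocol ip flower ip_proto udp dst_ip 192.168.1.1 action drop
+ * reaches this parser with frame_type = 2 and frame_type_l4 = 0, and the
+ * destination address and mask are copied into flow->rule.dip / dip_m; the drop
+ * action itself is handled in rtl83xx_add_flow() below.
+ */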
+
+static void rtl83xx_flow_bypass_all(struct rtl83xx_flow *flow)
+{
+ flow->rule.bypass_sel = true;
+ flow->rule.bypass_all = true;
+ flow->rule.bypass_igr_stp = true;
+ flow->rule.bypass_ibc_sc = true;
+}
+
+static int rtl83xx_parse_fwd(struct rtl838x_switch_priv *priv,
+ const struct flow_action_entry *act, struct rtl83xx_flow *flow)
+{
+ struct net_device *dev = act->dev;
+ int port;
+
+ port = rtl83xx_port_is_under(dev, priv);
+ if (port < 0) {
+ netdev_info(dev, "%s: not a DSA device.\n", __func__);
+ return -EINVAL;
+ }
+
+ flow->rule.fwd_sel = true;
+ flow->rule.fwd_data = port;
+ pr_debug("Using port index: %d\n", port);
+ rtl83xx_flow_bypass_all(flow);
+
+ return 0;
+}
+
+static int rtl83xx_add_flow(struct rtl838x_switch_priv *priv, struct flow_cls_offload *f,
+ struct rtl83xx_flow *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ const struct flow_action_entry *act;
+ int i, err;
+
+ pr_debug("%s\n", __func__);
+
+ rtl83xx_parse_flow_rule(priv, rule, flow);
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
+ pr_debug("%s: DROP\n", __func__);
+ flow->rule.drop = true;
+ rtl83xx_flow_bypass_all(flow);
+ return 0;
+
+ case FLOW_ACTION_TRAP:
+ pr_debug("%s: TRAP\n", __func__);
+ flow->rule.fwd_data = priv->cpu_port;
+ flow->rule.fwd_act = PIE_ACT_REDIRECT_TO_PORT;
+ rtl83xx_flow_bypass_all(flow);
+ break;
+
+ case FLOW_ACTION_MANGLE:
+ pr_err("%s: FLOW_ACTION_MANGLE not supported\n", __func__);
+ return -EOPNOTSUPP;
+
+ case FLOW_ACTION_ADD:
+ pr_err("%s: FLOW_ACTION_ADD not supported\n", __func__);
+ return -EOPNOTSUPP;
+
+ case FLOW_ACTION_VLAN_PUSH:
+ pr_debug("%s: VLAN_PUSH\n", __func__);
+// TODO: act->vlan.proto
+ flow->rule.ivid_act = PIE_ACT_VID_ASSIGN;
+ flow->rule.ivid_sel = true;
+ flow->rule.ivid_data = htons(act->vlan.vid);
+ flow->rule.ovid_act = PIE_ACT_VID_ASSIGN;
+ flow->rule.ovid_sel = true;
+ flow->rule.ovid_data = htons(act->vlan.vid);
+ flow->rule.fwd_mod_to_cpu = true;
+ break;
+
+ case FLOW_ACTION_VLAN_POP:
+ pr_debug("%s: VLAN_POP\n", __func__);
+ flow->rule.ivid_act = PIE_ACT_VID_ASSIGN;
+ flow->rule.ivid_data = 0;
+ flow->rule.ivid_sel = true;
+ flow->rule.ovid_act = PIE_ACT_VID_ASSIGN;
+ flow->rule.ovid_data = 0;
+ flow->rule.ovid_sel = true;
+ flow->rule.fwd_mod_to_cpu = true;
+ break;
+
+ case FLOW_ACTION_CSUM:
+ pr_err("%s: FLOW_ACTION_CSUM not supported\n", __func__);
+ return -EOPNOTSUPP;
+
+ case FLOW_ACTION_REDIRECT:
+ pr_debug("%s: REDIRECT\n", __func__);
+ err = rtl83xx_parse_fwd(priv, act, flow);
+ if (err)
+ return err;
+ flow->rule.fwd_act = PIE_ACT_REDIRECT_TO_PORT;
+ break;
+
+ case FLOW_ACTION_MIRRED:
+ pr_debug("%s: MIRRED\n", __func__);
+ err = rtl83xx_parse_fwd(priv, act, flow);
+ if (err)
+ return err;
+ flow->rule.fwd_act = PIE_ACT_COPY_TO_PORT;
+ break;
+
+ default:
+ pr_err("%s: Flow action not supported: %d\n", __func__, act->id);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
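+// Offloaded flows are tracked in a hash table keyed by the flower cookie.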
+static const struct rhashtable_params tc_ht_params = {
+ .head_offset = offsetof(struct rtl83xx_flow, node),
+ .key_offset = offsetof(struct rtl83xx_flow, cookie),
+ .key_len = sizeof(((struct rtl83xx_flow *)0)->cookie),
+ .automatic_shrinking = true,
+};
+
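+// FLOW_CLS_REPLACE: allocate a new flow, translate it into a PIE rule, attach
+// a packet counter for statistics and program the rule into the hardware.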
+static int rtl83xx_configure_flower(struct rtl838x_switch_priv *priv,
+ struct flow_cls_offload *f)
+{
+ struct rtl83xx_flow *flow;
+ int err = 0;
+
+ pr_debug("In %s\n", __func__);
+
+ rcu_read_lock();
+ pr_debug("Cookie %08lx\n", f->cookie);
+ flow = rhashtable_lookup(&priv->tc_ht, &f->cookie, tc_ht_params);
+ rcu_read_unlock();
+ if (flow) {
+ pr_info("%s: Got flow\n", __func__);
+ err = -EEXIST;
+ goto out;
+ }
+
+ pr_debug("%s: New flow\n", __func__);
+
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (!flow) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ flow->cookie = f->cookie;
+ flow->priv = priv;
+
+ err = rhashtable_insert_fast(&priv->tc_ht, &flow->node, tc_ht_params);
+ if (err) {
+ pr_err("Could not insert add new rule\n");
+ goto out_free;
+ }
+
+ err = rtl83xx_add_flow(priv, f, flow);
+ if (err)
+ goto out_remove;
+
+ // Add log action to flow
+ flow->rule.packet_cntr = rtl83xx_packet_cntr_alloc(priv);
+ if (flow->rule.packet_cntr >= 0) {
+ pr_debug("Using packet counter %d\n", flow->rule.packet_cntr);
+ flow->rule.log_sel = true;
+ flow->rule.log_data = flow->rule.packet_cntr;
+ }
+
+ err = priv->r->pie_rule_add(priv, &flow->rule);
+ if (err)
+ goto out_remove;
+
+ return 0;
+
+out_remove:
+ rhashtable_remove_fast(&priv->tc_ht, &flow->node, tc_ht_params);
+ kfree_rcu(flow, rcu_head);
+ goto out;
+out_free:
+ kfree(flow);
+out:
+ pr_err("%s: error %d\n", __func__, err);
+ return err;
+}
+
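+// FLOW_CLS_DESTROY: remove the PIE rule from the hardware and free the flow.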
+static int rtl83xx_delete_flower(struct rtl838x_switch_priv *priv,
+ struct flow_cls_offload *cls_flower)
+{
+ struct rtl83xx_flow *flow;
+
+ pr_debug("In %s\n", __func__);
+ rcu_read_lock();
+ flow = rhashtable_lookup_fast(&priv->tc_ht, &cls_flower->cookie, tc_ht_params);
+ if (!flow) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ priv->r->pie_rule_rm(priv, &flow->rule);
+
+ rhashtable_remove_fast(&priv->tc_ht, &flow->node, tc_ht_params);
+
+ kfree_rcu(flow, rcu_head);
+
+ rcu_read_unlock();
+ return 0;
+}
+
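+// FLOW_CLS_STATS: report the packets counted since the last readout. Byte
+// counts are not tracked yet, a fixed 100 bytes per packet is assumed.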
+static int rtl83xx_stats_flower(struct rtl838x_switch_priv *priv,
+ struct flow_cls_offload *cls_flower)
+{
+ struct rtl83xx_flow *flow;
+ unsigned long lastused = 0;
+ int total_packets, new_packets = 0;
+
+ pr_debug("%s: \n", __func__);
+ flow = rhashtable_lookup_fast(&priv->tc_ht, &cls_flower->cookie, tc_ht_params);
+ if (!flow)
+ return -EINVAL;
+
+ if (flow->rule.packet_cntr >= 0) {
+ total_packets = priv->r->packet_cntr_read(flow->rule.packet_cntr);
+ pr_debug("Total packets: %d\n", total_packets);
+ new_packets = total_packets - flow->rule.last_packet_cnt;
+ flow->rule.last_packet_cnt = total_packets;
+ }
+
+ // TODO: We need a second PIE rule to count the bytes
+ flow_stats_update(&cls_flower->stats, 100 * new_packets, new_packets, 0, lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+ return 0;
+}
+
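+// Dispatch flower offload commands to the handlers above.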
+static int rtl83xx_setup_tc_cls_flower(struct rtl838x_switch_priv *priv,
+ struct flow_cls_offload *cls_flower)
+{
+ pr_debug("%s: %d\n", __func__, cls_flower->command);
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return rtl83xx_configure_flower(priv, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return rtl83xx_delete_flower(priv, cls_flower);
+ case FLOW_CLS_STATS:
+ return rtl83xx_stats_flower(priv, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
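+// Block callback registered via flow_block_cb_setup_simple(), handling only
+// classifier (flower) offloads.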
+static int rtl83xx_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct rtl838x_switch_priv *priv = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ pr_debug("%s: TC_SETUP_CLSFLOWER\n", __func__);
+ return rtl83xx_setup_tc_cls_flower(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(rtl83xx_block_cb_list);
+
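+// Entry point for tc offload requests on the switch ports: bind the flow
+// block and register the classifier callback.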
+int rtl83xx_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data)
+{
+ struct rtl838x_switch_priv *priv;
+ struct flow_block_offload *f = type_data;
+ static bool first_time = true;
+ int err;
+
+ pr_debug("%s: %d\n", __func__, type);
+
+ if (!netdev_uses_dsa(dev)) {
+ pr_err("%s: no DSA\n", __func__);
+ return 0;
+ }
+ priv = dev->dsa_ptr->ds->priv;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
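+ // The flow hash table is initialized lazily on the first block request.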
+ if (first_time) {
+ err = rhashtable_init(&priv->tc_ht, &tc_ht_params);
+ if (err) {
+ pr_err("%s: Could not initialize hash table\n", __func__);
+ return err;
+ }
+ first_time = false;
+ }
+
+ f->unlocked_driver_cb = true;
+ return flow_block_cb_setup_simple(type_data,
+ &rtl83xx_block_cb_list,
+ rtl83xx_setup_tc_block_cb,
+ priv, priv, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}