From 8fb15ea52a02b0578c11897afd1a87f8502d8f52 Mon Sep 17 00:00:00 2001
From: INAGAKI Hiroshi
Date: Fri, 9 Sep 2022 22:08:16 +0900
Subject: realtek: copy dts/files/patches/configs for 5.15

Copy dts/files/patches/configs from 5.10 to 5.15.

Signed-off-by: INAGAKI Hiroshi
[refresh with updated DGS-1210 dts files]
Signed-off-by: Sander Vanheule
---
 .../files-5.15/drivers/net/dsa/rtl83xx/Kconfig     |    8 +
 .../files-5.15/drivers/net/dsa/rtl83xx/Makefile    |    3 +
 .../files-5.15/drivers/net/dsa/rtl83xx/common.c    | 1696 +++++++++
 .../files-5.15/drivers/net/dsa/rtl83xx/debugfs.c   |  727 ++++
 .../files-5.15/drivers/net/dsa/rtl83xx/dsa.c       | 2259 +++++++++++
 .../files-5.15/drivers/net/dsa/rtl83xx/qos.c       |  576 +++
 .../files-5.15/drivers/net/dsa/rtl83xx/rtl838x.c   | 2054 ++++++++++
 .../files-5.15/drivers/net/dsa/rtl83xx/rtl838x.h   | 1093 ++++++
 .../files-5.15/drivers/net/dsa/rtl83xx/rtl839x.c   | 1937 ++++++++++
 .../files-5.15/drivers/net/dsa/rtl83xx/rtl83xx.h   |  137 +
 .../files-5.15/drivers/net/dsa/rtl83xx/rtl930x.c   | 2560 +++++++++++++
 .../files-5.15/drivers/net/dsa/rtl83xx/rtl931x.c   | 1701 +++++++++
 .../files-5.15/drivers/net/dsa/rtl83xx/tc.c        |  409 ++
 .../files-5.15/drivers/net/ethernet/rtl838x_eth.c  | 2588 +++++++++++++
 .../files-5.15/drivers/net/ethernet/rtl838x_eth.h  |  457 +++
 .../files-5.15/drivers/net/phy/rtl83xx-phy.c       | 4018 ++++++++++++++++++++
 .../files-5.15/drivers/net/phy/rtl83xx-phy.h       |   68 +
 17 files changed, 22291 insertions(+)
 create mode 100644 target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/Kconfig
 create mode 100644 target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/Makefile
 create mode 100644 target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/common.c
 create mode 100644 target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/debugfs.c
 create mode 100644 target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/dsa.c
 create mode 100644 target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/qos.c
 create mode 100644 target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl838x.c
 create mode 100644 target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl838x.h
 create mode 100644 target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl839x.c
 create mode 100644 target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl83xx.h
 create mode 100644 target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl930x.c
 create mode 100644 target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl931x.c
 create mode 100644 target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/tc.c
 create mode 100644 target/linux/realtek/files-5.15/drivers/net/ethernet/rtl838x_eth.c
 create mode 100644 target/linux/realtek/files-5.15/drivers/net/ethernet/rtl838x_eth.h
 create mode 100644 target/linux/realtek/files-5.15/drivers/net/phy/rtl83xx-phy.c
 create mode 100644 target/linux/realtek/files-5.15/drivers/net/phy/rtl83xx-phy.h

diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/Kconfig b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/Kconfig
new file mode 100644
index 0000000000..281f08054f
--- /dev/null
+++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_RTL83XX
+	tristate "Realtek RTL838x/RTL839x switch support"
+	depends on RTL83XX
+	select NET_DSA_TAG_TRAILER
+	help
+	  This driver adds support for Realtek RTL83xx series switching.
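
The Kconfig above selects NET_DSA_TAG_TRAILER, i.e. the generic DSA trailer tagger is used to carry the port number between the CPU and the switch. A minimal sketch of how a DSA driver advertises that tag protocol, assuming the mainline 5.15 dsa_switch_ops API; the function and ops names here are illustrative placeholders, not taken from the dsa.c added later in this patch:

#include <net/dsa.h>

/* Illustrative only: example_* names are placeholders. */
static enum dsa_tag_protocol example_get_tag_protocol(struct dsa_switch *ds,
						      int port,
						      enum dsa_tag_protocol mprot)
{
	/* With NET_DSA_TAG_TRAILER the port information travels in a small
	 * trailer appended to each frame on the CPU port. */
	return DSA_TAG_PROTO_TRAILER;
}

static const struct dsa_switch_ops example_switch_ops = {
	.get_tag_protocol = example_get_tag_protocol,
};

The real hookup is done by the dsa_switch_ops tables (rtl83xx_switch_ops, rtl930x_switch_ops) referenced from common.c below.
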
+ diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/Makefile b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/Makefile new file mode 100644 index 0000000000..8752c79700 --- /dev/null +++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_NET_DSA_RTL83XX) += common.o dsa.o \ + rtl838x.o rtl839x.o rtl930x.o rtl931x.o debugfs.o qos.o tc.o diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/common.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/common.c new file mode 100644 index 0000000000..e86ff9ccdf --- /dev/null +++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/common.c @@ -0,0 +1,1696 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "rtl83xx.h" + +extern struct rtl83xx_soc_info soc_info; + +extern const struct rtl838x_reg rtl838x_reg; +extern const struct rtl838x_reg rtl839x_reg; +extern const struct rtl838x_reg rtl930x_reg; +extern const struct rtl838x_reg rtl931x_reg; + +extern const struct dsa_switch_ops rtl83xx_switch_ops; +extern const struct dsa_switch_ops rtl930x_switch_ops; + +DEFINE_MUTEX(smi_lock); + +int rtl83xx_port_get_stp_state(struct rtl838x_switch_priv *priv, int port) +{ + u32 msti = 0; + u32 port_state[4]; + int index, bit; + int pos = port; + int n = priv->port_width << 1; + + /* Ports above or equal CPU port can never be configured */ + if (port >= priv->cpu_port) + return -1; + + mutex_lock(&priv->reg_mutex); + + /* For the RTL839x and following, the bits are left-aligned in the 64/128 bit field */ + if (priv->family_id == RTL8390_FAMILY_ID) + pos += 12; + if (priv->family_id == RTL9300_FAMILY_ID) + pos += 3; + if (priv->family_id == RTL9310_FAMILY_ID) + pos += 8; + + index = n - (pos >> 4) - 1; + bit = (pos << 1) % 32; + + priv->r->stp_get(priv, msti, port_state); + + mutex_unlock(&priv->reg_mutex); + + return (port_state[index] >> bit) & 3; +} + +static struct table_reg rtl838x_tbl_regs[] = { + TBL_DESC(0x6900, 0x6908, 3, 15, 13, 1), // RTL8380_TBL_L2 + TBL_DESC(0x6914, 0x6918, 18, 14, 12, 1), // RTL8380_TBL_0 + TBL_DESC(0xA4C8, 0xA4CC, 6, 14, 12, 1), // RTL8380_TBL_1 + + TBL_DESC(0x1180, 0x1184, 3, 16, 14, 0), // RTL8390_TBL_L2 + TBL_DESC(0x1190, 0x1194, 17, 15, 12, 0), // RTL8390_TBL_0 + TBL_DESC(0x6B80, 0x6B84, 4, 14, 12, 0), // RTL8390_TBL_1 + TBL_DESC(0x611C, 0x6120, 9, 8, 6, 0), // RTL8390_TBL_2 + + TBL_DESC(0xB320, 0xB334, 3, 18, 16, 0), // RTL9300_TBL_L2 + TBL_DESC(0xB340, 0xB344, 19, 16, 12, 0), // RTL9300_TBL_0 + TBL_DESC(0xB3A0, 0xB3A4, 20, 16, 13, 0), // RTL9300_TBL_1 + TBL_DESC(0xCE04, 0xCE08, 6, 14, 12, 0), // RTL9300_TBL_2 + TBL_DESC(0xD600, 0xD604, 30, 7, 6, 0), // RTL9300_TBL_HSB + TBL_DESC(0x7880, 0x7884, 22, 9, 8, 0), // RTL9300_TBL_HSA + + TBL_DESC(0x8500, 0x8508, 8, 19, 15, 0), // RTL9310_TBL_0 + TBL_DESC(0x40C0, 0x40C4, 22, 16, 14, 0), // RTL9310_TBL_1 + TBL_DESC(0x8528, 0x852C, 6, 18, 14, 0), // RTL9310_TBL_2 + TBL_DESC(0x0200, 0x0204, 9, 15, 12, 0), // RTL9310_TBL_3 + TBL_DESC(0x20dc, 0x20e0, 29, 7, 6, 0), // RTL9310_TBL_4 + TBL_DESC(0x7e1c, 0x7e20, 53, 8, 6, 0), // RTL9310_TBL_5 +}; + +void rtl_table_init(void) +{ + int i; + + for (i = 0; i < RTL_TBL_END; i++) + mutex_init(&rtl838x_tbl_regs[i].lock); +} + +/* + * Request access to table t in table access register r + * Returns a handle to a lock for that table + */ +struct table_reg *rtl_table_get(rtl838x_tbl_reg_t r, int t) +{ + if (r 
>= RTL_TBL_END) + return NULL; + + if (t >= BIT(rtl838x_tbl_regs[r].c_bit-rtl838x_tbl_regs[r].t_bit)) + return NULL; + + mutex_lock(&rtl838x_tbl_regs[r].lock); + rtl838x_tbl_regs[r].tbl = t; + + return &rtl838x_tbl_regs[r]; +} + +/* + * Release a table r, unlock the corresponding lock + */ +void rtl_table_release(struct table_reg *r) +{ + if (!r) + return; + +// pr_info("Unlocking %08x\n", (u32)r); + mutex_unlock(&r->lock); +// pr_info("Unlock done\n"); +} + +/* + * Reads table index idx into the data registers of the table + */ +void rtl_table_read(struct table_reg *r, int idx) +{ + u32 cmd = r->rmode ? BIT(r->c_bit) : 0; + + cmd |= BIT(r->c_bit + 1) | (r->tbl << r->t_bit) | (idx & (BIT(r->t_bit) - 1)); + sw_w32(cmd, r->addr); + do { } while (sw_r32(r->addr) & BIT(r->c_bit + 1)); +} + +/* + * Writes the content of the table data registers into the table at index idx + */ +void rtl_table_write(struct table_reg *r, int idx) +{ + u32 cmd = r->rmode ? 0 : BIT(r->c_bit); + + cmd |= BIT(r->c_bit + 1) | (r->tbl << r->t_bit) | (idx & (BIT(r->t_bit) - 1)); + sw_w32(cmd, r->addr); + do { } while (sw_r32(r->addr) & BIT(r->c_bit + 1)); +} + +/* + * Returns the address of the ith data register of table register r + * the address is relative to the beginning of the Switch-IO block at 0xbb000000 + */ +inline u16 rtl_table_data(struct table_reg *r, int i) +{ + if (i >= r->max_data) + i = r->max_data - 1; + return r->data + i * 4; +} + +inline u32 rtl_table_data_r(struct table_reg *r, int i) +{ + return sw_r32(rtl_table_data(r, i)); +} + +inline void rtl_table_data_w(struct table_reg *r, u32 v, int i) +{ + sw_w32(v, rtl_table_data(r, i)); +} + +/* Port register accessor functions for the RTL838x and RTL930X SoCs */ +void rtl838x_mask_port_reg(u64 clear, u64 set, int reg) +{ + sw_w32_mask((u32)clear, (u32)set, reg); +} + +void rtl838x_set_port_reg(u64 set, int reg) +{ + sw_w32((u32)set, reg); +} + +u64 rtl838x_get_port_reg(int reg) +{ + return ((u64) sw_r32(reg)); +} + +/* Port register accessor functions for the RTL839x and RTL931X SoCs */ +void rtl839x_mask_port_reg_be(u64 clear, u64 set, int reg) +{ + sw_w32_mask((u32)(clear >> 32), (u32)(set >> 32), reg); + sw_w32_mask((u32)(clear & 0xffffffff), (u32)(set & 0xffffffff), reg + 4); +} + +u64 rtl839x_get_port_reg_be(int reg) +{ + u64 v = sw_r32(reg); + + v <<= 32; + v |= sw_r32(reg + 4); + return v; +} + +void rtl839x_set_port_reg_be(u64 set, int reg) +{ + sw_w32(set >> 32, reg); + sw_w32(set & 0xffffffff, reg + 4); +} + +void rtl839x_mask_port_reg_le(u64 clear, u64 set, int reg) +{ + sw_w32_mask((u32)clear, (u32)set, reg); + sw_w32_mask((u32)(clear >> 32), (u32)(set >> 32), reg + 4); +} + +void rtl839x_set_port_reg_le(u64 set, int reg) +{ + sw_w32(set, reg); + sw_w32(set >> 32, reg + 4); +} + +u64 rtl839x_get_port_reg_le(int reg) +{ + u64 v = sw_r32(reg + 4); + + v <<= 32; + v |= sw_r32(reg); + return v; +} + +int read_phy(u32 port, u32 page, u32 reg, u32 *val) +{ + switch (soc_info.family) { + case RTL8380_FAMILY_ID: + return rtl838x_read_phy(port, page, reg, val); + case RTL8390_FAMILY_ID: + return rtl839x_read_phy(port, page, reg, val); + case RTL9300_FAMILY_ID: + return rtl930x_read_phy(port, page, reg, val); + case RTL9310_FAMILY_ID: + return rtl931x_read_phy(port, page, reg, val); + } + return -1; +} + +int write_phy(u32 port, u32 page, u32 reg, u32 val) +{ + switch (soc_info.family) { + case RTL8380_FAMILY_ID: + return rtl838x_write_phy(port, page, reg, val); + case RTL8390_FAMILY_ID: + return rtl839x_write_phy(port, page, reg, val); + case 
RTL9300_FAMILY_ID: + return rtl930x_write_phy(port, page, reg, val); + case RTL9310_FAMILY_ID: + return rtl931x_write_phy(port, page, reg, val); + } + return -1; +} + +static int __init rtl83xx_mdio_probe(struct rtl838x_switch_priv *priv) +{ + struct device *dev = priv->dev; + struct device_node *dn, *phy_node, *mii_np = dev->of_node; + struct mii_bus *bus; + int ret; + u32 pn; + + pr_debug("In %s\n", __func__); + mii_np = of_find_compatible_node(NULL, NULL, "realtek,rtl838x-mdio"); + if (mii_np) { + pr_debug("Found compatible MDIO node!\n"); + } else { + dev_err(priv->dev, "no %s child node found", "mdio-bus"); + return -ENODEV; + } + + priv->mii_bus = of_mdio_find_bus(mii_np); + if (!priv->mii_bus) { + pr_debug("Deferring probe of mdio bus\n"); + return -EPROBE_DEFER; + } + if (!of_device_is_available(mii_np)) + ret = -ENODEV; + + bus = devm_mdiobus_alloc(priv->ds->dev); + if (!bus) + return -ENOMEM; + + bus->name = "rtl838x slave mii"; + + /* + * Since the NIC driver is loaded first, we can use the mdio rw functions + * assigned there. + */ + bus->read = priv->mii_bus->read; + bus->write = priv->mii_bus->write; + bus->read_paged = priv->mii_bus->read_paged; + bus->write_paged = priv->mii_bus->write_paged; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", bus->name, dev->id); + + bus->parent = dev; + priv->ds->slave_mii_bus = bus; + priv->ds->slave_mii_bus->priv = priv->mii_bus->priv; + priv->ds->slave_mii_bus->access_capabilities = priv->mii_bus->access_capabilities; + + ret = mdiobus_register(priv->ds->slave_mii_bus); + if (ret && mii_np) { + of_node_put(dn); + return ret; + } + + dn = of_find_compatible_node(NULL, NULL, "realtek,rtl83xx-switch"); + if (!dn) { + dev_err(priv->dev, "No RTL switch node in DTS\n"); + return -ENODEV; + } + + for_each_node_by_name(dn, "port") { + phy_interface_t interface; + u32 led_set; + + if (!of_device_is_available(dn)) + continue; + + if (of_property_read_u32(dn, "reg", &pn)) + continue; + + pr_info("%s found port %d\n", __func__, pn); + phy_node = of_parse_phandle(dn, "phy-handle", 0); + if (!phy_node) { + if (pn != priv->cpu_port) + dev_err(priv->dev, "Port node %d misses phy-handle\n", pn); + continue; + } + + pr_info("%s port %d has phandle\n", __func__, pn); + if (of_property_read_u32(phy_node, "sds", &priv->ports[pn].sds_num)) + priv->ports[pn].sds_num = -1; + else { + pr_info("%s sds port %d is %d\n", __func__, pn, + priv->ports[pn].sds_num); + } + pr_info("%s port %d has SDS\n", __func__, priv->ports[pn].sds_num); + + if (of_get_phy_mode(dn, &interface)) + interface = PHY_INTERFACE_MODE_NA; + if (interface == PHY_INTERFACE_MODE_HSGMII) + priv->ports[pn].is2G5 = true; + if (interface == PHY_INTERFACE_MODE_USXGMII) + priv->ports[pn].is2G5 = priv->ports[pn].is10G = true; + if (interface == PHY_INTERFACE_MODE_10GBASER) + priv->ports[pn].is10G = true; + + if (of_property_read_u32(dn, "led-set", &led_set)) + led_set = 0; + priv->ports[pn].led_set = led_set; + + // Check for the integrated SerDes of the RTL8380M first + if (of_property_read_bool(phy_node, "phy-is-integrated") + && priv->id == 0x8380 && pn >= 24) { + pr_debug("----> FÓUND A SERDES\n"); + priv->ports[pn].phy = PHY_RTL838X_SDS; + continue; + } + + if (priv->id >= 0x9300) { + priv->ports[pn].phy_is_integrated = false; + if (of_property_read_bool(phy_node, "phy-is-integrated")) { + priv->ports[pn].phy_is_integrated = true; + priv->ports[pn].phy = PHY_RTL930X_SDS; + } + } else { + if (of_property_read_bool(phy_node, "phy-is-integrated") + && !of_property_read_bool(phy_node, "sfp")) { + 
priv->ports[pn].phy = PHY_RTL8218B_INT; + continue; + } + } + + if (!of_property_read_bool(phy_node, "phy-is-integrated") + && of_property_read_bool(phy_node, "sfp")) { + priv->ports[pn].phy = PHY_RTL8214FC; + continue; + } + + if (!of_property_read_bool(phy_node, "phy-is-integrated") + && !of_property_read_bool(phy_node, "sfp")) { + priv->ports[pn].phy = PHY_RTL8218B_EXT; + continue; + } + } + + /* Disable MAC polling the PHY so that we can start configuration */ + priv->r->set_port_reg_le(0ULL, priv->r->smi_poll_ctrl); + + /* Enable PHY control via SoC */ + if (priv->family_id == RTL8380_FAMILY_ID) { + /* Enable SerDes NWAY and PHY control via SoC */ + sw_w32_mask(BIT(7), BIT(15), RTL838X_SMI_GLB_CTRL); + } else if (priv->family_id == RTL8390_FAMILY_ID) { + /* Disable PHY polling via SoC */ + sw_w32_mask(BIT(7), 0, RTL839X_SMI_GLB_CTRL); + } + + /* Power on fibre ports and reset them if necessary */ + if (priv->ports[24].phy == PHY_RTL838X_SDS) { + pr_debug("Powering on fibre ports & reset\n"); + rtl8380_sds_power(24, 1); + rtl8380_sds_power(26, 1); + } + + pr_debug("%s done\n", __func__); + return 0; +} + +static int __init rtl83xx_get_l2aging(struct rtl838x_switch_priv *priv) +{ + int t = sw_r32(priv->r->l2_ctrl_1); + + t &= priv->family_id == RTL8380_FAMILY_ID ? 0x7fffff : 0x1FFFFF; + + if (priv->family_id == RTL8380_FAMILY_ID) + t = t * 128 / 625; /* Aging time in seconds. 0: L2 aging disabled */ + else + t = (t * 3) / 5; + + pr_debug("L2 AGING time: %d sec\n", t); + pr_debug("Dynamic aging for ports: %x\n", sw_r32(priv->r->l2_port_aging_out)); + return t; +} + +/* Caller must hold priv->reg_mutex */ +int rtl83xx_lag_add(struct dsa_switch *ds, int group, int port, struct netdev_lag_upper_info *info) +{ + struct rtl838x_switch_priv *priv = ds->priv; + int i; + u32 algomsk = 0; + u32 algoidx = 0; + + if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { + pr_err("%s: Only mode LACP 802.3ad (4) allowed.\n", __func__); + return -EINVAL; + } + + if (group >= priv->n_lags) { + pr_err("%s: LAG %d invalid.\n", __func__, group); + return -EINVAL; + } + + if (port >= priv->cpu_port) { + pr_err("%s: Port %d invalid.\n", __func__, port); + return -EINVAL; + } + + for (i = 0; i < priv->n_lags; i++) { + if (priv->lags_port_members[i] & BIT_ULL(port)) + break; + } + if (i != priv->n_lags) { + pr_err("%s: Port %d already member of LAG %d.\n", __func__, port, i); + return -ENOSPC; + } + switch(info->hash_type) { + case NETDEV_LAG_HASH_L2: + algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT; + algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT; + break; + case NETDEV_LAG_HASH_L23: + algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT; + algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT; + algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; //source ip + algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; //dest ip + algoidx = 1; + break; + case NETDEV_LAG_HASH_L34: + algomsk |= TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT; //sport + algomsk |= TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT; //dport + algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; //source ip + algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; //dest ip + algoidx = 2; + break; + default: + algomsk |= 0x7f; + } + priv->r->set_distribution_algorithm(group, algoidx, algomsk); + priv->r->mask_port_reg_be(0, BIT_ULL(port), priv->r->trk_mbr_ctr(group)); + priv->lags_port_members[group] |= BIT_ULL(port); + + pr_info("%s: Added port %d to LAG %d. 
Members now %016llx.\n", + __func__, port, group, priv->lags_port_members[group]); + return 0; +} + +/* Caller must hold priv->reg_mutex */ +int rtl83xx_lag_del(struct dsa_switch *ds, int group, int port) +{ + struct rtl838x_switch_priv *priv = ds->priv; + + if (group >= priv->n_lags) { + pr_err("%s: LAG %d invalid.\n", __func__, group); + return -EINVAL; + } + + if (port >= priv->cpu_port) { + pr_err("%s: Port %d invalid.\n", __func__, port); + return -EINVAL; + } + + if (!(priv->lags_port_members[group] & BIT_ULL(port))) { + pr_err("%s: Port %d not member of LAG %d.\n", __func__, port, group); + return -ENOSPC; + } + + // 0x7f algo mask all + priv->r->mask_port_reg_be(BIT_ULL(port), 0, priv->r->trk_mbr_ctr(group)); + priv->lags_port_members[group] &= ~BIT_ULL(port); + + pr_info("%s: Removed port %d from LAG %d. Members now %016llx.\n", + __func__, port, group, priv->lags_port_members[group]); + return 0; +} + +/* + * Allocate a 64 bit octet counter located in the LOG HW table + */ +static int rtl83xx_octet_cntr_alloc(struct rtl838x_switch_priv *priv) +{ + int idx; + + mutex_lock(&priv->reg_mutex); + + idx = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS); + if (idx >= priv->n_counters) { + mutex_unlock(&priv->reg_mutex); + return -1; + } + + set_bit(idx, priv->octet_cntr_use_bm); + mutex_unlock(&priv->reg_mutex); + + return idx; +} + +/* + * Allocate a 32-bit packet counter + * 2 32-bit packet counters share the location of a 64-bit octet counter + * Initially there are no free packet counters and 2 new ones need to be freed + * by allocating the corresponding octet counter + */ +int rtl83xx_packet_cntr_alloc(struct rtl838x_switch_priv *priv) +{ + int idx, j; + + mutex_lock(&priv->reg_mutex); + + /* Because initially no packet counters are free, the logic is reversed: + * a 0-bit means the counter is already allocated (for octets) + */ + idx = find_first_bit(priv->packet_cntr_use_bm, MAX_COUNTERS * 2); + if (idx >= priv->n_counters * 2) { + j = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS); + if (j >= priv->n_counters) { + mutex_unlock(&priv->reg_mutex); + return -1; + } + set_bit(j, priv->octet_cntr_use_bm); + idx = j * 2; + set_bit(j * 2 + 1, priv->packet_cntr_use_bm); + + } else { + clear_bit(idx, priv->packet_cntr_use_bm); + } + + mutex_unlock(&priv->reg_mutex); + + return idx; +} + +/* + * Add an L2 nexthop entry for the L3 routing system / PIE forwarding in the SoC + * Use VID and MAC in rtl838x_l2_entry to identify either a free slot in the L2 hash table + * or mark an existing entry as a nexthop by setting it's nexthop bit + * Called from the L3 layer + * The index in the L2 hash table is filled into nh->l2_id; + */ +int rtl83xx_l2_nexthop_add(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh) +{ + struct rtl838x_l2_entry e; + u64 seed = priv->r->l2_hash_seed(nh->mac, nh->rvid); + u32 key = priv->r->l2_hash_key(priv, seed); + int i, idx = -1; + u64 entry; + + pr_debug("%s searching for %08llx vid %d with key %d, seed: %016llx\n", + __func__, nh->mac, nh->rvid, key, seed); + + e.type = L2_UNICAST; + u64_to_ether_addr(nh->mac, &e.mac[0]); + e.port = nh->port; + + // Loop over all entries in the hash-bucket and over the second block on 93xx SoCs + for (i = 0; i < priv->l2_bucket_size; i++) { + entry = priv->r->read_l2_entry_using_hash(key, i, &e); + + if (!e.valid || ((entry & 0x0fffffffffffffffULL) == seed)) { + idx = i > 3 ? 
((key >> 14) & 0xffff) | i >> 1 + : ((key << 2) | i) & 0xffff; + break; + } + } + + if (idx < 0) { + pr_err("%s: No more L2 forwarding entries available\n", __func__); + return -1; + } + + // Found an existing (e->valid is true) or empty entry, make it a nexthop entry + nh->l2_id = idx; + if (e.valid) { + nh->port = e.port; + nh->vid = e.vid; // Save VID + nh->rvid = e.rvid; + nh->dev_id = e.stack_dev; + // If the entry is already a valid next hop entry, don't change it + if (e.next_hop) + return 0; + } else { + e.valid = true; + e.is_static = true; + e.rvid = nh->rvid; + e.is_ip_mc = false; + e.is_ipv6_mc = false; + e.block_da = false; + e.block_sa = false; + e.suspended = false; + e.age = 0; // With port-ignore + e.port = priv->port_ignore; + u64_to_ether_addr(nh->mac, &e.mac[0]); + } + e.next_hop = true; + e.nh_route_id = nh->id; // NH route ID takes place of VID + e.nh_vlan_target = false; + + priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e); + + return 0; +} + +/* + * Removes a Layer 2 next hop entry in the forwarding database + * If it was static, the entire entry is removed, otherwise the nexthop bit is cleared + * and we wait until the entry ages out + */ +int rtl83xx_l2_nexthop_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh) +{ + struct rtl838x_l2_entry e; + u32 key = nh->l2_id >> 2; + int i = nh->l2_id & 0x3; + u64 entry = entry = priv->r->read_l2_entry_using_hash(key, i, &e); + + pr_debug("%s: id %d, key %d, index %d\n", __func__, nh->l2_id, key, i); + if (!e.valid) { + dev_err(priv->dev, "unknown nexthop, id %x\n", nh->l2_id); + return -1; + } + + if (e.is_static) + e.valid = false; + e.next_hop = false; + e.vid = nh->vid; // Restore VID + e.rvid = nh->rvid; + + priv->r->write_l2_entry_using_hash(key, i, &e); + + return 0; +} + +static int rtl83xx_handle_changeupper(struct rtl838x_switch_priv *priv, + struct net_device *ndev, + struct netdev_notifier_changeupper_info *info) +{ + struct net_device *upper = info->upper_dev; + struct netdev_lag_upper_info *lag_upper_info = NULL; + int i, j, err; + + if (!netif_is_lag_master(upper)) + return 0; + + mutex_lock(&priv->reg_mutex); + + for (i = 0; i < priv->n_lags; i++) { + if ((!priv->lag_devs[i]) || (priv->lag_devs[i] == upper)) + break; + } + for (j = 0; j < priv->cpu_port; j++) { + if (priv->ports[j].dp->slave == ndev) + break; + } + if (j >= priv->cpu_port) { + err = -EINVAL; + goto out; + } + + if (info->linking) { + lag_upper_info = info->upper_info; + if (!priv->lag_devs[i]) + priv->lag_devs[i] = upper; + err = rtl83xx_lag_add(priv->ds, i, priv->ports[j].dp->index, lag_upper_info); + if (err) { + err = -EINVAL; + goto out; + } + } else { + if (!priv->lag_devs[i]) + err = -EINVAL; + err = rtl83xx_lag_del(priv->ds, i, priv->ports[j].dp->index); + if (err) { + err = -EINVAL; + goto out; + } + if (!priv->lags_port_members[i]) + priv->lag_devs[i] = NULL; + } + +out: + mutex_unlock(&priv->reg_mutex); + return 0; +} + +/* + * Is the lower network device a DSA slave network device of our RTL930X-switch? + * Unfortunately we cannot just follow dev->dsa_prt as this is only set for the + * DSA master device. 
+ */ +int rtl83xx_port_is_under(const struct net_device * dev, struct rtl838x_switch_priv *priv) +{ + int i; + +// TODO: On 5.12: +// if(!dsa_slave_dev_check(dev)) { +// netdev_info(dev, "%s: not a DSA device.\n", __func__); +// return -EINVAL; +// } + + for (i = 0; i < priv->cpu_port; i++) { + if (!priv->ports[i].dp) + continue; + if (priv->ports[i].dp->slave == dev) + return i; + } + return -EINVAL; +} + +static int rtl83xx_netdevice_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct rtl838x_switch_priv *priv; + int err; + + pr_debug("In: %s, event: %lu\n", __func__, event); + + if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE)) + return NOTIFY_DONE; + + priv = container_of(this, struct rtl838x_switch_priv, nb); + switch (event) { + case NETDEV_CHANGEUPPER: + err = rtl83xx_handle_changeupper(priv, ndev, ptr); + break; + } + + if (err) + return err; + + return NOTIFY_DONE; +} + +const static struct rhashtable_params route_ht_params = { + .key_len = sizeof(u32), + .key_offset = offsetof(struct rtl83xx_route, gw_ip), + .head_offset = offsetof(struct rtl83xx_route, linkage), +}; + +/* + * Updates an L3 next hop entry in the ROUTING table + */ +static int rtl83xx_l3_nexthop_update(struct rtl838x_switch_priv *priv, __be32 ip_addr, u64 mac) +{ + struct rtl83xx_route *r; + struct rhlist_head *tmp, *list; + + rcu_read_lock(); + list = rhltable_lookup(&priv->routes, &ip_addr, route_ht_params); + if (!list) { + rcu_read_unlock(); + return -ENOENT; + } + + rhl_for_each_entry_rcu(r, tmp, list, linkage) { + pr_info("%s: Setting up fwding: ip %pI4, GW mac %016llx\n", + __func__, &ip_addr, mac); + + // Reads the ROUTING table entry associated with the route + priv->r->route_read(r->id, r); + pr_info("Route with id %d to %pI4 / %d\n", r->id, &r->dst_ip, r->prefix_len); + + r->nh.mac = r->nh.gw = mac; + r->nh.port = priv->port_ignore; + r->nh.id = r->id; + + // Do we need to explicitly add a DMAC entry with the route's nh index? 
+ if (priv->r->set_l3_egress_mac) + priv->r->set_l3_egress_mac(r->id, mac); + + // Update ROUTING table: map gateway-mac and switch-mac id to route id + rtl83xx_l2_nexthop_add(priv, &r->nh); + + r->attr.valid = true; + r->attr.action = ROUTE_ACT_FORWARD; + r->attr.type = 0; + r->attr.hit = false; // Reset route-used indicator + + // Add PIE entry with dst_ip and prefix_len + r->pr.dip = r->dst_ip; + r->pr.dip_m = inet_make_mask(r->prefix_len); + + if (r->is_host_route) { + int slot = priv->r->find_l3_slot(r, false); + + pr_info("%s: Got slot for route: %d\n", __func__, slot); + priv->r->host_route_write(slot, r); + } else { + priv->r->route_write(r->id, r); + r->pr.fwd_sel = true; + r->pr.fwd_data = r->nh.l2_id; + r->pr.fwd_act = PIE_ACT_ROUTE_UC; + } + + if (priv->r->set_l3_nexthop) + priv->r->set_l3_nexthop(r->nh.id, r->nh.l2_id, r->nh.if_id); + + if (r->pr.id < 0) { + r->pr.packet_cntr = rtl83xx_packet_cntr_alloc(priv); + if (r->pr.packet_cntr >= 0) { + pr_info("Using packet counter %d\n", r->pr.packet_cntr); + r->pr.log_sel = true; + r->pr.log_data = r->pr.packet_cntr; + } + priv->r->pie_rule_add(priv, &r->pr); + } else { + int pkts = priv->r->packet_cntr_read(r->pr.packet_cntr); + pr_info("%s: total packets: %d\n", __func__, pkts); + + priv->r->pie_rule_write(priv, r->pr.id, &r->pr); + } + } + rcu_read_unlock(); + return 0; +} + +static int rtl83xx_port_ipv4_resolve(struct rtl838x_switch_priv *priv, + struct net_device *dev, __be32 ip_addr) +{ + struct neighbour *n = neigh_lookup(&arp_tbl, &ip_addr, dev); + int err = 0; + u64 mac; + + if (!n) { + n = neigh_create(&arp_tbl, &ip_addr, dev); + if (IS_ERR(n)) + return PTR_ERR(n); + } + + /* If the neigh is already resolved, then go ahead and + * install the entry, otherwise start the ARP process to + * resolve the neigh. 
+ */ + if (n->nud_state & NUD_VALID) { + mac = ether_addr_to_u64(n->ha); + pr_info("%s: resolved mac: %016llx\n", __func__, mac); + rtl83xx_l3_nexthop_update(priv, ip_addr, mac); + } else { + pr_info("%s: need to wait\n", __func__); + neigh_event_send(n, NULL); + } + + neigh_release(n); + return err; +} + +struct rtl83xx_walk_data { + struct rtl838x_switch_priv *priv; + int port; +}; + +static int rtl83xx_port_lower_walk(struct net_device *lower, struct netdev_nested_priv *_priv) +{ + struct rtl83xx_walk_data *data = (struct rtl83xx_walk_data *)_priv->data; + struct rtl838x_switch_priv *priv = data->priv; + int ret = 0; + int index; + + index = rtl83xx_port_is_under(lower, priv); + data->port = index; + if (index >= 0) { + pr_debug("Found DSA-port, index %d\n", index); + ret = 1; + } + + return ret; +} + +int rtl83xx_port_dev_lower_find(struct net_device *dev, struct rtl838x_switch_priv *priv) +{ + struct rtl83xx_walk_data data; + struct netdev_nested_priv _priv; + + data.priv = priv; + data.port = 0; + _priv.data = (void *)&data; + + netdev_walk_all_lower_dev(dev, rtl83xx_port_lower_walk, &_priv); + + return data.port; +} + +static struct rtl83xx_route *rtl83xx_route_alloc(struct rtl838x_switch_priv *priv, u32 ip) +{ + struct rtl83xx_route *r; + int idx = 0, err; + + mutex_lock(&priv->reg_mutex); + + idx = find_first_zero_bit(priv->route_use_bm, MAX_ROUTES); + pr_debug("%s id: %d, ip %pI4\n", __func__, idx, &ip); + + r = kzalloc(sizeof(*r), GFP_KERNEL); + if (!r) { + mutex_unlock(&priv->reg_mutex); + return r; + } + + r->id = idx; + r->gw_ip = ip; + r->pr.id = -1; // We still need to allocate a rule in HW + r->is_host_route = false; + + err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params); + if (err) { + pr_err("Could not insert new rule\n"); + mutex_unlock(&priv->reg_mutex); + goto out_free; + } + + set_bit(idx, priv->route_use_bm); + + mutex_unlock(&priv->reg_mutex); + + return r; + +out_free: + kfree(r); + return NULL; +} + + +static struct rtl83xx_route *rtl83xx_host_route_alloc(struct rtl838x_switch_priv *priv, u32 ip) +{ + struct rtl83xx_route *r; + int idx = 0, err; + + mutex_lock(&priv->reg_mutex); + + idx = find_first_zero_bit(priv->host_route_use_bm, MAX_HOST_ROUTES); + pr_debug("%s id: %d, ip %pI4\n", __func__, idx, &ip); + + r = kzalloc(sizeof(*r), GFP_KERNEL); + if (!r) { + mutex_unlock(&priv->reg_mutex); + return r; + } + + /* We require a unique route ID irrespective of whether it is a prefix or host + * route (on RTL93xx) as we use this ID to associate a DMAC and next-hop entry */ + r->id = idx + MAX_ROUTES; + + r->gw_ip = ip; + r->pr.id = -1; // We still need to allocate a rule in HW + r->is_host_route = true; + + err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params); + if (err) { + pr_err("Could not insert new rule\n"); + mutex_unlock(&priv->reg_mutex); + goto out_free; + } + + set_bit(idx, priv->host_route_use_bm); + + mutex_unlock(&priv->reg_mutex); + + return r; + +out_free: + kfree(r); + return NULL; +} + + + +static void rtl83xx_route_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_route *r) +{ + int id; + + if (rhltable_remove(&priv->routes, &r->linkage, route_ht_params)) + dev_warn(priv->dev, "Could not remove route\n"); + + if (r->is_host_route) { + id = priv->r->find_l3_slot(r, false); + pr_debug("%s: Got id for host route: %d\n", __func__, id); + r->attr.valid = false; + priv->r->host_route_write(id, r); + clear_bit(r->id - MAX_ROUTES, priv->host_route_use_bm); + } else { + // If there is a HW representation of the route, delete 
it + if (priv->r->route_lookup_hw) { + id = priv->r->route_lookup_hw(r); + pr_info("%s: Got id for prefix route: %d\n", __func__, id); + r->attr.valid = false; + priv->r->route_write(id, r); + } + clear_bit(r->id, priv->route_use_bm); + } + + kfree(r); +} + +static int rtl83xx_fib4_del(struct rtl838x_switch_priv *priv, + struct fib_entry_notifier_info *info) +{ + struct fib_nh *nh = fib_info_nh(info->fi, 0); + struct rtl83xx_route *r; + struct rhlist_head *tmp, *list; + + pr_debug("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len); + rcu_read_lock(); + list = rhltable_lookup(&priv->routes, &nh->fib_nh_gw4, route_ht_params); + if (!list) { + rcu_read_unlock(); + pr_err("%s: no such gateway: %pI4\n", __func__, &nh->fib_nh_gw4); + return -ENOENT; + } + rhl_for_each_entry_rcu(r, tmp, list, linkage) { + if (r->dst_ip == info->dst && r->prefix_len == info->dst_len) { + pr_info("%s: found a route with id %d, nh-id %d\n", + __func__, r->id, r->nh.id); + break; + } + } + rcu_read_unlock(); + + rtl83xx_l2_nexthop_rm(priv, &r->nh); + + pr_debug("%s: Releasing packet counter %d\n", __func__, r->pr.packet_cntr); + set_bit(r->pr.packet_cntr, priv->packet_cntr_use_bm); + priv->r->pie_rule_rm(priv, &r->pr); + + rtl83xx_route_rm(priv, r); + + nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; + + return 0; +} + +/* + * On the RTL93xx, an L3 termination endpoint MAC address on which the router waits + * for packets to be routed needs to be allocated. + */ +static int rtl83xx_alloc_router_mac(struct rtl838x_switch_priv *priv, u64 mac) +{ + int i, free_mac = -1; + struct rtl93xx_rt_mac m; + + mutex_lock(&priv->reg_mutex); + for (i = 0; i < MAX_ROUTER_MACS; i++) { + priv->r->get_l3_router_mac(i, &m); + if (free_mac < 0 && !m.valid) { + free_mac = i; + continue; + } + if (m.valid && m.mac == mac) { + free_mac = i; + break; + } + } + + if (free_mac < 0) { + pr_err("No free router MACs, cannot offload\n"); + mutex_unlock(&priv->reg_mutex); + return -1; + } + + m.valid = true; + m.mac = mac; + m.p_type = 0; // An individual port, not a trunk port + m.p_id = 0x3f; // Listen on any port + m.p_id_mask = 0; + m.vid = 0; // Listen on any VLAN... + m.vid_mask = 0; // ... 
so mask needs to be 0 + m.mac_mask = 0xffffffffffffULL; // We want an exact match of the interface MAC + m.action = L3_FORWARD; // Route the packet + priv->r->set_l3_router_mac(free_mac, &m); + + mutex_unlock(&priv->reg_mutex); + + return 0; +} + +static int rtl83xx_alloc_egress_intf(struct rtl838x_switch_priv *priv, u64 mac, int vlan) +{ + int i, free_mac = -1; + struct rtl838x_l3_intf intf; + u64 m; + + mutex_lock(&priv->reg_mutex); + for (i = 0; i < MAX_SMACS; i++) { + m = priv->r->get_l3_egress_mac(L3_EGRESS_DMACS + i); + if (free_mac < 0 && !m) { + free_mac = i; + continue; + } + if (m == mac) { + mutex_unlock(&priv->reg_mutex); + return i; + } + } + + if (free_mac < 0) { + pr_err("No free egress interface, cannot offload\n"); + return -1; + } + + // Set up default egress interface 1 + intf.vid = vlan; + intf.smac_idx = free_mac; + intf.ip4_mtu_id = 1; + intf.ip6_mtu_id = 1; + intf.ttl_scope = 1; // TTL + intf.hl_scope = 1; // Hop Limit + intf.ip4_icmp_redirect = intf.ip6_icmp_redirect = 2; // FORWARD + intf.ip4_pbr_icmp_redirect = intf.ip6_pbr_icmp_redirect = 2; // FORWARD; + priv->r->set_l3_egress_intf(free_mac, &intf); + + priv->r->set_l3_egress_mac(L3_EGRESS_DMACS + free_mac, mac); + + mutex_unlock(&priv->reg_mutex); + + return free_mac; +} + +static int rtl83xx_fib4_add(struct rtl838x_switch_priv *priv, + struct fib_entry_notifier_info *info) +{ + struct fib_nh *nh = fib_info_nh(info->fi, 0); + struct net_device *dev = fib_info_nh(info->fi, 0)->fib_nh_dev; + int port; + struct rtl83xx_route *r; + bool to_localhost; + int vlan = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 0; + + pr_debug("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len); + if (!info->dst) { + pr_info("Not offloading default route for now\n"); + return 0; + } + + pr_debug("GW: %pI4, interface name %s, mac %016llx, vlan %d\n", &nh->fib_nh_gw4, dev->name, + ether_addr_to_u64(dev->dev_addr), vlan + ); + + port = rtl83xx_port_dev_lower_find(dev, priv); + if (port < 0) + return -1; + + // For now we only work with routes that have a gateway and are not ourself +// if ((!nh->fib_nh_gw4) && (info->dst_len != 32)) +// return 0; + + if ((info->dst & 0xff) == 0xff) + return 0; + + // Do not offload routes to 192.168.100.x + if ((info->dst & 0xffffff00) == 0xc0a86400) + return 0; + + // Do not offload routes to 127.x.x.x + if ((info->dst & 0xff000000) == 0x7f000000) + return 0; + + // Allocate route or host-route (entry if hardware supports this) + if (info->dst_len == 32 && priv->r->host_route_write) + r = rtl83xx_host_route_alloc(priv, nh->fib_nh_gw4); + else + r = rtl83xx_route_alloc(priv, nh->fib_nh_gw4); + + if (!r) { + pr_err("%s: No more free route entries\n", __func__); + return -1; + } + + r->dst_ip = info->dst; + r->prefix_len = info->dst_len; + r->nh.rvid = vlan; + to_localhost = !nh->fib_nh_gw4; + + if (priv->r->set_l3_router_mac) { + u64 mac = ether_addr_to_u64(dev->dev_addr); + + pr_debug("Local route and router mac %016llx\n", mac); + + if (rtl83xx_alloc_router_mac(priv, mac)) + goto out_free_rt; + + // vid = 0: Do not care about VID + r->nh.if_id = rtl83xx_alloc_egress_intf(priv, mac, vlan); + if (r->nh.if_id < 0) + goto out_free_rmac; + + if (to_localhost) { + int slot; + + r->nh.mac = mac; + r->nh.port = priv->port_ignore; + r->attr.valid = true; + r->attr.action = ROUTE_ACT_TRAP2CPU; + r->attr.type = 0; + + slot = priv->r->find_l3_slot(r, false); + pr_debug("%s: Got slot for route: %d\n", __func__, slot); + priv->r->host_route_write(slot, r); + } + } + + // We need to resolve the mac address 
of the GW + if (!to_localhost) + rtl83xx_port_ipv4_resolve(priv, dev, nh->fib_nh_gw4); + + nh->fib_nh_flags |= RTNH_F_OFFLOAD; + + return 0; + +out_free_rmac: +out_free_rt: + return 0; +} + +static int rtl83xx_fib6_add(struct rtl838x_switch_priv *priv, + struct fib6_entry_notifier_info *info) +{ + pr_debug("In %s\n", __func__); +// nh->fib_nh_flags |= RTNH_F_OFFLOAD; + return 0; +} + +struct net_event_work { + struct work_struct work; + struct rtl838x_switch_priv *priv; + u64 mac; + u32 gw_addr; +}; + +static void rtl83xx_net_event_work_do(struct work_struct *work) +{ + struct net_event_work *net_work = + container_of(work, struct net_event_work, work); + struct rtl838x_switch_priv *priv = net_work->priv; + + rtl83xx_l3_nexthop_update(priv, net_work->gw_addr, net_work->mac); +} + +static int rtl83xx_netevent_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct rtl838x_switch_priv *priv; + struct net_device *dev; + struct neighbour *n = ptr; + int err, port; + struct net_event_work *net_work; + + priv = container_of(this, struct rtl838x_switch_priv, ne_nb); + + net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC); + if (!net_work) + return NOTIFY_BAD; + + INIT_WORK(&net_work->work, rtl83xx_net_event_work_do); + net_work->priv = priv; + + switch (event) { + case NETEVENT_NEIGH_UPDATE: + if (n->tbl != &arp_tbl) + return NOTIFY_DONE; + dev = n->dev; + port = rtl83xx_port_dev_lower_find(dev, priv); + if (port < 0 || !(n->nud_state & NUD_VALID)) { + pr_debug("%s: Neigbour invalid, not updating\n", __func__); + kfree(net_work); + return NOTIFY_DONE; + } + + net_work->mac = ether_addr_to_u64(n->ha); + net_work->gw_addr = *(__be32 *) n->primary_key; + + pr_debug("%s: updating neighbour on port %d, mac %016llx\n", + __func__, port, net_work->mac); + schedule_work(&net_work->work); + if (err) + netdev_warn(dev, "failed to handle neigh update (err %d)\n", err); + break; + } + + return NOTIFY_DONE; +} + +struct rtl83xx_fib_event_work { + struct work_struct work; + union { + struct fib_entry_notifier_info fen_info; + struct fib6_entry_notifier_info fen6_info; + struct fib_rule_notifier_info fr_info; + }; + struct rtl838x_switch_priv *priv; + bool is_fib6; + unsigned long event; +}; + +static void rtl83xx_fib_event_work_do(struct work_struct *work) +{ + struct rtl83xx_fib_event_work *fib_work = + container_of(work, struct rtl83xx_fib_event_work, work); + struct rtl838x_switch_priv *priv = fib_work->priv; + struct fib_rule *rule; + int err; + + /* Protect internal structures from changes */ + rtnl_lock(); + pr_debug("%s: doing work, event %ld\n", __func__, fib_work->event); + switch (fib_work->event) { + case FIB_EVENT_ENTRY_ADD: + case FIB_EVENT_ENTRY_REPLACE: + case FIB_EVENT_ENTRY_APPEND: + if (fib_work->is_fib6) { + err = rtl83xx_fib6_add(priv, &fib_work->fen6_info); + } else { + err = rtl83xx_fib4_add(priv, &fib_work->fen_info); + fib_info_put(fib_work->fen_info.fi); + } + if (err) + pr_err("%s: FIB4 failed\n", __func__); + break; + case FIB_EVENT_ENTRY_DEL: + rtl83xx_fib4_del(priv, &fib_work->fen_info); + fib_info_put(fib_work->fen_info.fi); + break; + case FIB_EVENT_RULE_ADD: + case FIB_EVENT_RULE_DEL: + rule = fib_work->fr_info.rule; + if (!fib4_rule_default(rule)) + pr_err("%s: FIB4 default rule failed\n", __func__); + fib_rule_put(rule); + break; + } + rtnl_unlock(); + kfree(fib_work); +} + +/* Called with rcu_read_lock() */ +static int rtl83xx_fib_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + struct fib_notifier_info *info = ptr; + struct 
rtl838x_switch_priv *priv; + struct rtl83xx_fib_event_work *fib_work; + + if ((info->family != AF_INET && info->family != AF_INET6 && + info->family != RTNL_FAMILY_IPMR && + info->family != RTNL_FAMILY_IP6MR)) + return NOTIFY_DONE; + + priv = container_of(this, struct rtl838x_switch_priv, fib_nb); + + fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); + if (!fib_work) + return NOTIFY_BAD; + + INIT_WORK(&fib_work->work, rtl83xx_fib_event_work_do); + fib_work->priv = priv; + fib_work->event = event; + fib_work->is_fib6 = false; + + switch (event) { + case FIB_EVENT_ENTRY_ADD: + case FIB_EVENT_ENTRY_REPLACE: + case FIB_EVENT_ENTRY_APPEND: + case FIB_EVENT_ENTRY_DEL: + pr_debug("%s: FIB_ENTRY ADD/DEL, event %ld\n", __func__, event); + if (info->family == AF_INET) { + struct fib_entry_notifier_info *fen_info = ptr; + + if (fen_info->fi->fib_nh_is_v6) { + NL_SET_ERR_MSG_MOD(info->extack, + "IPv6 gateway with IPv4 route is not supported"); + kfree(fib_work); + return notifier_from_errno(-EINVAL); + } + + memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info)); + /* Take referece on fib_info to prevent it from being + * freed while work is queued. Release it afterwards. + */ + fib_info_hold(fib_work->fen_info.fi); + + } else if (info->family == AF_INET6) { + struct fib6_entry_notifier_info *fen6_info = ptr; + pr_warn("%s: FIB_RULE ADD/DEL for IPv6 not supported\n", __func__); + kfree(fib_work); + return NOTIFY_DONE; + } + break; + + case FIB_EVENT_RULE_ADD: + case FIB_EVENT_RULE_DEL: + pr_debug("%s: FIB_RULE ADD/DEL, event: %ld\n", __func__, event); + memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info)); + fib_rule_get(fib_work->fr_info.rule); + break; + } + + schedule_work(&fib_work->work); + + return NOTIFY_DONE; +} + +static int __init rtl83xx_sw_probe(struct platform_device *pdev) +{ + int err = 0, i; + struct rtl838x_switch_priv *priv; + struct device *dev = &pdev->dev; + u64 bpdu_mask; + + pr_debug("Probing RTL838X switch device\n"); + if (!pdev->dev.of_node) { + dev_err(dev, "No DT found\n"); + return -EINVAL; + } + + // Initialize access to RTL switch tables + rtl_table_init(); + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL); + + if (!priv->ds) + return -ENOMEM; + priv->ds->dev = dev; + priv->ds->priv = priv; + priv->ds->ops = &rtl83xx_switch_ops; + priv->dev = dev; + + mutex_init(&priv->reg_mutex); + + priv->family_id = soc_info.family; + priv->id = soc_info.id; + switch(soc_info.family) { + case RTL8380_FAMILY_ID: + priv->ds->ops = &rtl83xx_switch_ops; + priv->cpu_port = RTL838X_CPU_PORT; + priv->port_mask = 0x1f; + priv->port_width = 1; + priv->irq_mask = 0x0FFFFFFF; + priv->r = &rtl838x_reg; + priv->ds->num_ports = 29; + priv->fib_entries = 8192; + rtl8380_get_version(priv); + priv->n_lags = 8; + priv->l2_bucket_size = 4; + priv->n_pie_blocks = 12; + priv->port_ignore = 0x1f; + priv->n_counters = 128; + break; + case RTL8390_FAMILY_ID: + priv->ds->ops = &rtl83xx_switch_ops; + priv->cpu_port = RTL839X_CPU_PORT; + priv->port_mask = 0x3f; + priv->port_width = 2; + priv->irq_mask = 0xFFFFFFFFFFFFFULL; + priv->r = &rtl839x_reg; + priv->ds->num_ports = 53; + priv->fib_entries = 16384; + rtl8390_get_version(priv); + priv->n_lags = 16; + priv->l2_bucket_size = 4; + priv->n_pie_blocks = 18; + priv->port_ignore = 0x3f; + priv->n_counters = 1024; + break; + case RTL9300_FAMILY_ID: + priv->ds->ops = &rtl930x_switch_ops; + priv->cpu_port = RTL930X_CPU_PORT; + priv->port_mask = 0x1f; + 
priv->port_width = 1; + priv->irq_mask = 0x0FFFFFFF; + priv->r = &rtl930x_reg; + priv->ds->num_ports = 29; + priv->fib_entries = 16384; + priv->version = RTL8390_VERSION_A; + priv->n_lags = 16; + sw_w32(1, RTL930X_ST_CTRL); + priv->l2_bucket_size = 8; + priv->n_pie_blocks = 16; + priv->port_ignore = 0x3f; + priv->n_counters = 2048; + break; + case RTL9310_FAMILY_ID: + priv->ds->ops = &rtl930x_switch_ops; + priv->cpu_port = RTL931X_CPU_PORT; + priv->port_mask = 0x3f; + priv->port_width = 2; + priv->irq_mask = 0xFFFFFFFFFFFFFULL; + priv->r = &rtl931x_reg; + priv->ds->num_ports = 57; + priv->fib_entries = 16384; + priv->version = RTL8390_VERSION_A; + priv->n_lags = 16; + priv->l2_bucket_size = 8; + break; + } + pr_debug("Chip version %c\n", priv->version); + + err = rtl83xx_mdio_probe(priv); + if (err) { + /* Probing fails the 1st time because of missing ethernet driver + * initialization. Use this to disable traffic in case the bootloader left if on + */ + return err; + } + err = dsa_register_switch(priv->ds); + if (err) { + dev_err(dev, "Error registering switch: %d\n", err); + return err; + } + + /* + * dsa_to_port returns dsa_port from the port list in + * dsa_switch_tree, the tree is built when the switch + * is registered by dsa_register_switch + */ + for (i = 0; i <= priv->cpu_port; i++) + priv->ports[i].dp = dsa_to_port(priv->ds, i); + + /* Enable link and media change interrupts. Are the SERDES masks needed? */ + sw_w32_mask(0, 3, priv->r->isr_glb_src); + + priv->r->set_port_reg_le(priv->irq_mask, priv->r->isr_port_link_sts_chg); + priv->r->set_port_reg_le(priv->irq_mask, priv->r->imr_port_link_sts_chg); + + priv->link_state_irq = platform_get_irq(pdev, 0); + pr_info("LINK state irq: %d\n", priv->link_state_irq); + switch (priv->family_id) { + case RTL8380_FAMILY_ID: + err = request_irq(priv->link_state_irq, rtl838x_switch_irq, + IRQF_SHARED, "rtl838x-link-state", priv->ds); + break; + case RTL8390_FAMILY_ID: + err = request_irq(priv->link_state_irq, rtl839x_switch_irq, + IRQF_SHARED, "rtl839x-link-state", priv->ds); + break; + case RTL9300_FAMILY_ID: + err = request_irq(priv->link_state_irq, rtl930x_switch_irq, + IRQF_SHARED, "rtl930x-link-state", priv->ds); + break; + case RTL9310_FAMILY_ID: + err = request_irq(priv->link_state_irq, rtl931x_switch_irq, + IRQF_SHARED, "rtl931x-link-state", priv->ds); + break; + } + if (err) { + dev_err(dev, "Error setting up switch interrupt.\n"); + /* Need to free allocated switch here */ + } + + /* Enable interrupts for switch, on RTL931x, the IRQ is always on globally */ + if (soc_info.family != RTL9310_FAMILY_ID) + sw_w32(0x1, priv->r->imr_glb); + + rtl83xx_get_l2aging(priv); + + rtl83xx_setup_qos(priv); + + priv->r->l3_setup(priv); + + /* Clear all destination ports for mirror groups */ + for (i = 0; i < 4; i++) + priv->mirror_group_ports[i] = -1; + + /* + * Register netdevice event callback to catch changes in link aggregation groups + */ + priv->nb.notifier_call = rtl83xx_netdevice_event; + if (register_netdevice_notifier(&priv->nb)) { + priv->nb.notifier_call = NULL; + dev_err(dev, "Failed to register LAG netdev notifier\n"); + goto err_register_nb; + } + + // Initialize hash table for L3 routing + rhltable_init(&priv->routes, &route_ht_params); + + /* + * Register netevent notifier callback to catch notifications about neighboring + * changes to update nexthop entries for L3 routing. 
+ */ + priv->ne_nb.notifier_call = rtl83xx_netevent_event; + if (register_netevent_notifier(&priv->ne_nb)) { + priv->ne_nb.notifier_call = NULL; + dev_err(dev, "Failed to register netevent notifier\n"); + goto err_register_ne_nb; + } + + priv->fib_nb.notifier_call = rtl83xx_fib_event; + + /* + * Register Forwarding Information Base notifier to offload routes where + * where possible + * Only FIBs pointing to our own netdevs are programmed into + * the device, so no need to pass a callback. + */ + err = register_fib_notifier(&init_net, &priv->fib_nb, NULL, NULL); + if (err) + goto err_register_fib_nb; + + // TODO: put this into l2_setup() + // Flood BPDUs to all ports including cpu-port + if (soc_info.family != RTL9300_FAMILY_ID) { + bpdu_mask = soc_info.family == RTL8380_FAMILY_ID ? 0x1FFFFFFF : 0x1FFFFFFFFFFFFF; + priv->r->set_port_reg_be(bpdu_mask, priv->r->rma_bpdu_fld_pmask); + + // TRAP 802.1X frames (EAPOL) to the CPU-Port, bypass STP and VLANs + sw_w32(7, priv->r->spcl_trap_eapol_ctrl); + + rtl838x_dbgfs_init(priv); + } else { + rtl930x_dbgfs_init(priv); + } + + return 0; + +err_register_fib_nb: + unregister_netevent_notifier(&priv->ne_nb); +err_register_ne_nb: + unregister_netdevice_notifier(&priv->nb); +err_register_nb: + return err; +} + +static int rtl83xx_sw_remove(struct platform_device *pdev) +{ + // TODO: + pr_debug("Removing platform driver for rtl83xx-sw\n"); + return 0; +} + +static const struct of_device_id rtl83xx_switch_of_ids[] = { + { .compatible = "realtek,rtl83xx-switch"}, + { /* sentinel */ } +}; + + +MODULE_DEVICE_TABLE(of, rtl83xx_switch_of_ids); + +static struct platform_driver rtl83xx_switch_driver = { + .probe = rtl83xx_sw_probe, + .remove = rtl83xx_sw_remove, + .driver = { + .name = "rtl83xx-switch", + .pm = NULL, + .of_match_table = rtl83xx_switch_of_ids, + }, +}; + +module_platform_driver(rtl83xx_switch_driver); + +MODULE_AUTHOR("B. 
Koblitz"); +MODULE_DESCRIPTION("RTL83XX SoC Switch Driver"); +MODULE_LICENSE("GPL"); diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/debugfs.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/debugfs.c new file mode 100644 index 0000000000..9a7c7714c6 --- /dev/null +++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/debugfs.c @@ -0,0 +1,727 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +#include +#include "rtl83xx.h" + +#define RTL838X_DRIVER_NAME "rtl838x" + +#define RTL8380_LED_GLB_CTRL (0xA000) +#define RTL8380_LED_MODE_SEL (0x1004) +#define RTL8380_LED_MODE_CTRL (0xA004) +#define RTL8380_LED_P_EN_CTRL (0xA008) +#define RTL8380_LED_SW_CTRL (0xA00C) +#define RTL8380_LED0_SW_P_EN_CTRL (0xA010) +#define RTL8380_LED1_SW_P_EN_CTRL (0xA014) +#define RTL8380_LED2_SW_P_EN_CTRL (0xA018) +#define RTL8380_LED_SW_P_CTRL(p) (0xA01C + (((p) << 2))) + +#define RTL8390_LED_GLB_CTRL (0x00E4) +#define RTL8390_LED_SET_2_3_CTRL (0x00E8) +#define RTL8390_LED_SET_0_1_CTRL (0x00EC) +#define RTL8390_LED_COPR_SET_SEL_CTRL(p) (0x00F0 + (((p >> 4) << 2))) +#define RTL8390_LED_FIB_SET_SEL_CTRL(p) (0x0100 + (((p >> 4) << 2))) +#define RTL8390_LED_COPR_PMASK_CTRL(p) (0x0110 + (((p >> 5) << 2))) +#define RTL8390_LED_FIB_PMASK_CTRL(p) (0x00118 + (((p >> 5) << 2))) +#define RTL8390_LED_COMBO_CTRL(p) (0x0120 + (((p >> 5) << 2))) +#define RTL8390_LED_SW_CTRL (0x0128) +#define RTL8390_LED_SW_P_EN_CTRL(p) (0x012C + (((p / 10) << 2))) +#define RTL8390_LED_SW_P_CTRL(p) (0x0144 + (((p) << 2))) + +#define RTL838X_MIR_QID_CTRL(grp) (0xAD44 + (((grp) << 2))) +#define RTL838X_MIR_RSPAN_VLAN_CTRL(grp) (0xA340 + (((grp) << 2))) +#define RTL838X_MIR_RSPAN_VLAN_CTRL_MAC(grp) (0xAA70 + (((grp) << 2))) +#define RTL838X_MIR_RSPAN_TX_CTRL (0xA350) +#define RTL838X_MIR_RSPAN_TX_TAG_RM_CTRL (0xAA80) +#define RTL838X_MIR_RSPAN_TX_TAG_EN_CTRL (0xAA84) +#define RTL839X_MIR_RSPAN_VLAN_CTRL(grp) (0xA340 + (((grp) << 2))) +#define RTL839X_MIR_RSPAN_TX_CTRL (0x69b0) +#define RTL839X_MIR_RSPAN_TX_TAG_RM_CTRL (0x2550) +#define RTL839X_MIR_RSPAN_TX_TAG_EN_CTRL (0x2554) +#define RTL839X_MIR_SAMPLE_RATE_CTRL (0x2558) + +#define RTL838X_STAT_PRVTE_DROP_COUNTERS (0x6A00) +#define RTL839X_STAT_PRVTE_DROP_COUNTERS (0x3E00) +#define RTL930X_STAT_PRVTE_DROP_COUNTERS (0xB5B8) +#define RTL931X_STAT_PRVTE_DROP_COUNTERS (0xd800) + +int rtl83xx_port_get_stp_state(struct rtl838x_switch_priv *priv, int port); +void rtl83xx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state); +void rtl83xx_fast_age(struct dsa_switch *ds, int port); +u32 rtl838x_get_egress_rate(struct rtl838x_switch_priv *priv, int port); +u32 rtl839x_get_egress_rate(struct rtl838x_switch_priv *priv, int port); +int rtl838x_set_egress_rate(struct rtl838x_switch_priv *priv, int port, u32 rate); +int rtl839x_set_egress_rate(struct rtl838x_switch_priv *priv, int port, u32 rate); + + +const char *rtl838x_drop_cntr[] = { + "ALE_TX_GOOD_PKTS", "MAC_RX_DROP", "ACL_FWD_DROP", "HW_ATTACK_PREVENTION_DROP", + "RMA_DROP", "VLAN_IGR_FLTR_DROP", "INNER_OUTER_CFI_EQUAL_1_DROP", "PORT_MOVE_DROP", + "NEW_SA_DROP", "MAC_LIMIT_SYS_DROP", "MAC_LIMIT_VLAN_DROP", "MAC_LIMIT_PORT_DROP", + "SWITCH_MAC_DROP", "ROUTING_EXCEPTION_DROP", "DA_LKMISS_DROP", "RSPAN_DROP", + "ACL_LKMISS_DROP", "ACL_DROP", "INBW_DROP", "IGR_METER_DROP", + "ACCEPT_FRAME_TYPE_DROP", "STP_IGR_DROP", "INVALID_SA_DROP", "SA_BLOCKING_DROP", + "DA_BLOCKING_DROP", "L2_INVALID_DPM_DROP", "MCST_INVALID_DPM_DROP", "RX_FLOW_CONTROL_DROP", + "STORM_SPPRS_DROP", "LALS_DROP", 
"VLAN_EGR_FILTER_DROP", "STP_EGR_DROP", + "SRC_PORT_FILTER_DROP", "PORT_ISOLATION_DROP", "ACL_FLTR_DROP", "MIRROR_FLTR_DROP", + "TX_MAX_DROP", "LINK_DOWN_DROP", "FLOW_CONTROL_DROP", "BRIDGE .1d discards" +}; + +const char *rtl839x_drop_cntr[] = { + "ALE_TX_GOOD_PKTS", "ERROR_PKTS", "EGR_ACL_DROP", "EGR_METER_DROP", + "OAM", "CFM" "VLAN_IGR_FLTR", "VLAN_ERR", + "INNER_OUTER_CFI_EQUAL_1", "VLAN_TAG_FORMAT", "SRC_PORT_SPENDING_TREE", "INBW", + "RMA", "HW_ATTACK_PREVENTION", "PROTO_STORM", "MCAST_SA", + "IGR_ACL_DROP", "IGR_METER_DROP", "DFLT_ACTION_FOR_MISS_ACL_AND_C2SC", "NEW_SA", + "PORT_MOVE", "SA_BLOCKING", "ROUTING_EXCEPTION", "SRC_PORT_SPENDING_TREE_NON_FWDING", + "MAC_LIMIT", "UNKNOW_STORM", "MISS_DROP", "CPU_MAC_DROP", + "DA_BLOCKING", "SRC_PORT_FILTER_BEFORE_EGR_ACL", "VLAN_EGR_FILTER", "SPANNING_TRE", + "PORT_ISOLATION", "OAM_EGRESS_DROP", "MIRROR_ISOLATION", "MAX_LEN_BEFORE_EGR_ACL", + "SRC_PORT_FILTER_BEFORE_MIRROR", "MAX_LEN_BEFORE_MIRROR", "SPECIAL_CONGEST_BEFORE_MIRROR", + "LINK_STATUS_BEFORE_MIRROR", + "WRED_BEFORE_MIRROR", "MAX_LEN_AFTER_MIRROR", "SPECIAL_CONGEST_AFTER_MIRROR", + "LINK_STATUS_AFTER_MIRROR", + "WRED_AFTER_MIRROR" +}; + +const char *rtl930x_drop_cntr[] = { + "OAM_PARSER", "UC_RPF", "DEI_CFI", "MAC_IP_SUBNET_BASED_VLAN", "VLAN_IGR_FILTER", + "L2_UC_MC", "IPV_IP6_MC_BRIDGE", "PTP", "USER_DEF_0_3", "RESERVED", + "RESERVED1", "RESERVED2", "BPDU_RMA", "LACP", "LLDP", + "EAPOL", "XX_RMA", "L3_IPUC_NON_IP", "IP4_IP6_HEADER_ERROR", "L3_BAD_IP", + "L3_DIP_DMAC_MISMATCH", "IP4_IP_OPTION", "IP_UC_MC_ROUTING_LOOK_UP_MISS", "L3_DST_NULL_INTF", + "L3_PBR_NULL_INTF", + "HOST_NULL_INTF", "ROUTE_NULL_INTF", "BRIDGING_ACTION", "ROUTING_ACTION", "IPMC_RPF", + "L2_NEXTHOP_AGE_OUT", "L3_UC_TTL_FAIL", "L3_MC_TTL_FAIL", "L3_UC_MTU_FAIL", "L3_MC_MTU_FAIL", + "L3_UC_ICMP_REDIR", "IP6_MLD_OTHER_ACT", "ND", "IP_MC_RESERVED", "IP6_HBH", + "INVALID_SA", "L2_HASH_FULL", "NEW_SA", "PORT_MOVE_FORBID", "STATIC_PORT_MOVING", + "DYNMIC_PORT_MOVING", "L3_CRC", "MAC_LIMIT", "ATTACK_PREVENT", "ACL_FWD_ACTION", + "OAMPDU", "OAM_MUX", "TRUNK_FILTER", "ACL_DROP", "IGR_BW", + "ACL_METER", "VLAN_ACCEPT_FRAME_TYPE", "MSTP_SRC_DROP_DISABLED_BLOCKING", "SA_BLOCK", "DA_BLOCK", + "STORM_CONTROL", "VLAN_EGR_FILTER", "MSTP_DESTINATION_DROP", "SRC_PORT_FILTER", "PORT_ISOLATION", + "TX_MAX_FRAME_SIZE", "EGR_LINK_STATUS", "MAC_TX_DISABLE", "MAC_PAUSE_FRAME", "MAC_RX_DROP", + "MIRROR_ISOLATE", "RX_FC", "EGR_QUEUE", "HSM_RUNOUT", "ROUTING_DISABLE", "INVALID_L2_NEXTHOP_ENTRY", + "L3_MC_SRC_FLT", "CPUTAG_FLT", "FWD_PMSK_NULL", "IPUC_ROUTING_LOOKUP_MISS", "MY_DEV_DROP", + "STACK_NONUC_BLOCKING_PMSK", "STACK_PORT_NOT_FOUND", "ACL_LOOPBACK_DROP", "IP6_ROUTING_EXT_HEADER" +}; + +const char *rtl931x_drop_cntr[] = { + "ALE_RX_GOOD_PKTS", "RX_MAX_FRAME_SIZE", "MAC_RX_DROP", "OPENFLOW_IP_MPLS_TTL", "OPENFLOW_TBL_MISS", + "IGR_BW", "SPECIAL_CONGEST", "EGR_QUEUE", "RESERVED", "EGR_LINK_STATUS", "STACK_UCAST_NONUCAST_TTL", // 10 + "STACK_NONUC_BLOCKING_PMSK", "L2_CRC", "SRC_PORT_FILTER", "PARSER_PACKET_TOO_LONG", "PARSER_MALFORM_PACKET", + "MPLS_OVER_2_LBL", "EACL_METER", "IACL_METER", "PROTO_STORM", "INVALID_CAPWAP_HEADER", // 20 + "MAC_IP_SUBNET_BASED_VLAN", "OAM_PARSER", "UC_MC_RPF", "IP_MAC_BINDING_MATCH_MISMATCH", "SA_BLOCK", + "TUNNEL_IP_ADDRESS_CHECK", "EACL_DROP", "IACL_DROP", "ATTACK_PREVENT", "SYSTEM_PORT_LIMIT_LEARN", // 30, + "OAMPDU", "CCM_RX", "CFM_UNKNOWN_TYPE", "LBM_LBR_LTM_LTR", "Y_1731", "VLAN_LIMIT_LEARN", + "VLAN_ACCEPT_FRAME_TYPE", "CFI_1", "STATIC_DYNAMIC_PORT_MOVING", "PORT_MOVE_FORBID", // 40 + "L3_CRC", 
"BPDU_PTP_LLDP_EAPOL_RMA", "MSTP_SRC_DROP_DISABLED_BLOCKING", "INVALID_SA", "NEW_SA", + "VLAN_IGR_FILTER", "IGR_VLAN_CONVERT", "GRATUITOUS_ARP", "MSTP_SRC_DROP", "L2_HASH_FULL", // 50 + "MPLS_UNKNOWN_LBL", "L3_IPUC_NON_IP", "TTL", "MTU", "ICMP_REDIRECT", "STORM_CONTROL", "L3_DIP_DMAC_MISMATCH", + "IP4_IP_OPTION", "IP6_HBH_EXT_HEADER", "IP4_IP6_HEADER_ERROR", // 60 + "ROUTING_IP_ADDR_CHECK", "ROUTING_EXCEPTION", "DA_BLOCK", "OAM_MUX", "PORT_ISOLATION", "VLAN_EGR_FILTER", + "MIRROR_ISOLATE", "MSTP_DESTINATION_DROP", "L2_MC_BRIDGE", "IP_UC_MC_ROUTING_LOOK_UP_MISS", // 70 + "L2_UC", "L2_MC", "IP4_MC", "IP6_MC", "L3_UC_MC_ROUTE", "UNKNOWN_L2_UC_FLPM", "BC_FLPM", + "VLAN_PRO_UNKNOWN_L2_MC_FLPM", "VLAN_PRO_UNKNOWN_IP4_MC_FLPM", "VLAN_PROFILE_UNKNOWN_IP6_MC_FLPM" // 80, +}; + +static ssize_t rtl838x_common_read(char __user *buffer, size_t count, + loff_t *ppos, unsigned int value) +{ + char *buf; + ssize_t len; + + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "0x%08x\n", value); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + kfree(buf); + + return len; +} + +static ssize_t rtl838x_common_write(const char __user *buffer, size_t count, + loff_t *ppos, unsigned int *value) +{ + char b[32]; + ssize_t len; + int ret; + + if (*ppos != 0) + return -EINVAL; + + if (count >= sizeof(b)) + return -ENOSPC; + + len = simple_write_to_buffer(b, sizeof(b) - 1, ppos, + buffer, count); + if (len < 0) + return len; + + b[len] = '\0'; + ret = kstrtouint(b, 16, value); + if (ret) + return -EIO; + + return len; +} + +static ssize_t stp_state_read(struct file *filp, char __user *buffer, size_t count, + loff_t *ppos) +{ + struct rtl838x_port *p = filp->private_data; + struct dsa_switch *ds = p->dp->ds; + int value = rtl83xx_port_get_stp_state(ds->priv, p->dp->index); + + if (value < 0) + return -EINVAL; + + return rtl838x_common_read(buffer, count, ppos, (u32)value); +} + +static ssize_t stp_state_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct rtl838x_port *p = filp->private_data; + u32 value; + size_t res = rtl838x_common_write(buffer, count, ppos, &value); + if (res < 0) + return res; + + rtl83xx_port_stp_state_set(p->dp->ds, p->dp->index, (u8)value); + + return res; +} + +static const struct file_operations stp_state_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = stp_state_read, + .write = stp_state_write, +}; + +static ssize_t drop_counter_read(struct file *filp, char __user *buffer, size_t count, + loff_t *ppos) +{ + struct rtl838x_switch_priv *priv = filp->private_data; + int i; + const char **d; + u32 v; + char *buf; + int n = 0, len, offset; + int num; + + switch (priv->family_id) { + case RTL8380_FAMILY_ID: + d = rtl838x_drop_cntr; + offset = RTL838X_STAT_PRVTE_DROP_COUNTERS; + num = 40; + break; + case RTL8390_FAMILY_ID: + d = rtl839x_drop_cntr; + offset = RTL839X_STAT_PRVTE_DROP_COUNTERS; + num = 45; + break; + case RTL9300_FAMILY_ID: + d = rtl930x_drop_cntr; + offset = RTL930X_STAT_PRVTE_DROP_COUNTERS; + num = 85; + break; + case RTL9310_FAMILY_ID: + d = rtl931x_drop_cntr; + offset = RTL931X_STAT_PRVTE_DROP_COUNTERS; + num = 81; + break; + } + + buf = kmalloc(30 * num, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < num; i++) { + v = sw_r32(offset + (i << 2)) & 0xffff; + n += sprintf(buf + n, "%s: %d\n", d[i], v); + } + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len 
= simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + kfree(buf); + + return len; +} + +static const struct file_operations drop_counter_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drop_counter_read, +}; + +static void l2_table_print_entry(struct seq_file *m, struct rtl838x_switch_priv *priv, + struct rtl838x_l2_entry *e) +{ + u64 portmask; + int i; + + if (e->type == L2_UNICAST) { + seq_puts(m, "L2_UNICAST\n"); + + seq_printf(m, " mac %02x:%02x:%02x:%02x:%02x:%02x vid %u rvid %u\n", + e->mac[0], e->mac[1], e->mac[2], e->mac[3], e->mac[4], e->mac[5], + e->vid, e->rvid); + + seq_printf(m, " port %d age %d", e->port, e->age); + if (e->is_static) + seq_puts(m, " static"); + if (e->block_da) + seq_puts(m, " block_da"); + if (e->block_sa) + seq_puts(m, " block_sa"); + if (e->suspended) + seq_puts(m, " suspended"); + if (e->next_hop) + seq_printf(m, " next_hop route_id %u", e->nh_route_id); + seq_puts(m, "\n"); + + } else { + if (e->type == L2_MULTICAST) { + seq_puts(m, "L2_MULTICAST\n"); + + seq_printf(m, " mac %02x:%02x:%02x:%02x:%02x:%02x vid %u rvid %u\n", + e->mac[0], e->mac[1], e->mac[2], e->mac[3], e->mac[4], e->mac[5], + e->vid, e->rvid); + } + + if (e->type == IP4_MULTICAST || e->type == IP6_MULTICAST) { + seq_puts(m, (e->type == IP4_MULTICAST) ? + "IP4_MULTICAST\n" : "IP6_MULTICAST\n"); + + seq_printf(m, " gip %08x sip %08x vid %u rvid %u\n", + e->mc_gip, e->mc_sip, e->vid, e->rvid); + } + + portmask = priv->r->read_mcast_pmask(e->mc_portmask_index); + seq_printf(m, " index %u ports", e->mc_portmask_index); + for (i = 0; i < 64; i++) { + if (portmask & BIT_ULL(i)) + seq_printf(m, " %d", i); + } + seq_puts(m, "\n"); + } + + seq_puts(m, "\n"); +} + +static int l2_table_show(struct seq_file *m, void *v) +{ + struct rtl838x_switch_priv *priv = m->private; + struct rtl838x_l2_entry e; + int i, bucket, index; + + mutex_lock(&priv->reg_mutex); + + for (i = 0; i < priv->fib_entries; i++) { + bucket = i >> 2; + index = i & 0x3; + priv->r->read_l2_entry_using_hash(bucket, index, &e); + + if (!e.valid) + continue; + + seq_printf(m, "Hash table bucket %d index %d ", bucket, index); + l2_table_print_entry(m, priv, &e); + } + + for (i = 0; i < 64; i++) { + priv->r->read_cam(i, &e); + + if (!e.valid) + continue; + + seq_printf(m, "CAM index %d ", i); + l2_table_print_entry(m, priv, &e); + } + + mutex_unlock(&priv->reg_mutex); + + return 0; +} + +static int l2_table_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, l2_table_show, inode->i_private); +} + +static const struct file_operations l2_table_fops = { + .owner = THIS_MODULE, + .open = l2_table_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t age_out_read(struct file *filp, char __user *buffer, size_t count, + loff_t *ppos) +{ + struct rtl838x_port *p = filp->private_data; + struct dsa_switch *ds = p->dp->ds; + struct rtl838x_switch_priv *priv = ds->priv; + int value = sw_r32(priv->r->l2_port_aging_out); + + if (value < 0) + return -EINVAL; + + return rtl838x_common_read(buffer, count, ppos, (u32)value); +} + +static ssize_t age_out_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct rtl838x_port *p = filp->private_data; + u32 value; + size_t res = rtl838x_common_write(buffer, count, ppos, &value); + if (res < 0) + return res; + + rtl83xx_fast_age(p->dp->ds, p->dp->index); + + return res; +} + +static const struct file_operations age_out_fops = { + .owner = THIS_MODULE, + .open = simple_open, + 
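/* simple_open() stores inode->i_private - the struct rtl838x_port passed to + * debugfs_create_file() below - in filp->private_data for these handlers. */ +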
.read = age_out_read, + .write = age_out_write, +}; + +static ssize_t port_egress_rate_read(struct file *filp, char __user *buffer, size_t count, + loff_t *ppos) +{ + struct rtl838x_port *p = filp->private_data; + struct dsa_switch *ds = p->dp->ds; + struct rtl838x_switch_priv *priv = ds->priv; + int value; + if (priv->family_id == RTL8380_FAMILY_ID) + value = rtl838x_get_egress_rate(priv, p->dp->index); + else + value = rtl839x_get_egress_rate(priv, p->dp->index); + + if (value < 0) + return -EINVAL; + + return rtl838x_common_read(buffer, count, ppos, (u32)value); +} + +static ssize_t port_egress_rate_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct rtl838x_port *p = filp->private_data; + struct dsa_switch *ds = p->dp->ds; + struct rtl838x_switch_priv *priv = ds->priv; + u32 value; + size_t res = rtl838x_common_write(buffer, count, ppos, &value); + if (res < 0) + return res; + + if (priv->family_id == RTL8380_FAMILY_ID) + rtl838x_set_egress_rate(priv, p->dp->index, value); + else + rtl839x_set_egress_rate(priv, p->dp->index, value); + + return res; +} + +static const struct file_operations port_egress_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = port_egress_rate_read, + .write = port_egress_rate_write, +}; + + +static const struct debugfs_reg32 port_ctrl_regs[] = { + { .name = "port_isolation", .offset = RTL838X_PORT_ISO_CTRL(0), }, + { .name = "mac_force_mode", .offset = RTL838X_MAC_FORCE_MODE_CTRL, }, +}; + +void rtl838x_dbgfs_cleanup(struct rtl838x_switch_priv *priv) +{ + debugfs_remove_recursive(priv->dbgfs_dir); + +// kfree(priv->dbgfs_entries); +} + +static int rtl838x_dbgfs_port_init(struct dentry *parent, struct rtl838x_switch_priv *priv, + int port) +{ + struct dentry *port_dir; + struct debugfs_regset32 *port_ctrl_regset; + + port_dir = debugfs_create_dir(priv->ports[port].dp->name, parent); + + if (priv->family_id == RTL8380_FAMILY_ID) { + debugfs_create_x32("storm_rate_uc", 0644, port_dir, + (u32 *)(RTL838X_SW_BASE + RTL838X_STORM_CTRL_PORT_UC(port))); + + debugfs_create_x32("storm_rate_mc", 0644, port_dir, + (u32 *)(RTL838X_SW_BASE + RTL838X_STORM_CTRL_PORT_MC(port))); + + debugfs_create_x32("storm_rate_bc", 0644, port_dir, + (u32 *)(RTL838X_SW_BASE + RTL838X_STORM_CTRL_PORT_BC(port))); + } else { + debugfs_create_x32("storm_rate_uc", 0644, port_dir, + (u32 *)(RTL838X_SW_BASE + RTL839X_STORM_CTRL_PORT_UC_0(port))); + + debugfs_create_x32("storm_rate_mc", 0644, port_dir, + (u32 *)(RTL838X_SW_BASE + RTL839X_STORM_CTRL_PORT_MC_0(port))); + + debugfs_create_x32("storm_rate_bc", 0644, port_dir, + (u32 *)(RTL838X_SW_BASE + RTL839X_STORM_CTRL_PORT_BC_0(port))); + } + + debugfs_create_u32("id", 0444, port_dir, (u32 *)&priv->ports[port].dp->index); + + port_ctrl_regset = devm_kzalloc(priv->dev, sizeof(*port_ctrl_regset), GFP_KERNEL); + if (!port_ctrl_regset) + return -ENOMEM; + + port_ctrl_regset->regs = port_ctrl_regs; + port_ctrl_regset->nregs = ARRAY_SIZE(port_ctrl_regs); + port_ctrl_regset->base = (void *)(RTL838X_SW_BASE + (port << 2)); + debugfs_create_regset32("port_ctrl", 0400, port_dir, port_ctrl_regset); + + debugfs_create_file("stp_state", 0600, port_dir, &priv->ports[port], &stp_state_fops); + debugfs_create_file("age_out", 0600, port_dir, &priv->ports[port], &age_out_fops); + debugfs_create_file("port_egress_rate", 0600, port_dir, &priv->ports[port], + &port_egress_fops); + return 0; +} + +static int rtl838x_dbgfs_leds(struct dentry *parent, struct rtl838x_switch_priv *priv) +{ + struct dentry *led_dir; + 
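/* All LED configuration registers are grouped below a common "led" debugfs directory. */ +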
int p; + char led_sw_p_ctrl_name[20]; + char port_led_name[20]; + + led_dir = debugfs_create_dir("led", parent); + + if (priv->family_id == RTL8380_FAMILY_ID) { + debugfs_create_x32("led_glb_ctrl", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8380_LED_GLB_CTRL)); + debugfs_create_x32("led_mode_sel", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8380_LED_MODE_SEL)); + debugfs_create_x32("led_mode_ctrl", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8380_LED_MODE_CTRL)); + debugfs_create_x32("led_p_en_ctrl", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8380_LED_P_EN_CTRL)); + debugfs_create_x32("led_sw_ctrl", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8380_LED_SW_CTRL)); + debugfs_create_x32("led0_sw_p_en_ctrl", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8380_LED0_SW_P_EN_CTRL)); + debugfs_create_x32("led1_sw_p_en_ctrl", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8380_LED1_SW_P_EN_CTRL)); + debugfs_create_x32("led2_sw_p_en_ctrl", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8380_LED2_SW_P_EN_CTRL)); + for (p = 0; p < 28; p++) { + snprintf(led_sw_p_ctrl_name, sizeof(led_sw_p_ctrl_name), + "led_sw_p_ctrl.%02d", p); + debugfs_create_x32(led_sw_p_ctrl_name, 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8380_LED_SW_P_CTRL(p))); + } + } else if (priv->family_id == RTL8390_FAMILY_ID) { + debugfs_create_x32("led_glb_ctrl", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8390_LED_GLB_CTRL)); + debugfs_create_x32("led_set_2_3", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8390_LED_SET_2_3_CTRL)); + debugfs_create_x32("led_set_0_1", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8390_LED_SET_0_1_CTRL)); + for (p = 0; p < 4; p++) { + snprintf(port_led_name, sizeof(port_led_name), "led_copr_set_sel.%1d", p); + debugfs_create_x32(port_led_name, 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8390_LED_COPR_SET_SEL_CTRL(p << 4))); + snprintf(port_led_name, sizeof(port_led_name), "led_fib_set_sel.%1d", p); + debugfs_create_x32(port_led_name, 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8390_LED_FIB_SET_SEL_CTRL(p << 4))); + } + debugfs_create_x32("led_copr_pmask_ctrl_0", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8390_LED_COPR_PMASK_CTRL(0))); + debugfs_create_x32("led_copr_pmask_ctrl_1", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8390_LED_COPR_PMASK_CTRL(32))); + debugfs_create_x32("led_fib_pmask_ctrl_0", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8390_LED_FIB_PMASK_CTRL(0))); + debugfs_create_x32("led_fib_pmask_ctrl_1", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8390_LED_FIB_PMASK_CTRL(32))); + debugfs_create_x32("led_combo_ctrl_0", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8390_LED_COMBO_CTRL(0))); + debugfs_create_x32("led_combo_ctrl_1", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8390_LED_COMBO_CTRL(32))); + debugfs_create_x32("led_sw_ctrl", 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8390_LED_SW_CTRL)); + for (p = 0; p < 5; p++) { + snprintf(port_led_name, sizeof(port_led_name), "led_sw_p_en_ctrl.%1d", p); + debugfs_create_x32(port_led_name, 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8390_LED_SW_P_EN_CTRL(p * 10))); + } + for (p = 0; p < 28; p++) { + snprintf(port_led_name, sizeof(port_led_name), "led_sw_p_ctrl.%02d", p); + debugfs_create_x32(port_led_name, 0644, led_dir, + (u32 *)(RTL838X_SW_BASE + RTL8390_LED_SW_P_CTRL(p))); + } + } + return 0; +} + +void rtl838x_dbgfs_init(struct rtl838x_switch_priv *priv) +{ + struct dentry *rtl838x_dir; + struct dentry *port_dir; + struct dentry *mirror_dir; + struct debugfs_regset32 *port_ctrl_regset; + int ret, i; + char 
lag_name[10]; + char mirror_name[10]; + + pr_info("%s called\n", __func__); + rtl838x_dir = debugfs_lookup(RTL838X_DRIVER_NAME, NULL); + if (!rtl838x_dir) + rtl838x_dir = debugfs_create_dir(RTL838X_DRIVER_NAME, NULL); + + priv->dbgfs_dir = rtl838x_dir; + + debugfs_create_u32("soc", 0444, rtl838x_dir, + (u32 *)(RTL838X_SW_BASE + RTL838X_MODEL_NAME_INFO)); + + /* Create one directory per port */ + for (i = 0; i < priv->cpu_port; i++) { + if (priv->ports[i].phy) { + ret = rtl838x_dbgfs_port_init(rtl838x_dir, priv, i); + if (ret) + goto err; + } + } + + /* Create directory for CPU-port */ + port_dir = debugfs_create_dir("cpu_port", rtl838x_dir); + port_ctrl_regset = devm_kzalloc(priv->dev, sizeof(*port_ctrl_regset), GFP_KERNEL); + if (!port_ctrl_regset) { + ret = -ENOMEM; + goto err; + } + + port_ctrl_regset->regs = port_ctrl_regs; + port_ctrl_regset->nregs = ARRAY_SIZE(port_ctrl_regs); + port_ctrl_regset->base = (void *)(RTL838X_SW_BASE + (priv->cpu_port << 2)); + debugfs_create_regset32("port_ctrl", 0400, port_dir, port_ctrl_regset); + debugfs_create_u8("id", 0444, port_dir, &priv->cpu_port); + + /* Create entries for LAGs */ + for (i = 0; i < priv->n_lags; i++) { + snprintf(lag_name, sizeof(lag_name), "lag.%02d", i); + if (priv->family_id == RTL8380_FAMILY_ID) + debugfs_create_x32(lag_name, 0644, rtl838x_dir, + (u32 *)(RTL838X_SW_BASE + priv->r->trk_mbr_ctr(i))); + else + debugfs_create_x64(lag_name, 0644, rtl838x_dir, + (u64 *)(RTL838X_SW_BASE + priv->r->trk_mbr_ctr(i))); + } + + /* Create directories for mirror groups */ + for (i = 0; i < 4; i++) { + snprintf(mirror_name, sizeof(mirror_name), "mirror.%1d", i); + mirror_dir = debugfs_create_dir(mirror_name, rtl838x_dir); + if (priv->family_id == RTL8380_FAMILY_ID) { + debugfs_create_x32("ctrl", 0644, mirror_dir, + (u32 *)(RTL838X_SW_BASE + RTL838X_MIR_CTRL + i * 4)); + debugfs_create_x32("ingress_pm", 0644, mirror_dir, + (u32 *)(RTL838X_SW_BASE + priv->r->mir_spm + i * 4)); + debugfs_create_x32("egress_pm", 0644, mirror_dir, + (u32 *)(RTL838X_SW_BASE + priv->r->mir_dpm + i * 4)); + debugfs_create_x32("qid", 0644, mirror_dir, + (u32 *)(RTL838X_SW_BASE + RTL838X_MIR_QID_CTRL(i))); + debugfs_create_x32("rspan_vlan", 0644, mirror_dir, + (u32 *)(RTL838X_SW_BASE + RTL838X_MIR_RSPAN_VLAN_CTRL(i))); + debugfs_create_x32("rspan_vlan_mac", 0644, mirror_dir, + (u32 *)(RTL838X_SW_BASE + RTL838X_MIR_RSPAN_VLAN_CTRL_MAC(i))); + debugfs_create_x32("rspan_tx", 0644, mirror_dir, + (u32 *)(RTL838X_SW_BASE + RTL838X_MIR_RSPAN_TX_CTRL)); + debugfs_create_x32("rspan_tx_tag_rm", 0644, mirror_dir, + (u32 *)(RTL838X_SW_BASE + RTL838X_MIR_RSPAN_TX_TAG_RM_CTRL)); + debugfs_create_x32("rspan_tx_tag_en", 0644, mirror_dir, + (u32 *)(RTL838X_SW_BASE + RTL838X_MIR_RSPAN_TX_TAG_EN_CTRL)); + } else { + debugfs_create_x32("ctrl", 0644, mirror_dir, + (u32 *)(RTL838X_SW_BASE + RTL839X_MIR_CTRL + i * 4)); + debugfs_create_x64("ingress_pm", 0644, mirror_dir, + (u64 *)(RTL838X_SW_BASE + priv->r->mir_spm + i * 8)); + debugfs_create_x64("egress_pm", 0644, mirror_dir, + (u64 *)(RTL838X_SW_BASE + priv->r->mir_dpm + i * 8)); + debugfs_create_x32("rspan_vlan", 0644, mirror_dir, + (u32 *)(RTL838X_SW_BASE + RTL839X_MIR_RSPAN_VLAN_CTRL(i))); + debugfs_create_x32("rspan_tx", 0644, mirror_dir, + (u32 *)(RTL838X_SW_BASE + RTL839X_MIR_RSPAN_TX_CTRL)); + debugfs_create_x32("rspan_tx_tag_rm", 0644, mirror_dir, + (u32 *)(RTL838X_SW_BASE + RTL839X_MIR_RSPAN_TX_TAG_RM_CTRL)); + debugfs_create_x32("rspan_tx_tag_en", 0644, mirror_dir, + (u32 *)(RTL838X_SW_BASE + RTL839X_MIR_RSPAN_TX_TAG_EN_CTRL)); 
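+ /* Mirror group i uses one 32-bit control word (offset i * 4) but 64-bit + * port masks (offset i * 8) on the RTL839x, hence the x32/x64 split here. */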
+ debugfs_create_x64("sample_rate", 0644, mirror_dir, + (u64 *)(RTL838X_SW_BASE + RTL839X_MIR_SAMPLE_RATE_CTRL)); + } + } + + if (priv->family_id == RTL8380_FAMILY_ID) + debugfs_create_x32("bpdu_flood_mask", 0644, rtl838x_dir, + (u32 *)(RTL838X_SW_BASE + priv->r->rma_bpdu_fld_pmask)); + else + debugfs_create_x64("bpdu_flood_mask", 0644, rtl838x_dir, + (u64 *)(RTL838X_SW_BASE + priv->r->rma_bpdu_fld_pmask)); + + if (priv->family_id == RTL8380_FAMILY_ID) + debugfs_create_x32("vlan_ctrl", 0644, rtl838x_dir, + (u32 *)(RTL838X_SW_BASE + RTL838X_VLAN_CTRL)); + else + debugfs_create_x32("vlan_ctrl", 0644, rtl838x_dir, + (u32 *)(RTL838X_SW_BASE + RTL839X_VLAN_CTRL)); + + ret = rtl838x_dbgfs_leds(rtl838x_dir, priv); + if (ret) + goto err; + + debugfs_create_file("drop_counters", 0400, rtl838x_dir, priv, &drop_counter_fops); + + debugfs_create_file("l2_table", 0400, rtl838x_dir, priv, &l2_table_fops); + + return; +err: + rtl838x_dbgfs_cleanup(priv); +} + +void rtl930x_dbgfs_init(struct rtl838x_switch_priv *priv) +{ + struct dentry *dbg_dir; + + pr_info("%s called\n", __func__); + dbg_dir = debugfs_lookup(RTL838X_DRIVER_NAME, NULL); + if (!dbg_dir) + dbg_dir = debugfs_create_dir(RTL838X_DRIVER_NAME, NULL); + + priv->dbgfs_dir = dbg_dir; + + debugfs_create_file("drop_counters", 0400, dbg_dir, priv, &drop_counter_fops); + + debugfs_create_file("l2_table", 0400, dbg_dir, priv, &l2_table_fops); +} diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/dsa.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/dsa.c new file mode 100644 index 0000000000..6eea0dc936 --- /dev/null +++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/dsa.c @@ -0,0 +1,2259 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +#include +#include "rtl83xx.h" + + +extern struct rtl83xx_soc_info soc_info; + + +static void rtl83xx_init_stats(struct rtl838x_switch_priv *priv) +{ + mutex_lock(&priv->reg_mutex); + + /* Enable statistics module: all counters plus debug. 
+ * On RTL839x all counters are enabled by default + */ + if (priv->family_id == RTL8380_FAMILY_ID) + sw_w32_mask(0, 3, RTL838X_STAT_CTRL); + + /* Reset statistics counters */ + sw_w32_mask(0, 1, priv->r->stat_rst); + + mutex_unlock(&priv->reg_mutex); +} + +static void rtl83xx_enable_phy_polling(struct rtl838x_switch_priv *priv) +{ + int i; + u64 v = 0; + + msleep(1000); + /* Enable all ports with a PHY, including the SFP-ports */ + for (i = 0; i < priv->cpu_port; i++) { + if (priv->ports[i].phy) + v |= BIT_ULL(i); + } + + pr_info("%s: %16llx\n", __func__, v); + priv->r->set_port_reg_le(v, priv->r->smi_poll_ctrl); + + /* PHY update complete, there is no global PHY polling enable bit on the 9300 */ + if (priv->family_id == RTL8390_FAMILY_ID) + sw_w32_mask(0, BIT(7), RTL839X_SMI_GLB_CTRL); + else if(priv->family_id == RTL9300_FAMILY_ID) + sw_w32_mask(0, 0x8000, RTL838X_SMI_GLB_CTRL); +} + +const struct rtl83xx_mib_desc rtl83xx_mib[] = { + MIB_DESC(2, 0xf8, "ifInOctets"), + MIB_DESC(2, 0xf0, "ifOutOctets"), + MIB_DESC(1, 0xec, "dot1dTpPortInDiscards"), + MIB_DESC(1, 0xe8, "ifInUcastPkts"), + MIB_DESC(1, 0xe4, "ifInMulticastPkts"), + MIB_DESC(1, 0xe0, "ifInBroadcastPkts"), + MIB_DESC(1, 0xdc, "ifOutUcastPkts"), + MIB_DESC(1, 0xd8, "ifOutMulticastPkts"), + MIB_DESC(1, 0xd4, "ifOutBroadcastPkts"), + MIB_DESC(1, 0xd0, "ifOutDiscards"), + MIB_DESC(1, 0xcc, ".3SingleCollisionFrames"), + MIB_DESC(1, 0xc8, ".3MultipleCollisionFrames"), + MIB_DESC(1, 0xc4, ".3DeferredTransmissions"), + MIB_DESC(1, 0xc0, ".3LateCollisions"), + MIB_DESC(1, 0xbc, ".3ExcessiveCollisions"), + MIB_DESC(1, 0xb8, ".3SymbolErrors"), + MIB_DESC(1, 0xb4, ".3ControlInUnknownOpcodes"), + MIB_DESC(1, 0xb0, ".3InPauseFrames"), + MIB_DESC(1, 0xac, ".3OutPauseFrames"), + MIB_DESC(1, 0xa8, "DropEvents"), + MIB_DESC(1, 0xa4, "tx_BroadcastPkts"), + MIB_DESC(1, 0xa0, "tx_MulticastPkts"), + MIB_DESC(1, 0x9c, "CRCAlignErrors"), + MIB_DESC(1, 0x98, "tx_UndersizePkts"), + MIB_DESC(1, 0x94, "rx_UndersizePkts"), + MIB_DESC(1, 0x90, "rx_UndersizedropPkts"), + MIB_DESC(1, 0x8c, "tx_OversizePkts"), + MIB_DESC(1, 0x88, "rx_OversizePkts"), + MIB_DESC(1, 0x84, "Fragments"), + MIB_DESC(1, 0x80, "Jabbers"), + MIB_DESC(1, 0x7c, "Collisions"), + MIB_DESC(1, 0x78, "tx_Pkts64Octets"), + MIB_DESC(1, 0x74, "rx_Pkts64Octets"), + MIB_DESC(1, 0x70, "tx_Pkts65to127Octets"), + MIB_DESC(1, 0x6c, "rx_Pkts65to127Octets"), + MIB_DESC(1, 0x68, "tx_Pkts128to255Octets"), + MIB_DESC(1, 0x64, "rx_Pkts128to255Octets"), + MIB_DESC(1, 0x60, "tx_Pkts256to511Octets"), + MIB_DESC(1, 0x5c, "rx_Pkts256to511Octets"), + MIB_DESC(1, 0x58, "tx_Pkts512to1023Octets"), + MIB_DESC(1, 0x54, "rx_Pkts512to1023Octets"), + MIB_DESC(1, 0x50, "tx_Pkts1024to1518Octets"), + MIB_DESC(1, 0x4c, "rx_StatsPkts1024to1518Octets"), + MIB_DESC(1, 0x48, "tx_Pkts1519toMaxOctets"), + MIB_DESC(1, 0x44, "rx_Pkts1519toMaxOctets"), + MIB_DESC(1, 0x40, "rxMacDiscards") +}; + + +/* DSA callbacks */ + + +static enum dsa_tag_protocol rtl83xx_get_tag_protocol(struct dsa_switch *ds, + int port, + enum dsa_tag_protocol mprot) +{ + /* The switch does not tag the frames, instead internally the header + * structure for each packet is tagged accordingly. 
+ */ + return DSA_TAG_PROTO_TRAILER; +} + +/* + * Initialize all VLANS + */ +static void rtl83xx_vlan_setup(struct rtl838x_switch_priv *priv) +{ + struct rtl838x_vlan_info info; + int i; + + pr_info("In %s\n", __func__); + + priv->r->vlan_profile_setup(0); + priv->r->vlan_profile_setup(1); + pr_info("UNKNOWN_MC_PMASK: %016llx\n", priv->r->read_mcast_pmask(UNKNOWN_MC_PMASK)); + priv->r->vlan_profile_dump(0); + + info.fid = 0; // Default Forwarding ID / MSTI + info.hash_uc_fid = false; // Do not build the L2 lookup hash with FID, but VID + info.hash_mc_fid = false; // Do the same for Multicast packets + info.profile_id = 0; // Use default Vlan Profile 0 + info.tagged_ports = 0; // Initially no port members + if (priv->family_id == RTL9310_FAMILY_ID) { + info.if_id = 0; + info.multicast_grp_mask = 0; + info.l2_tunnel_list_id = -1; + } + + // Initialize all vlans 0-4095 + for (i = 0; i < MAX_VLANS; i ++) + priv->r->vlan_set_tagged(i, &info); + + // reset PVIDs; defaults to 1 on reset + for (i = 0; i <= priv->ds->num_ports; i++) { + priv->r->vlan_port_pvid_set(i, PBVLAN_TYPE_INNER, 0); + priv->r->vlan_port_pvid_set(i, PBVLAN_TYPE_OUTER, 0); + priv->r->vlan_port_pvidmode_set(i, PBVLAN_TYPE_INNER, PBVLAN_MODE_UNTAG_AND_PRITAG); + priv->r->vlan_port_pvidmode_set(i, PBVLAN_TYPE_OUTER, PBVLAN_MODE_UNTAG_AND_PRITAG); + } + + // Set forwarding action based on inner VLAN tag + for (i = 0; i < priv->cpu_port; i++) + priv->r->vlan_fwd_on_inner(i, true); +} + +static void rtl83xx_setup_bpdu_traps(struct rtl838x_switch_priv *priv) +{ + int i; + + for (i = 0; i < priv->cpu_port; i++) + priv->r->set_receive_management_action(i, BPDU, COPY2CPU); +} + +static void rtl83xx_port_set_salrn(struct rtl838x_switch_priv *priv, + int port, bool enable) +{ + int shift = SALRN_PORT_SHIFT(port); + int val = enable ? 
SALRN_MODE_HARDWARE : SALRN_MODE_DISABLED; + + sw_w32_mask(SALRN_MODE_MASK << shift, val << shift, + priv->r->l2_port_new_salrn(port)); +} + +static int rtl83xx_setup(struct dsa_switch *ds) +{ + int i; + struct rtl838x_switch_priv *priv = ds->priv; + u64 port_bitmap = BIT_ULL(priv->cpu_port); + + pr_debug("%s called\n", __func__); + + /* Disable MAC polling the PHY so that we can start configuration */ + priv->r->set_port_reg_le(0ULL, priv->r->smi_poll_ctrl); + + for (i = 0; i < ds->num_ports; i++) + priv->ports[i].enable = false; + priv->ports[priv->cpu_port].enable = true; + + /* Isolate ports from each other: traffic only CPU <-> port */ + /* Setting bit j in register RTL838X_PORT_ISO_CTRL(i) allows + * traffic from source port i to destination port j + */ + for (i = 0; i < priv->cpu_port; i++) { + if (priv->ports[i].phy) { + priv->r->set_port_reg_be(BIT_ULL(priv->cpu_port) | BIT_ULL(i), + priv->r->port_iso_ctrl(i)); + port_bitmap |= BIT_ULL(i); + } + } + priv->r->set_port_reg_be(port_bitmap, priv->r->port_iso_ctrl(priv->cpu_port)); + + if (priv->family_id == RTL8380_FAMILY_ID) + rtl838x_print_matrix(); + else + rtl839x_print_matrix(); + + rtl83xx_init_stats(priv); + + rtl83xx_vlan_setup(priv); + + rtl83xx_setup_bpdu_traps(priv); + + ds->configure_vlan_while_not_filtering = true; + + priv->r->l2_learning_setup(); + + rtl83xx_port_set_salrn(priv, priv->cpu_port, false); + ds->assisted_learning_on_cpu_port = true; + + /* + * Make sure all frames sent to the switch's MAC are trapped to the CPU-port + * 0: FWD, 1: DROP, 2: TRAP2CPU + */ + if (priv->family_id == RTL8380_FAMILY_ID) + sw_w32(0x2, RTL838X_SPCL_TRAP_SWITCH_MAC_CTRL); + else + sw_w32(0x2, RTL839X_SPCL_TRAP_SWITCH_MAC_CTRL); + + /* Enable MAC Polling PHY again */ + rtl83xx_enable_phy_polling(priv); + pr_debug("Please wait until PHY is settled\n"); + msleep(1000); + priv->r->pie_init(priv); + + return 0; +} + +static int rtl93xx_setup(struct dsa_switch *ds) +{ + int i; + struct rtl838x_switch_priv *priv = ds->priv; + u32 port_bitmap = BIT(priv->cpu_port); + + pr_info("%s called\n", __func__); + + /* Disable MAC polling the PHY so that we can start configuration */ + if (priv->family_id == RTL9300_FAMILY_ID) + sw_w32(0, RTL930X_SMI_POLL_CTRL); + + if (priv->family_id == RTL9310_FAMILY_ID) { + sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL); + sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL + 4); + } + + // Disable all ports except CPU port + for (i = 0; i < ds->num_ports; i++) + priv->ports[i].enable = false; + priv->ports[priv->cpu_port].enable = true; + + for (i = 0; i < priv->cpu_port; i++) { + if (priv->ports[i].phy) { + priv->r->traffic_set(i, BIT_ULL(priv->cpu_port) | BIT_ULL(i)); + port_bitmap |= BIT_ULL(i); + } + } + priv->r->traffic_set(priv->cpu_port, port_bitmap); + + rtl930x_print_matrix(); + + // TODO: Initialize statistics + + rtl83xx_vlan_setup(priv); + + ds->configure_vlan_while_not_filtering = true; + + priv->r->l2_learning_setup(); + + rtl83xx_port_set_salrn(priv, priv->cpu_port, false); + ds->assisted_learning_on_cpu_port = true; + + rtl83xx_enable_phy_polling(priv); + + priv->r->pie_init(priv); + + priv->r->led_init(priv); + + return 0; +} + +static int rtl93xx_get_sds(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + struct device_node *dn; + u32 sds_num; + + if (!dev) + return -1; + if (dev->of_node) { + dn = dev->of_node; + if (of_property_read_u32(dn, "sds", &sds_num)) + sds_num = -1; + } else { + dev_err(dev, "No DT node.\n"); + return -1; + } + + return sds_num; +} + +static void 
rtl83xx_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct rtl838x_switch_priv *priv = ds->priv; + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + pr_debug("In %s port %d, state is %d", __func__, port, state->interface); + + if (!phy_interface_mode_is_rgmii(state->interface) && + state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_1000BASEX && + state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII && + state->interface != PHY_INTERFACE_MODE_GMII && + state->interface != PHY_INTERFACE_MODE_QSGMII && + state->interface != PHY_INTERFACE_MODE_INTERNAL && + state->interface != PHY_INTERFACE_MODE_SGMII) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + dev_err(ds->dev, + "Unsupported interface: %d for port %d\n", + state->interface, port); + return; + } + + /* Allow all the expected bits */ + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + /* With the exclusion of MII and Reverse MII, we support Gigabit, + * including Half duplex + */ + if (state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + } + + /* On both the 8380 and 8382, ports 24-27 are SFP ports */ + if (port >= 24 && port <= 27 && priv->family_id == RTL8380_FAMILY_ID) + phylink_set(mask, 1000baseX_Full); + + /* On the RTL839x family of SoCs, ports 48 to 51 are SFP ports */ + if (port >= 48 && port <= 51 && priv->family_id == RTL8390_FAMILY_ID) + phylink_set(mask, 1000baseX_Full); + + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + + bitmap_and(supported, supported, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static void rtl93xx_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct rtl838x_switch_priv *priv = ds->priv; + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + pr_debug("In %s port %d, state is %d (%s)", __func__, port, state->interface, + phy_modes(state->interface)); + + if (!phy_interface_mode_is_rgmii(state->interface) && + state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_1000BASEX && + state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII && + state->interface != PHY_INTERFACE_MODE_GMII && + state->interface != PHY_INTERFACE_MODE_QSGMII && + state->interface != PHY_INTERFACE_MODE_XGMII && + state->interface != PHY_INTERFACE_MODE_HSGMII && + state->interface != PHY_INTERFACE_MODE_10GBASER && + state->interface != PHY_INTERFACE_MODE_10GKR && + state->interface != PHY_INTERFACE_MODE_USXGMII && + state->interface != PHY_INTERFACE_MODE_INTERNAL && + state->interface != PHY_INTERFACE_MODE_SGMII) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + dev_err(ds->dev, + "Unsupported interface: %d for port %d\n", + state->interface, port); + return; + } + + /* Allow all the expected bits */ + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + /* With the exclusion of MII and Reverse MII, we support Gigabit, + * including Half duplex + */ + if (state->interface 
!= PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + } + + // Internal phys of the RTL93xx family provide 10G + if (priv->ports[port].phy_is_integrated + && state->interface == PHY_INTERFACE_MODE_1000BASEX) { + phylink_set(mask, 1000baseX_Full); + } else if (priv->ports[port].phy_is_integrated) { + phylink_set(mask, 1000baseX_Full); + phylink_set(mask, 10000baseKR_Full); + phylink_set(mask, 10000baseSR_Full); + phylink_set(mask, 10000baseCR_Full); + } + if (state->interface == PHY_INTERFACE_MODE_INTERNAL) { + phylink_set(mask, 1000baseX_Full); + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 10000baseKR_Full); + phylink_set(mask, 10000baseT_Full); + phylink_set(mask, 10000baseSR_Full); + phylink_set(mask, 10000baseCR_Full); + } + + if (state->interface == PHY_INTERFACE_MODE_USXGMII) + phylink_set(mask, 10000baseT_Full); + + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + + bitmap_and(supported, supported, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + pr_debug("%s leaving supported: %*pb", __func__, __ETHTOOL_LINK_MODE_MASK_NBITS, supported); +} + +static int rtl83xx_phylink_mac_link_state(struct dsa_switch *ds, int port, + struct phylink_link_state *state) +{ + struct rtl838x_switch_priv *priv = ds->priv; + u64 speed; + u64 link; + + if (port < 0 || port > priv->cpu_port) + return -EINVAL; + + state->link = 0; + link = priv->r->get_port_reg_le(priv->r->mac_link_sts); + if (link & BIT_ULL(port)) + state->link = 1; + pr_debug("%s: link state port %d: %llx\n", __func__, port, link & BIT_ULL(port)); + + state->duplex = 0; + if (priv->r->get_port_reg_le(priv->r->mac_link_dup_sts) & BIT_ULL(port)) + state->duplex = 1; + + speed = priv->r->get_port_reg_le(priv->r->mac_link_spd_sts(port)); + speed >>= (port % 16) << 1; + switch (speed & 0x3) { + case 0: + state->speed = SPEED_10; + break; + case 1: + state->speed = SPEED_100; + break; + case 2: + state->speed = SPEED_1000; + break; + case 3: + if (priv->family_id == RTL9300_FAMILY_ID + && (port == 24 || port == 26)) /* Internal serdes */ + state->speed = SPEED_2500; + else + state->speed = SPEED_100; /* Is in fact 500Mbit */ + } + + state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX); + if (priv->r->get_port_reg_le(priv->r->mac_rx_pause_sts) & BIT_ULL(port)) + state->pause |= MLO_PAUSE_RX; + if (priv->r->get_port_reg_le(priv->r->mac_tx_pause_sts) & BIT_ULL(port)) + state->pause |= MLO_PAUSE_TX; + return 1; +} + +static int rtl93xx_phylink_mac_link_state(struct dsa_switch *ds, int port, + struct phylink_link_state *state) +{ + struct rtl838x_switch_priv *priv = ds->priv; + u64 speed; + u64 link; + u64 media; + + if (port < 0 || port > priv->cpu_port) + return -EINVAL; + + /* + * On the RTL9300 for at least the RTL8226B PHY, the MAC-side link + * state needs to be read twice in order to read a correct result. + * This would not be necessary for ports connected e.g. to RTL8218D + * PHYs. 
+ */ + state->link = 0; + link = priv->r->get_port_reg_le(priv->r->mac_link_sts); + link = priv->r->get_port_reg_le(priv->r->mac_link_sts); + if (link & BIT_ULL(port)) + state->link = 1; + + if (priv->family_id == RTL9310_FAMILY_ID) + media = priv->r->get_port_reg_le(RTL931X_MAC_LINK_MEDIA_STS); + + if (priv->family_id == RTL9300_FAMILY_ID) + media = sw_r32(RTL930X_MAC_LINK_MEDIA_STS); + + if (media & BIT_ULL(port)) + state->link = 1; + + pr_debug("%s: link state port %d: %llx, media %llx\n", __func__, port, + link & BIT_ULL(port), media); + + state->duplex = 0; + if (priv->r->get_port_reg_le(priv->r->mac_link_dup_sts) & BIT_ULL(port)) + state->duplex = 1; + + speed = priv->r->get_port_reg_le(priv->r->mac_link_spd_sts(port)); + speed >>= (port % 8) << 2; + switch (speed & 0xf) { + case 0: + state->speed = SPEED_10; + break; + case 1: + state->speed = SPEED_100; + break; + case 2: + case 7: + state->speed = SPEED_1000; + break; + case 4: + state->speed = SPEED_10000; + break; + case 5: + case 8: + state->speed = SPEED_2500; + break; + case 6: + state->speed = SPEED_5000; + break; + default: + pr_err("%s: unknown speed: %d\n", __func__, (u32)speed & 0xf); + } + + if (priv->family_id == RTL9310_FAMILY_ID + && (port >= 52 || port <= 55)) { /* Internal serdes */ + state->speed = SPEED_10000; + state->link = 1; + state->duplex = 1; + } + + pr_debug("%s: speed is: %d %d\n", __func__, (u32)speed & 0xf, state->speed); + state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX); + if (priv->r->get_port_reg_le(priv->r->mac_rx_pause_sts) & BIT_ULL(port)) + state->pause |= MLO_PAUSE_RX; + if (priv->r->get_port_reg_le(priv->r->mac_tx_pause_sts) & BIT_ULL(port)) + state->pause |= MLO_PAUSE_TX; + return 1; +} + +static void rtl83xx_config_interface(int port, phy_interface_t interface) +{ + u32 old, int_shift, sds_shift; + + switch (port) { + case 24: + int_shift = 0; + sds_shift = 5; + break; + case 26: + int_shift = 3; + sds_shift = 0; + break; + default: + return; + } + + old = sw_r32(RTL838X_SDS_MODE_SEL); + switch (interface) { + case PHY_INTERFACE_MODE_1000BASEX: + if ((old >> sds_shift & 0x1f) == 4) + return; + sw_w32_mask(0x7 << int_shift, 1 << int_shift, RTL838X_INT_MODE_CTRL); + sw_w32_mask(0x1f << sds_shift, 4 << sds_shift, RTL838X_SDS_MODE_SEL); + break; + case PHY_INTERFACE_MODE_SGMII: + if ((old >> sds_shift & 0x1f) == 2) + return; + sw_w32_mask(0x7 << int_shift, 2 << int_shift, RTL838X_INT_MODE_CTRL); + sw_w32_mask(0x1f << sds_shift, 2 << sds_shift, RTL838X_SDS_MODE_SEL); + break; + default: + return; + } + pr_debug("configured port %d for interface %s\n", port, phy_modes(interface)); +} + +static void rtl83xx_phylink_mac_config(struct dsa_switch *ds, int port, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct rtl838x_switch_priv *priv = ds->priv; + u32 reg; + int speed_bit = priv->family_id == RTL8380_FAMILY_ID ? 
4 : 3; + + pr_debug("%s port %d, mode %x\n", __func__, port, mode); + + if (port == priv->cpu_port) { + /* Set Speed, duplex, flow control + * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL + * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN + * | MEDIA_SEL + */ + if (priv->family_id == RTL8380_FAMILY_ID) { + sw_w32(0x6192F, priv->r->mac_force_mode_ctrl(priv->cpu_port)); + /* allow CRC errors on CPU-port */ + sw_w32_mask(0, 0x8, RTL838X_MAC_PORT_CTRL(priv->cpu_port)); + } else { + sw_w32_mask(0, 3, priv->r->mac_force_mode_ctrl(priv->cpu_port)); + } + return; + } + + reg = sw_r32(priv->r->mac_force_mode_ctrl(port)); + /* Auto-Negotiation does not work for MAC in RTL8390 */ + if (priv->family_id == RTL8380_FAMILY_ID) { + if (mode == MLO_AN_PHY || phylink_autoneg_inband(mode)) { + pr_debug("PHY autonegotiates\n"); + reg |= RTL838X_NWAY_EN; + sw_w32(reg, priv->r->mac_force_mode_ctrl(port)); + rtl83xx_config_interface(port, state->interface); + return; + } + } + + if (mode != MLO_AN_FIXED) + pr_debug("Fixed state.\n"); + + /* Clear id_mode_dis bit, and the existing port mode, let + * RGMII_MODE_EN bet set by mac_link_{up,down} */ + if (priv->family_id == RTL8380_FAMILY_ID) { + reg &= ~(RTL838X_RX_PAUSE_EN | RTL838X_TX_PAUSE_EN); + if (state->pause & MLO_PAUSE_TXRX_MASK) { + if (state->pause & MLO_PAUSE_TX) + reg |= RTL838X_TX_PAUSE_EN; + reg |= RTL838X_RX_PAUSE_EN; + } + } else if (priv->family_id == RTL8390_FAMILY_ID) { + reg &= ~(RTL839X_RX_PAUSE_EN | RTL839X_TX_PAUSE_EN); + if (state->pause & MLO_PAUSE_TXRX_MASK) { + if (state->pause & MLO_PAUSE_TX) + reg |= RTL839X_TX_PAUSE_EN; + reg |= RTL839X_RX_PAUSE_EN; + } + } + + + reg &= ~(3 << speed_bit); + switch (state->speed) { + case SPEED_1000: + reg |= 2 << speed_bit; + break; + case SPEED_100: + reg |= 1 << speed_bit; + break; + default: + break; // Ignore, including 10MBit which has a speed value of 0 + } + + if (priv->family_id == RTL8380_FAMILY_ID) { + reg &= ~(RTL838X_DUPLEX_MODE | RTL838X_FORCE_LINK_EN); + if (state->link) + reg |= RTL838X_FORCE_LINK_EN; + if (state->duplex == RTL838X_DUPLEX_MODE) + reg |= RTL838X_DUPLEX_MODE; + } else if (priv->family_id == RTL8390_FAMILY_ID) { + reg &= ~(RTL839X_DUPLEX_MODE | RTL839X_FORCE_LINK_EN); + if (state->link) + reg |= RTL839X_FORCE_LINK_EN; + if (state->duplex == RTL839X_DUPLEX_MODE) + reg |= RTL839X_DUPLEX_MODE; + } + + // LAG members must use DUPLEX and we need to enable the link + if (priv->lagmembers & BIT_ULL(port)) { + switch(priv->family_id) { + case RTL8380_FAMILY_ID: + reg |= (RTL838X_DUPLEX_MODE | RTL838X_FORCE_LINK_EN); + break; + case RTL8390_FAMILY_ID: + reg |= (RTL839X_DUPLEX_MODE | RTL839X_FORCE_LINK_EN); + break; + } + } + + // Disable AN + if (priv->family_id == RTL8380_FAMILY_ID) + reg &= ~RTL838X_NWAY_EN; + sw_w32(reg, priv->r->mac_force_mode_ctrl(port)); +} + +static void rtl931x_phylink_mac_config(struct dsa_switch *ds, int port, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct rtl838x_switch_priv *priv = ds->priv; + int sds_num; + u32 reg, band; + + sds_num = priv->ports[port].sds_num; + pr_info("%s: speed %d sds_num %d\n", __func__, state->speed, sds_num); + + switch (state->interface) { + case PHY_INTERFACE_MODE_HSGMII: + pr_info("%s setting mode PHY_INTERFACE_MODE_HSGMII\n", __func__); + band = rtl931x_sds_cmu_band_get(sds_num, PHY_INTERFACE_MODE_HSGMII); + rtl931x_sds_init(sds_num, PHY_INTERFACE_MODE_HSGMII); + band = rtl931x_sds_cmu_band_set(sds_num, true, 62, PHY_INTERFACE_MODE_HSGMII); + break; + case PHY_INTERFACE_MODE_1000BASEX: + band 
= rtl931x_sds_cmu_band_get(sds_num, PHY_INTERFACE_MODE_1000BASEX); + rtl931x_sds_init(sds_num, PHY_INTERFACE_MODE_1000BASEX); + break; + case PHY_INTERFACE_MODE_XGMII: + band = rtl931x_sds_cmu_band_get(sds_num, PHY_INTERFACE_MODE_XGMII); + rtl931x_sds_init(sds_num, PHY_INTERFACE_MODE_XGMII); + break; + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_10GKR: + band = rtl931x_sds_cmu_band_get(sds_num, PHY_INTERFACE_MODE_10GBASER); + rtl931x_sds_init(sds_num, PHY_INTERFACE_MODE_10GBASER); + break; + case PHY_INTERFACE_MODE_USXGMII: + // Translates to MII_USXGMII_10GSXGMII + band = rtl931x_sds_cmu_band_get(sds_num, PHY_INTERFACE_MODE_USXGMII); + rtl931x_sds_init(sds_num, PHY_INTERFACE_MODE_USXGMII); + break; + case PHY_INTERFACE_MODE_SGMII: + pr_info("%s setting mode PHY_INTERFACE_MODE_SGMII\n", __func__); + band = rtl931x_sds_cmu_band_get(sds_num, PHY_INTERFACE_MODE_SGMII); + rtl931x_sds_init(sds_num, PHY_INTERFACE_MODE_SGMII); + band = rtl931x_sds_cmu_band_set(sds_num, true, 62, PHY_INTERFACE_MODE_SGMII); + break; + case PHY_INTERFACE_MODE_QSGMII: + band = rtl931x_sds_cmu_band_get(sds_num, PHY_INTERFACE_MODE_QSGMII); + rtl931x_sds_init(sds_num, PHY_INTERFACE_MODE_QSGMII); + break; + default: + pr_err("%s: unknown serdes mode: %s\n", + __func__, phy_modes(state->interface)); + return; + } + + reg = sw_r32(priv->r->mac_force_mode_ctrl(port)); + pr_info("%s reading FORCE_MODE_CTRL: %08x\n", __func__, reg); + + reg &= ~(RTL931X_DUPLEX_MODE | RTL931X_FORCE_EN | RTL931X_FORCE_LINK_EN); + + reg &= ~(0xf << 12); + reg |= 0x2 << 12; // Set SMI speed to 0x2 + + reg |= RTL931X_TX_PAUSE_EN | RTL931X_RX_PAUSE_EN; + + if (priv->lagmembers & BIT_ULL(port)) + reg |= RTL931X_DUPLEX_MODE; + + if (state->duplex == DUPLEX_FULL) + reg |= RTL931X_DUPLEX_MODE; + + sw_w32(reg, priv->r->mac_force_mode_ctrl(port)); + +} + +static void rtl93xx_phylink_mac_config(struct dsa_switch *ds, int port, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct rtl838x_switch_priv *priv = ds->priv; + int sds_num, sds_mode; + u32 reg; + + pr_info("%s port %d, mode %x, phy-mode: %s, speed %d, link %d\n", __func__, + port, mode, phy_modes(state->interface), state->speed, state->link); + + // Nothing to be done for the CPU-port + if (port == priv->cpu_port) + return; + + if (priv->family_id == RTL9310_FAMILY_ID) + return rtl931x_phylink_mac_config(ds, port, mode, state); + + sds_num = priv->ports[port].sds_num; + pr_info("%s SDS is %d\n", __func__, sds_num); + if (sds_num >= 0) { + switch (state->interface) { + case PHY_INTERFACE_MODE_HSGMII: + sds_mode = 0x12; + break; + case PHY_INTERFACE_MODE_1000BASEX: + sds_mode = 0x04; + break; + case PHY_INTERFACE_MODE_XGMII: + sds_mode = 0x10; + break; + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_10GKR: + sds_mode = 0x1b; // 10G 1000X Auto + break; + case PHY_INTERFACE_MODE_USXGMII: + sds_mode = 0x0d; + break; + default: + pr_err("%s: unknown serdes mode: %s\n", + __func__, phy_modes(state->interface)); + return; + } + if (state->interface == PHY_INTERFACE_MODE_10GBASER) + rtl9300_serdes_setup(sds_num, state->interface); + } + + reg = sw_r32(priv->r->mac_force_mode_ctrl(port)); + reg &= ~(0xf << 3); + + switch (state->speed) { + case SPEED_10000: + reg |= 4 << 3; + break; + case SPEED_5000: + reg |= 6 << 3; + break; + case SPEED_2500: + reg |= 5 << 3; + break; + case SPEED_1000: + reg |= 2 << 3; + break; + default: + reg |= 2 << 3; + break; + } + + if (state->link) + reg |= RTL930X_FORCE_LINK_EN; + + if (priv->lagmembers & BIT_ULL(port)) + reg |= 
RTL930X_DUPLEX_MODE | RTL930X_FORCE_LINK_EN; + + if (state->duplex == DUPLEX_FULL) + reg |= RTL930X_DUPLEX_MODE; + + if (priv->ports[port].phy_is_integrated) + reg &= ~RTL930X_FORCE_EN; // Clear MAC_FORCE_EN to allow SDS-MAC link + else + reg |= RTL930X_FORCE_EN; + + sw_w32(reg, priv->r->mac_force_mode_ctrl(port)); +} + +static void rtl83xx_phylink_mac_link_down(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface) +{ + struct rtl838x_switch_priv *priv = ds->priv; + + /* Stop TX/RX to port */ + sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(port)); + + // No longer force link + sw_w32_mask(0x3, 0, priv->r->mac_force_mode_ctrl(port)); +} + +static void rtl93xx_phylink_mac_link_down(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface) +{ + struct rtl838x_switch_priv *priv = ds->priv; + u32 v = 0; + + /* Stop TX/RX to port */ + sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(port)); + + // No longer force link + if (priv->family_id == RTL9300_FAMILY_ID) + v = RTL930X_FORCE_EN | RTL930X_FORCE_LINK_EN; + else if (priv->family_id == RTL9310_FAMILY_ID) + v = RTL931X_FORCE_EN | RTL931X_FORCE_LINK_EN; + sw_w32_mask(v, 0, priv->r->mac_force_mode_ctrl(port)); +} + +static void rtl83xx_phylink_mac_link_up(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface, + struct phy_device *phydev, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct rtl838x_switch_priv *priv = ds->priv; + /* Restart TX/RX to port */ + sw_w32_mask(0, 0x3, priv->r->mac_port_ctrl(port)); + // TODO: Set speed/duplex/pauses +} + +static void rtl93xx_phylink_mac_link_up(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface, + struct phy_device *phydev, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct rtl838x_switch_priv *priv = ds->priv; + + /* Restart TX/RX to port */ + sw_w32_mask(0, 0x3, priv->r->mac_port_ctrl(port)); + // TODO: Set speed/duplex/pauses +} + +static void rtl83xx_get_strings(struct dsa_switch *ds, + int port, u32 stringset, u8 *data) +{ + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < ARRAY_SIZE(rtl83xx_mib); i++) + strncpy(data + i * ETH_GSTRING_LEN, rtl83xx_mib[i].name, + ETH_GSTRING_LEN); +} + +static void rtl83xx_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + struct rtl838x_switch_priv *priv = ds->priv; + const struct rtl83xx_mib_desc *mib; + int i; + u64 h; + + for (i = 0; i < ARRAY_SIZE(rtl83xx_mib); i++) { + mib = &rtl83xx_mib[i]; + + data[i] = sw_r32(priv->r->stat_port_std_mib + (port << 8) + 252 - mib->offset); + if (mib->size == 2) { + h = sw_r32(priv->r->stat_port_std_mib + (port << 8) + 248 - mib->offset); + data[i] |= h << 32; + } + } +} + +static int rtl83xx_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ + if (sset != ETH_SS_STATS) + return 0; + + return ARRAY_SIZE(rtl83xx_mib); +} + +static int rtl83xx_mc_group_alloc(struct rtl838x_switch_priv *priv, int port) +{ + int mc_group = find_first_zero_bit(priv->mc_group_bm, MAX_MC_GROUPS - 1); + u64 portmask; + + if (mc_group >= MAX_MC_GROUPS - 1) + return -1; + + if (priv->is_lagmember[port]) { + pr_info("%s: %d is lag slave. 
ignore\n", __func__, port); + return 0; + } + + set_bit(mc_group, priv->mc_group_bm); + mc_group++; // We cannot use group 0, as this is used for lookup miss flooding + portmask = BIT_ULL(port) | BIT_ULL(priv->cpu_port); + priv->r->write_mcast_pmask(mc_group, portmask); + + return mc_group; +} + +static u64 rtl83xx_mc_group_add_port(struct rtl838x_switch_priv *priv, int mc_group, int port) +{ + u64 portmask = priv->r->read_mcast_pmask(mc_group); + + pr_debug("%s: %d\n", __func__, port); + if (priv->is_lagmember[port]) { + pr_info("%s: %d is lag slave. ignore\n", __func__, port); + return portmask; + } + portmask |= BIT_ULL(port); + priv->r->write_mcast_pmask(mc_group, portmask); + + return portmask; +} + +static u64 rtl83xx_mc_group_del_port(struct rtl838x_switch_priv *priv, int mc_group, int port) +{ + u64 portmask = priv->r->read_mcast_pmask(mc_group); + + pr_debug("%s: %d\n", __func__, port); + if (priv->is_lagmember[port]) { + pr_info("%s: %d is lag slave. ignore\n", __func__, port); + return portmask; + } + priv->r->write_mcast_pmask(mc_group, portmask); + if (portmask == BIT_ULL(priv->cpu_port)) { + portmask &= ~BIT_ULL(priv->cpu_port); + priv->r->write_mcast_pmask(mc_group, portmask); + clear_bit(mc_group, priv->mc_group_bm); + } + + return portmask; +} + +static void store_mcgroups(struct rtl838x_switch_priv *priv, int port) +{ + int mc_group; + + for (mc_group = 0; mc_group < MAX_MC_GROUPS; mc_group++) { + u64 portmask = priv->r->read_mcast_pmask(mc_group); + if (portmask & BIT_ULL(port)) { + priv->mc_group_saves[mc_group] = port; + rtl83xx_mc_group_del_port(priv, mc_group, port); + } + } +} + +static void load_mcgroups(struct rtl838x_switch_priv *priv, int port) +{ + int mc_group; + + for (mc_group = 0; mc_group < MAX_MC_GROUPS; mc_group++) { + if (priv->mc_group_saves[mc_group] == port) { + rtl83xx_mc_group_add_port(priv, mc_group, port); + priv->mc_group_saves[mc_group] = -1; + } + } +} + +static int rtl83xx_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct rtl838x_switch_priv *priv = ds->priv; + u64 v; + + pr_debug("%s: %x %d", __func__, (u32) priv, port); + priv->ports[port].enable = true; + + /* enable inner tagging on egress, do not keep any tags */ + priv->r->vlan_port_keep_tag_set(port, 0, 1); + + if (dsa_is_cpu_port(ds, port)) + return 0; + + /* add port to switch mask of CPU_PORT */ + priv->r->traffic_enable(priv->cpu_port, port); + + load_mcgroups(priv, port); + + if (priv->is_lagmember[port]) { + pr_debug("%s: %d is lag slave. 
ignore\n", __func__, port); + return 0; + } + + /* add all other ports in the same bridge to switch mask of port */ + v = priv->r->traffic_get(port); + v |= priv->ports[port].pm; + priv->r->traffic_set(port, v); + + // TODO: Figure out if this is necessary + if (priv->family_id == RTL9300_FAMILY_ID) { + sw_w32_mask(0, BIT(port), RTL930X_L2_PORT_SABLK_CTRL); + sw_w32_mask(0, BIT(port), RTL930X_L2_PORT_DABLK_CTRL); + } + + if (priv->ports[port].sds_num < 0) + priv->ports[port].sds_num = rtl93xx_get_sds(phydev); + + return 0; +} + +static void rtl83xx_port_disable(struct dsa_switch *ds, int port) +{ + struct rtl838x_switch_priv *priv = ds->priv; + u64 v; + + pr_debug("%s %x: %d", __func__, (u32)priv, port); + /* you can only disable user ports */ + if (!dsa_is_user_port(ds, port)) + return; + + // BUG: This does not work on RTL931X + /* remove port from switch mask of CPU_PORT */ + priv->r->traffic_disable(priv->cpu_port, port); + store_mcgroups(priv, port); + + /* remove all other ports in the same bridge from switch mask of port */ + v = priv->r->traffic_get(port); + v &= ~priv->ports[port].pm; + priv->r->traffic_set(port, v); + + priv->ports[port].enable = false; +} + +static int rtl83xx_set_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) +{ + struct rtl838x_switch_priv *priv = ds->priv; + + if (e->eee_enabled && !priv->eee_enabled) { + pr_info("Globally enabling EEE\n"); + priv->r->init_eee(priv, true); + } + + priv->r->port_eee_set(priv, port, e->eee_enabled); + + if (e->eee_enabled) + pr_info("Enabled EEE for port %d\n", port); + else + pr_info("Disabled EEE for port %d\n", port); + return 0; +} + +static int rtl83xx_get_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) +{ + struct rtl838x_switch_priv *priv = ds->priv; + + e->supported = SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full; + + priv->r->eee_port_ability(priv, e, port); + + e->eee_enabled = priv->ports[port].eee_enabled; + + e->eee_active = !!(e->advertised & e->lp_advertised); + + return 0; +} + +static int rtl93xx_get_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) +{ + struct rtl838x_switch_priv *priv = ds->priv; + + e->supported = SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full + | SUPPORTED_2500baseX_Full; + + priv->r->eee_port_ability(priv, e, port); + + e->eee_enabled = priv->ports[port].eee_enabled; + + e->eee_active = !!(e->advertised & e->lp_advertised); + + return 0; +} + +static int rtl83xx_set_ageing_time(struct dsa_switch *ds, unsigned int msec) +{ + struct rtl838x_switch_priv *priv = ds->priv; + + priv->r->set_ageing_time(msec); + return 0; +} + +static int rtl83xx_port_bridge_join(struct dsa_switch *ds, int port, + struct net_device *bridge) +{ + struct rtl838x_switch_priv *priv = ds->priv; + u64 port_bitmap = BIT_ULL(priv->cpu_port), v; + int i; + + pr_debug("%s %x: %d %llx", __func__, (u32)priv, port, port_bitmap); + + if (priv->is_lagmember[port]) { + pr_debug("%s: %d is lag slave. ignore\n", __func__, port); + return 0; + } + + mutex_lock(&priv->reg_mutex); + for (i = 0; i < ds->num_ports; i++) { + /* Add this port to the port matrix of the other ports in the + * same bridge. If the port is disabled, port matrix is kept + * and not being setup until the port becomes enabled. 
+ */ + if (dsa_is_user_port(ds, i) && !priv->is_lagmember[i] && i != port) { + if (dsa_to_port(ds, i)->bridge_dev != bridge) + continue; + if (priv->ports[i].enable) + priv->r->traffic_enable(i, port); + + priv->ports[i].pm |= BIT_ULL(port); + port_bitmap |= BIT_ULL(i); + } + } + load_mcgroups(priv, port); + + /* Add all other ports to this port matrix. */ + if (priv->ports[port].enable) { + priv->r->traffic_enable(priv->cpu_port, port); + v = priv->r->traffic_get(port); + v |= port_bitmap; + priv->r->traffic_set(port, v); + } + priv->ports[port].pm |= port_bitmap; + mutex_unlock(&priv->reg_mutex); + + return 0; +} + +static void rtl83xx_port_bridge_leave(struct dsa_switch *ds, int port, + struct net_device *bridge) +{ + struct rtl838x_switch_priv *priv = ds->priv; + u64 port_bitmap = BIT_ULL(priv->cpu_port), v; + int i; + + pr_debug("%s %x: %d", __func__, (u32)priv, port); + mutex_lock(&priv->reg_mutex); + for (i = 0; i < ds->num_ports; i++) { + /* Remove this port from the port matrix of the other ports + * in the same bridge. If the port is disabled, port matrix + * is kept and not being setup until the port becomes enabled. + * And the other port's port matrix cannot be broken when the + * other port is still a VLAN-aware port. + */ + if (dsa_is_user_port(ds, i) && i != port) { + if (dsa_to_port(ds, i)->bridge_dev != bridge) + continue; + if (priv->ports[i].enable) + priv->r->traffic_disable(i, port); + + priv->ports[i].pm |= BIT_ULL(port); + port_bitmap &= ~BIT_ULL(i); + } + } + store_mcgroups(priv, port); + + /* Add all other ports to this port matrix. */ + if (priv->ports[port].enable) { + v = priv->r->traffic_get(port); + v |= port_bitmap; + priv->r->traffic_set(port, v); + } + priv->ports[port].pm &= ~port_bitmap; + + mutex_unlock(&priv->reg_mutex); +} + +void rtl83xx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) +{ + u32 msti = 0; + u32 port_state[4]; + int index, bit; + int pos = port; + struct rtl838x_switch_priv *priv = ds->priv; + int n = priv->port_width << 1; + + /* Ports above or equal CPU port can never be configured */ + if (port >= priv->cpu_port) + return; + + mutex_lock(&priv->reg_mutex); + + /* For the RTL839x and following, the bits are left-aligned, 838x and 930x + * have 64 bit fields, 839x and 931x have 128 bit fields + */ + if (priv->family_id == RTL8390_FAMILY_ID) + pos += 12; + if (priv->family_id == RTL9300_FAMILY_ID) + pos += 3; + if (priv->family_id == RTL9310_FAMILY_ID) + pos += 8; + + index = n - (pos >> 4) - 1; + bit = (pos << 1) % 32; + + priv->r->stp_get(priv, msti, port_state); + + pr_debug("Current state, port %d: %d\n", port, (port_state[index] >> bit) & 3); + port_state[index] &= ~(3 << bit); + + switch (state) { + case BR_STATE_DISABLED: /* 0 */ + port_state[index] |= (0 << bit); + break; + case BR_STATE_BLOCKING: /* 4 */ + case BR_STATE_LISTENING: /* 1 */ + port_state[index] |= (1 << bit); + break; + case BR_STATE_LEARNING: /* 2 */ + port_state[index] |= (2 << bit); + break; + case BR_STATE_FORWARDING: /* 3*/ + port_state[index] |= (3 << bit); + default: + break; + } + + priv->r->stp_set(priv, msti, port_state); + + mutex_unlock(&priv->reg_mutex); +} + +void rtl83xx_fast_age(struct dsa_switch *ds, int port) +{ + struct rtl838x_switch_priv *priv = ds->priv; + int s = priv->family_id == RTL8390_FAMILY_ID ? 
2 : 0; + + pr_debug("FAST AGE port %d\n", port); + mutex_lock(&priv->reg_mutex); + /* RTL838X_L2_TBL_FLUSH_CTRL register bits, 839x has 1 bit larger + * port fields: + * 0-4: Replacing port + * 5-9: Flushed/replaced port + * 10-21: FVID + * 22: Entry types: 1: dynamic, 0: also static + * 23: Match flush port + * 24: Match FVID + * 25: Flush (0) or replace (1) L2 entries + * 26: Status of action (1: Start, 0: Done) + */ + sw_w32(1 << (26 + s) | 1 << (23 + s) | port << (5 + (s / 2)), priv->r->l2_tbl_flush_ctrl); + + do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & BIT(26 + s)); + + mutex_unlock(&priv->reg_mutex); +} + +void rtl931x_fast_age(struct dsa_switch *ds, int port) +{ + struct rtl838x_switch_priv *priv = ds->priv; + + pr_info("%s port %d\n", __func__, port); + mutex_lock(&priv->reg_mutex); + sw_w32(port << 11, RTL931X_L2_TBL_FLUSH_CTRL + 4); + + sw_w32(BIT(24) | BIT(28), RTL931X_L2_TBL_FLUSH_CTRL); + + do { } while (sw_r32(RTL931X_L2_TBL_FLUSH_CTRL) & BIT (28)); + + mutex_unlock(&priv->reg_mutex); +} + +void rtl930x_fast_age(struct dsa_switch *ds, int port) +{ + struct rtl838x_switch_priv *priv = ds->priv; + + if (priv->family_id == RTL9310_FAMILY_ID) + return rtl931x_fast_age(ds, port); + + pr_debug("FAST AGE port %d\n", port); + mutex_lock(&priv->reg_mutex); + sw_w32(port << 11, RTL930X_L2_TBL_FLUSH_CTRL + 4); + + sw_w32(BIT(26) | BIT(30), RTL930X_L2_TBL_FLUSH_CTRL); + + do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & BIT(30)); + + mutex_unlock(&priv->reg_mutex); +} + +static int rtl83xx_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering, + struct switchdev_trans *trans) +{ + struct rtl838x_switch_priv *priv = ds->priv; + + pr_debug("%s: port %d\n", __func__, port); + mutex_lock(&priv->reg_mutex); + + if (vlan_filtering) { + /* Enable ingress and egress filtering + * The VLAN_PORT_IGR_FILTER register uses 2 bits for each port to define + * the filter action: + * 0: Always Forward + * 1: Drop packet + * 2: Trap packet to CPU port + * The Egress filter used 1 bit per state (0: DISABLED, 1: ENABLED) + */ + if (port != priv->cpu_port) + priv->r->set_vlan_igr_filter(port, IGR_DROP); + + priv->r->set_vlan_egr_filter(port, EGR_ENABLE); + } else { + /* Disable ingress and egress filtering */ + if (port != priv->cpu_port) + priv->r->set_vlan_igr_filter(port, IGR_FORWARD); + + priv->r->set_vlan_egr_filter(port, EGR_DISABLE); + } + + /* Do we need to do something to the CPU-Port, too? 
*/ + mutex_unlock(&priv->reg_mutex); + + return 0; +} + +static int rtl83xx_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct rtl838x_vlan_info info; + struct rtl838x_switch_priv *priv = ds->priv; + + priv->r->vlan_tables_read(0, &info); + + pr_debug("VLAN 0: Tagged ports %llx, untag %llx, profile %d, MC# %d, UC# %d, FID %x\n", + info.tagged_ports, info.untagged_ports, info.profile_id, + info.hash_mc_fid, info.hash_uc_fid, info.fid); + + priv->r->vlan_tables_read(1, &info); + pr_debug("VLAN 1: Tagged ports %llx, untag %llx, profile %d, MC# %d, UC# %d, FID %x\n", + info.tagged_ports, info.untagged_ports, info.profile_id, + info.hash_mc_fid, info.hash_uc_fid, info.fid); + priv->r->vlan_set_untagged(1, info.untagged_ports); + pr_debug("SET: Untagged ports, VLAN %d: %llx\n", 1, info.untagged_ports); + + priv->r->vlan_set_tagged(1, &info); + pr_debug("SET: Tagged ports, VLAN %d: %llx\n", 1, info.tagged_ports); + + return 0; +} + +static void rtl83xx_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct rtl838x_vlan_info info; + struct rtl838x_switch_priv *priv = ds->priv; + int v; + + pr_debug("%s port %d, vid_begin %d, vid_end %d, flags %x\n", __func__, + port, vlan->vid_begin, vlan->vid_end, vlan->flags); + + if (vlan->vid_begin > 4095 || vlan->vid_end > 4095) { + dev_err(priv->dev, "VLAN out of range: %d - %d", + vlan->vid_begin, vlan->vid_end); + return; + } + + mutex_lock(&priv->reg_mutex); + + if (vlan->flags & BRIDGE_VLAN_INFO_PVID) { + for (v = vlan->vid_begin; v <= vlan->vid_end; v++) { + if (!v) + continue; + /* Set both inner and outer PVID of the port */ + priv->r->vlan_port_pvid_set(port, PBVLAN_TYPE_INNER, v); + priv->r->vlan_port_pvid_set(port, PBVLAN_TYPE_OUTER, v); + priv->r->vlan_port_pvidmode_set(port, PBVLAN_TYPE_INNER, + PBVLAN_MODE_UNTAG_AND_PRITAG); + priv->r->vlan_port_pvidmode_set(port, PBVLAN_TYPE_OUTER, + PBVLAN_MODE_UNTAG_AND_PRITAG); + + priv->ports[port].pvid = vlan->vid_end; + } + } + + for (v = vlan->vid_begin; v <= vlan->vid_end; v++) { + /* Get port memberships of this vlan */ + priv->r->vlan_tables_read(v, &info); + + /* new VLAN? 
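(no ports are members yet, so the entry is initialised below with default FID 0 and profile 0)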
*/ + if (!info.tagged_ports) { + info.fid = 0; + info.hash_mc_fid = false; + info.hash_uc_fid = false; + info.profile_id = 0; + } + + /* sanitize untagged_ports - must be a subset */ + if (info.untagged_ports & ~info.tagged_ports) + info.untagged_ports = 0; + + info.tagged_ports |= BIT_ULL(port); + if (vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED) + info.untagged_ports |= BIT_ULL(port); + + priv->r->vlan_set_untagged(v, info.untagged_ports); + pr_debug("Untagged ports, VLAN %d: %llx\n", v, info.untagged_ports); + + priv->r->vlan_set_tagged(v, &info); + pr_debug("Tagged ports, VLAN %d: %llx\n", v, info.tagged_ports); + } + + mutex_unlock(&priv->reg_mutex); +} + +static int rtl83xx_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct rtl838x_vlan_info info; + struct rtl838x_switch_priv *priv = ds->priv; + int v; + u16 pvid; + + pr_debug("%s: port %d, vid_begin %d, vid_end %d, flags %x\n", __func__, + port, vlan->vid_begin, vlan->vid_end, vlan->flags); + + if (vlan->vid_begin > 4095 || vlan->vid_end > 4095) { + dev_err(priv->dev, "VLAN out of range: %d - %d", + vlan->vid_begin, vlan->vid_end); + return -ENOTSUPP; + } + + mutex_lock(&priv->reg_mutex); + pvid = priv->ports[port].pvid; + + for (v = vlan->vid_begin; v <= vlan->vid_end; v++) { + /* Reset to default if removing the current PVID */ + if (v == pvid) { + priv->r->vlan_port_pvid_set(port, PBVLAN_TYPE_INNER, 0); + priv->r->vlan_port_pvid_set(port, PBVLAN_TYPE_OUTER, 0); + priv->r->vlan_port_pvidmode_set(port, PBVLAN_TYPE_INNER, + PBVLAN_MODE_UNTAG_AND_PRITAG); + priv->r->vlan_port_pvidmode_set(port, PBVLAN_TYPE_OUTER, + PBVLAN_MODE_UNTAG_AND_PRITAG); + } + /* Get port memberships of this vlan */ + priv->r->vlan_tables_read(v, &info); + + /* remove port from both tables */ + info.untagged_ports &= (~BIT_ULL(port)); + info.tagged_ports &= (~BIT_ULL(port)); + + priv->r->vlan_set_untagged(v, info.untagged_ports); + pr_debug("Untagged ports, VLAN %d: %llx\n", v, info.untagged_ports); + + priv->r->vlan_set_tagged(v, &info); + pr_debug("Tagged ports, VLAN %d: %llx\n", v, info.tagged_ports); + } + mutex_unlock(&priv->reg_mutex); + + return 0; +} + +static void rtl83xx_setup_l2_uc_entry(struct rtl838x_l2_entry *e, int port, int vid, u64 mac) +{ + memset(e, 0, sizeof(*e)); + + e->type = L2_UNICAST; + e->valid = true; + + e->age = 3; + e->is_static = true; + + e->port = port; + + e->rvid = e->vid = vid; + e->is_ip_mc = e->is_ipv6_mc = false; + u64_to_ether_addr(mac, e->mac); +} + +static void rtl83xx_setup_l2_mc_entry(struct rtl838x_l2_entry *e, int vid, u64 mac, int mc_group) +{ + memset(e, 0, sizeof(*e)); + + e->type = L2_MULTICAST; + e->valid = true; + + e->mc_portmask_index = mc_group; + + e->rvid = e->vid = vid; + e->is_ip_mc = e->is_ipv6_mc = false; + u64_to_ether_addr(mac, e->mac); +} + +/* + * Uses the seed to identify a hash bucket in the L2 using the derived hash key and then loops + * over the entries in the bucket until either a matching entry is found or an empty slot + * Returns the filled in rtl838x_l2_entry and the index in the bucket when an entry was found + * when an empty slot was found and must exist is false, the index of the slot is returned + * when no slots are available returns -1 + */ +static int rtl83xx_find_l2_hash_entry(struct rtl838x_switch_priv *priv, u64 seed, + bool must_exist, struct rtl838x_l2_entry *e) +{ + int i, idx = -1; + u32 key = priv->r->l2_hash_key(priv, seed); + u64 entry; + + pr_debug("%s: using key %x, for seed %016llx\n", __func__, key, seed); + // Loop over 
all entries in the hash-bucket and over the second block on 93xx SoCs + for (i = 0; i < priv->l2_bucket_size; i++) { + entry = priv->r->read_l2_entry_using_hash(key, i, e); + pr_debug("valid %d, mac %016llx\n", e->valid, ether_addr_to_u64(&e->mac[0])); + if (must_exist && !e->valid) + continue; + if (!e->valid || ((entry & 0x0fffffffffffffffULL) == seed)) { + idx = i > 3 ? ((key >> 14) & 0xffff) | i >> 1 : ((key << 2) | i) & 0xffff; + break; + } + } + + return idx; +} + +/* + * Uses the seed to identify an entry in the CAM by looping over all its entries + * Returns the filled in rtl838x_l2_entry and the index in the CAM when an entry was found + * when an empty slot was found the index of the slot is returned + * when no slots are available returns -1 + */ +static int rtl83xx_find_l2_cam_entry(struct rtl838x_switch_priv *priv, u64 seed, + bool must_exist, struct rtl838x_l2_entry *e) +{ + int i, idx = -1; + u64 entry; + + for (i = 0; i < 64; i++) { + entry = priv->r->read_cam(i, e); + if (!must_exist && !e->valid) { + if (idx < 0) /* First empty entry? */ + idx = i; + break; + } else if ((entry & 0x0fffffffffffffffULL) == seed) { + pr_debug("Found entry in CAM\n"); + idx = i; + break; + } + } + return idx; +} + +static int rtl83xx_port_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) +{ + struct rtl838x_switch_priv *priv = ds->priv; + u64 mac = ether_addr_to_u64(addr); + struct rtl838x_l2_entry e; + int err = 0, idx; + u64 seed = priv->r->l2_hash_seed(mac, vid); + + if (priv->is_lagmember[port]) { + pr_debug("%s: %d is lag slave. ignore\n", __func__, port); + return 0; + } + + mutex_lock(&priv->reg_mutex); + + idx = rtl83xx_find_l2_hash_entry(priv, seed, false, &e); + + // Found an existing or empty entry + if (idx >= 0) { + rtl83xx_setup_l2_uc_entry(&e, port, vid, mac); + priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e); + goto out; + } + + // Hash buckets full, try CAM + rtl83xx_find_l2_cam_entry(priv, seed, false, &e); + + if (idx >= 0) { + rtl83xx_setup_l2_uc_entry(&e, port, vid, mac); + priv->r->write_cam(idx, &e); + goto out; + } + + err = -ENOTSUPP; +out: + mutex_unlock(&priv->reg_mutex); + return err; +} + +static int rtl83xx_port_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) +{ + struct rtl838x_switch_priv *priv = ds->priv; + u64 mac = ether_addr_to_u64(addr); + struct rtl838x_l2_entry e; + int err = 0, idx; + u64 seed = priv->r->l2_hash_seed(mac, vid); + + pr_debug("In %s, mac %llx, vid: %d\n", __func__, mac, vid); + mutex_lock(&priv->reg_mutex); + + idx = rtl83xx_find_l2_hash_entry(priv, seed, true, &e); + + if (idx >= 0) { + pr_debug("Found entry index %d, key %d and bucket %d\n", idx, idx >> 2, idx & 3); + e.valid = false; + priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e); + goto out; + } + + /* Check CAM for spillover from hash buckets */ + rtl83xx_find_l2_cam_entry(priv, seed, true, &e); + + if (idx >= 0) { + e.valid = false; + priv->r->write_cam(idx, &e); + goto out; + } + err = -ENOENT; +out: + mutex_unlock(&priv->reg_mutex); + return err; +} + +static int rtl83xx_port_fdb_dump(struct dsa_switch *ds, int port, + dsa_fdb_dump_cb_t *cb, void *data) +{ + struct rtl838x_l2_entry e; + struct rtl838x_switch_priv *priv = ds->priv; + int i; + + mutex_lock(&priv->reg_mutex); + + for (i = 0; i < priv->fib_entries; i++) { + priv->r->read_l2_entry_using_hash(i >> 2, i & 0x3, &e); + + if (!e.valid) + continue; + + if (e.port == port || e.port == RTL930X_PORT_IGNORE) + cb(e.mac, e.vid, e.is_static, 
data); + } + + for (i = 0; i < 64; i++) { + priv->r->read_cam(i, &e); + + if (!e.valid) + continue; + + if (e.port == port) + cb(e.mac, e.vid, e.is_static, data); + } + + mutex_unlock(&priv->reg_mutex); + return 0; +} + +static int rtl83xx_port_mdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct rtl838x_switch_priv *priv = ds->priv; + + if (priv->id >= 0x9300) + return -EOPNOTSUPP; + + return 0; +} + +static void rtl83xx_port_mdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct rtl838x_switch_priv *priv = ds->priv; + u64 mac = ether_addr_to_u64(mdb->addr); + struct rtl838x_l2_entry e; + int err = 0, idx; + int vid = mdb->vid; + u64 seed = priv->r->l2_hash_seed(mac, vid); + int mc_group; + + pr_debug("In %s port %d, mac %llx, vid: %d\n", __func__, port, mac, vid); + + if (priv->is_lagmember[port]) { + pr_debug("%s: %d is lag slave. ignore\n", __func__, port); + return; + } + + mutex_lock(&priv->reg_mutex); + + idx = rtl83xx_find_l2_hash_entry(priv, seed, false, &e); + + // Found an existing or empty entry + if (idx >= 0) { + if (e.valid) { + pr_debug("Found an existing entry %016llx, mc_group %d\n", + ether_addr_to_u64(e.mac), e.mc_portmask_index); + rtl83xx_mc_group_add_port(priv, e.mc_portmask_index, port); + } else { + pr_debug("New entry for seed %016llx\n", seed); + mc_group = rtl83xx_mc_group_alloc(priv, port); + if (mc_group < 0) { + err = -ENOTSUPP; + goto out; + } + rtl83xx_setup_l2_mc_entry(&e, vid, mac, mc_group); + priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e); + } + goto out; + } + + // Hash buckets full, try CAM + rtl83xx_find_l2_cam_entry(priv, seed, false, &e); + + if (idx >= 0) { + if (e.valid) { + pr_debug("Found existing CAM entry %016llx, mc_group %d\n", + ether_addr_to_u64(e.mac), e.mc_portmask_index); + rtl83xx_mc_group_add_port(priv, e.mc_portmask_index, port); + } else { + pr_debug("New entry\n"); + mc_group = rtl83xx_mc_group_alloc(priv, port); + if (mc_group < 0) { + err = -ENOTSUPP; + goto out; + } + rtl83xx_setup_l2_mc_entry(&e, vid, mac, mc_group); + priv->r->write_cam(idx, &e); + } + goto out; + } + + err = -ENOTSUPP; +out: + mutex_unlock(&priv->reg_mutex); + if (err) + dev_err(ds->dev, "failed to add MDB entry\n"); +} + +int rtl83xx_port_mdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct rtl838x_switch_priv *priv = ds->priv; + u64 mac = ether_addr_to_u64(mdb->addr); + struct rtl838x_l2_entry e; + int err = 0, idx; + int vid = mdb->vid; + u64 seed = priv->r->l2_hash_seed(mac, vid); + u64 portmask; + + pr_debug("In %s, port %d, mac %llx, vid: %d\n", __func__, port, mac, vid); + + if (priv->is_lagmember[port]) { + pr_info("%s: %d is lag slave. 
ignore\n", __func__, port); + return 0; + } + + mutex_lock(&priv->reg_mutex); + + idx = rtl83xx_find_l2_hash_entry(priv, seed, true, &e); + + if (idx >= 0) { + pr_debug("Found entry index %d, key %d and bucket %d\n", idx, idx >> 2, idx & 3); + portmask = rtl83xx_mc_group_del_port(priv, e.mc_portmask_index, port); + if (!portmask) { + e.valid = false; + priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e); + } + goto out; + } + + /* Check CAM for spillover from hash buckets */ + rtl83xx_find_l2_cam_entry(priv, seed, true, &e); + + if (idx >= 0) { + portmask = rtl83xx_mc_group_del_port(priv, e.mc_portmask_index, port); + if (!portmask) { + e.valid = false; + priv->r->write_cam(idx, &e); + } + goto out; + } + // TODO: Re-enable with a newer kernel: err = -ENOENT; +out: + mutex_unlock(&priv->reg_mutex); + return err; +} + +static int rtl83xx_port_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, + bool ingress) +{ + /* We support 4 mirror groups, one destination port per group */ + int group; + struct rtl838x_switch_priv *priv = ds->priv; + int ctrl_reg, dpm_reg, spm_reg; + + pr_debug("In %s\n", __func__); + + for (group = 0; group < 4; group++) { + if (priv->mirror_group_ports[group] == mirror->to_local_port) + break; + } + if (group >= 4) { + for (group = 0; group < 4; group++) { + if (priv->mirror_group_ports[group] < 0) + break; + } + } + + if (group >= 4) + return -ENOSPC; + + ctrl_reg = priv->r->mir_ctrl + group * 4; + dpm_reg = priv->r->mir_dpm + group * 4 * priv->port_width; + spm_reg = priv->r->mir_spm + group * 4 * priv->port_width; + + pr_debug("Using group %d\n", group); + mutex_lock(&priv->reg_mutex); + + if (priv->family_id == RTL8380_FAMILY_ID) { + /* Enable mirroring to port across VLANs (bit 11) */ + sw_w32(1 << 11 | (mirror->to_local_port << 4) | 1, ctrl_reg); + } else { + /* Enable mirroring to destination port */ + sw_w32((mirror->to_local_port << 4) | 1, ctrl_reg); + } + + if (ingress && (priv->r->get_port_reg_be(spm_reg) & (1ULL << port))) { + mutex_unlock(&priv->reg_mutex); + return -EEXIST; + } + if ((!ingress) && (priv->r->get_port_reg_be(dpm_reg) & (1ULL << port))) { + mutex_unlock(&priv->reg_mutex); + return -EEXIST; + } + + if (ingress) + priv->r->mask_port_reg_be(0, 1ULL << port, spm_reg); + else + priv->r->mask_port_reg_be(0, 1ULL << port, dpm_reg); + + priv->mirror_group_ports[group] = mirror->to_local_port; + mutex_unlock(&priv->reg_mutex); + return 0; +} + +static void rtl83xx_port_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) +{ + int group = 0; + struct rtl838x_switch_priv *priv = ds->priv; + int ctrl_reg, dpm_reg, spm_reg; + + pr_debug("In %s\n", __func__); + for (group = 0; group < 4; group++) { + if (priv->mirror_group_ports[group] == mirror->to_local_port) + break; + } + if (group >= 4) + return; + + ctrl_reg = priv->r->mir_ctrl + group * 4; + dpm_reg = priv->r->mir_dpm + group * 4 * priv->port_width; + spm_reg = priv->r->mir_spm + group * 4 * priv->port_width; + + mutex_lock(&priv->reg_mutex); + if (mirror->ingress) { + /* Ingress, clear source port matrix */ + priv->r->mask_port_reg_be(1ULL << port, 0, spm_reg); + } else { + /* Egress, clear destination port matrix */ + priv->r->mask_port_reg_be(1ULL << port, 0, dpm_reg); + } + + if (!(sw_r32(spm_reg) || sw_r32(dpm_reg))) { + priv->mirror_group_ports[group] = -1; + sw_w32(0, ctrl_reg); + } + + mutex_unlock(&priv->reg_mutex); +} + +static int rtl83xx_port_pre_bridge_flags(struct dsa_switch *ds, int port, unsigned long 
flags, struct netlink_ext_ack *extack) +{ + struct rtl838x_switch_priv *priv = ds->priv; + unsigned long features = 0; + pr_debug("%s: %d %lX\n", __func__, port, flags); + if (priv->r->enable_learning) + features |= BR_LEARNING; + if (priv->r->enable_flood) + features |= BR_FLOOD; + if (priv->r->enable_mcast_flood) + features |= BR_MCAST_FLOOD; + if (priv->r->enable_bcast_flood) + features |= BR_BCAST_FLOOD; + if (flags & ~(features)) + return -EINVAL; + + return 0; +} + +static int rtl83xx_port_bridge_flags(struct dsa_switch *ds, int port, unsigned long flags, struct netlink_ext_ack *extack) +{ + struct rtl838x_switch_priv *priv = ds->priv; + + pr_debug("%s: %d %lX\n", __func__, port, flags); + if (priv->r->enable_learning) + priv->r->enable_learning(port, !!(flags & BR_LEARNING)); + + if (priv->r->enable_flood) + priv->r->enable_flood(port, !!(flags & BR_FLOOD)); + + if (priv->r->enable_mcast_flood) + priv->r->enable_mcast_flood(port, !!(flags & BR_MCAST_FLOOD)); + + if (priv->r->enable_bcast_flood) + priv->r->enable_bcast_flood(port, !!(flags & BR_BCAST_FLOOD)); + + return 0; +} + +static bool rtl83xx_lag_can_offload(struct dsa_switch *ds, + struct net_device *lag, + struct netdev_lag_upper_info *info) +{ + int id; + + id = dsa_lag_id(ds->dst, lag); + if (id < 0 || id >= ds->num_lag_ids) + return false; + + if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { + return false; + } + if (info->hash_type != NETDEV_LAG_HASH_L2 && info->hash_type != NETDEV_LAG_HASH_L23) + return false; + + return true; +} + +static int rtl83xx_port_lag_change(struct dsa_switch *ds, int port) +{ + struct rtl838x_switch_priv *priv = ds->priv; + + pr_debug("%s: %d\n", __func__, port); + // Nothing to be done... + + return 0; +} + +static int rtl83xx_port_lag_join(struct dsa_switch *ds, int port, + struct net_device *lag, + struct netdev_lag_upper_info *info) +{ + struct rtl838x_switch_priv *priv = ds->priv; + int i, err = 0; + + if (!rtl83xx_lag_can_offload(ds, lag, info)) + return -EOPNOTSUPP; + + mutex_lock(&priv->reg_mutex); + + for (i = 0; i < priv->n_lags; i++) { + if ((!priv->lag_devs[i]) || (priv->lag_devs[i] == lag)) + break; + } + if (port >= priv->cpu_port) { + err = -EINVAL; + goto out; + } + pr_info("port_lag_join: group %d, port %d\n",i, port); + if (!priv->lag_devs[i]) + priv->lag_devs[i] = lag; + + if (priv->lag_primary[i]==-1) { + priv->lag_primary[i]=port; + } else + priv->is_lagmember[port] = 1; + + priv->lagmembers |= (1ULL << port); + + pr_debug("lag_members = %llX\n", priv->lagmembers); + err = rtl83xx_lag_add(priv->ds, i, port, info); + if (err) { + err = -EINVAL; + goto out; + } + +out: + mutex_unlock(&priv->reg_mutex); + return err; + +} + +static int rtl83xx_port_lag_leave(struct dsa_switch *ds, int port, + struct net_device *lag) +{ + int i, group = -1, err; + struct rtl838x_switch_priv *priv = ds->priv; + + mutex_lock(&priv->reg_mutex); + for (i=0;in_lags;i++) { + if (priv->lags_port_members[i] & BIT_ULL(port)) { + group = i; + break; + } + } + + if (group == -1) { + pr_info("port_lag_leave: port %d is not a member\n", port); + err = -EINVAL; + goto out; + } + + if (port >= priv->cpu_port) { + err = -EINVAL; + goto out; + } + pr_info("port_lag_del: group %d, port %d\n",group, port); + priv->lagmembers &=~ (1ULL << port); + priv->lag_primary[i] = -1; + priv->is_lagmember[port] = 0; + pr_debug("lag_members = %llX\n", priv->lagmembers); + err = rtl83xx_lag_del(priv->ds, group, port); + if (err) { + err = -EINVAL; + goto out; + } + if (!priv->lags_port_members[i]) + priv->lag_devs[i] = NULL; + 
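+ /* With the lag_devs[] slot cleared above once its last member is gone, the group can be handed out again by port_lag_join. */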
+out: + mutex_unlock(&priv->reg_mutex); + return 0; +} + +int dsa_phy_read(struct dsa_switch *ds, int phy_addr, int phy_reg) +{ + u32 val; + u32 offset = 0; + struct rtl838x_switch_priv *priv = ds->priv; + + if (phy_addr >= 24 && phy_addr <= 27 + && priv->ports[24].phy == PHY_RTL838X_SDS) { + if (phy_addr == 26) + offset = 0x100; + val = sw_r32(RTL838X_SDS4_FIB_REG0 + offset + (phy_reg << 2)) & 0xffff; + return val; + } + + read_phy(phy_addr, 0, phy_reg, &val); + return val; +} + +int dsa_phy_write(struct dsa_switch *ds, int phy_addr, int phy_reg, u16 val) +{ + u32 offset = 0; + struct rtl838x_switch_priv *priv = ds->priv; + + if (phy_addr >= 24 && phy_addr <= 27 + && priv->ports[24].phy == PHY_RTL838X_SDS) { + if (phy_addr == 26) + offset = 0x100; + sw_w32(val, RTL838X_SDS4_FIB_REG0 + offset + (phy_reg << 2)); + return 0; + } + return write_phy(phy_addr, 0, phy_reg, val); +} + +const struct dsa_switch_ops rtl83xx_switch_ops = { + .get_tag_protocol = rtl83xx_get_tag_protocol, + .setup = rtl83xx_setup, + + .phy_read = dsa_phy_read, + .phy_write = dsa_phy_write, + + .phylink_validate = rtl83xx_phylink_validate, + .phylink_mac_link_state = rtl83xx_phylink_mac_link_state, + .phylink_mac_config = rtl83xx_phylink_mac_config, + .phylink_mac_link_down = rtl83xx_phylink_mac_link_down, + .phylink_mac_link_up = rtl83xx_phylink_mac_link_up, + + .get_strings = rtl83xx_get_strings, + .get_ethtool_stats = rtl83xx_get_ethtool_stats, + .get_sset_count = rtl83xx_get_sset_count, + + .port_enable = rtl83xx_port_enable, + .port_disable = rtl83xx_port_disable, + + .get_mac_eee = rtl83xx_get_mac_eee, + .set_mac_eee = rtl83xx_set_mac_eee, + + .set_ageing_time = rtl83xx_set_ageing_time, + .port_bridge_join = rtl83xx_port_bridge_join, + .port_bridge_leave = rtl83xx_port_bridge_leave, + .port_stp_state_set = rtl83xx_port_stp_state_set, + .port_fast_age = rtl83xx_fast_age, + + .port_vlan_filtering = rtl83xx_vlan_filtering, + .port_vlan_prepare = rtl83xx_vlan_prepare, + .port_vlan_add = rtl83xx_vlan_add, + .port_vlan_del = rtl83xx_vlan_del, + + .port_fdb_add = rtl83xx_port_fdb_add, + .port_fdb_del = rtl83xx_port_fdb_del, + .port_fdb_dump = rtl83xx_port_fdb_dump, + + .port_mdb_prepare = rtl83xx_port_mdb_prepare, + .port_mdb_add = rtl83xx_port_mdb_add, + .port_mdb_del = rtl83xx_port_mdb_del, + + .port_mirror_add = rtl83xx_port_mirror_add, + .port_mirror_del = rtl83xx_port_mirror_del, + + .port_lag_change = rtl83xx_port_lag_change, + .port_lag_join = rtl83xx_port_lag_join, + .port_lag_leave = rtl83xx_port_lag_leave, + + .port_pre_bridge_flags = rtl83xx_port_pre_bridge_flags, + .port_bridge_flags = rtl83xx_port_bridge_flags, +}; + +const struct dsa_switch_ops rtl930x_switch_ops = { + .get_tag_protocol = rtl83xx_get_tag_protocol, + .setup = rtl93xx_setup, + + .phy_read = dsa_phy_read, + .phy_write = dsa_phy_write, + + .phylink_validate = rtl93xx_phylink_validate, + .phylink_mac_link_state = rtl93xx_phylink_mac_link_state, + .phylink_mac_config = rtl93xx_phylink_mac_config, + .phylink_mac_link_down = rtl93xx_phylink_mac_link_down, + .phylink_mac_link_up = rtl93xx_phylink_mac_link_up, + + .get_strings = rtl83xx_get_strings, + .get_ethtool_stats = rtl83xx_get_ethtool_stats, + .get_sset_count = rtl83xx_get_sset_count, + + .port_enable = rtl83xx_port_enable, + .port_disable = rtl83xx_port_disable, + + .get_mac_eee = rtl93xx_get_mac_eee, + .set_mac_eee = rtl83xx_set_mac_eee, + + .set_ageing_time = rtl83xx_set_ageing_time, + .port_bridge_join = rtl83xx_port_bridge_join, + .port_bridge_leave = rtl83xx_port_bridge_leave, + 
.port_stp_state_set = rtl83xx_port_stp_state_set, + .port_fast_age = rtl930x_fast_age, + + .port_vlan_filtering = rtl83xx_vlan_filtering, + .port_vlan_prepare = rtl83xx_vlan_prepare, + .port_vlan_add = rtl83xx_vlan_add, + .port_vlan_del = rtl83xx_vlan_del, + + .port_fdb_add = rtl83xx_port_fdb_add, + .port_fdb_del = rtl83xx_port_fdb_del, + .port_fdb_dump = rtl83xx_port_fdb_dump, + + .port_mdb_prepare = rtl83xx_port_mdb_prepare, + .port_mdb_add = rtl83xx_port_mdb_add, + .port_mdb_del = rtl83xx_port_mdb_del, + + .port_lag_change = rtl83xx_port_lag_change, + .port_lag_join = rtl83xx_port_lag_join, + .port_lag_leave = rtl83xx_port_lag_leave, + + .port_pre_bridge_flags = rtl83xx_port_pre_bridge_flags, + .port_bridge_flags = rtl83xx_port_bridge_flags, +}; diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/qos.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/qos.c new file mode 100644 index 0000000000..2fc8d37f3e --- /dev/null +++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/qos.c @@ -0,0 +1,576 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +#include +#include "rtl83xx.h" + +static struct rtl838x_switch_priv *switch_priv; +extern struct rtl83xx_soc_info soc_info; + +enum scheduler_type { + WEIGHTED_FAIR_QUEUE = 0, + WEIGHTED_ROUND_ROBIN, +}; + +int max_available_queue[] = {0, 1, 2, 3, 4, 5, 6, 7}; +int default_queue_weights[] = {1, 1, 1, 1, 1, 1, 1, 1}; +int dot1p_priority_remapping[] = {0, 1, 2, 3, 4, 5, 6, 7}; + +static void rtl839x_read_scheduling_table(int port) +{ + u32 cmd = 1 << 9 /* Execute cmd */ + | 0 << 8 /* Read */ + | 0 << 6 /* Table type 0b00 */ + | (port & 0x3f); + rtl839x_exec_tbl2_cmd(cmd); +} + +static void rtl839x_write_scheduling_table(int port) +{ + u32 cmd = 1 << 9 /* Execute cmd */ + | 1 << 8 /* Write */ + | 0 << 6 /* Table type 0b00 */ + | (port & 0x3f); + rtl839x_exec_tbl2_cmd(cmd); +} + +static void rtl839x_read_out_q_table(int port) +{ + u32 cmd = 1 << 9 /* Execute cmd */ + | 0 << 8 /* Read */ + | 2 << 6 /* Table type 0b10 */ + | (port & 0x3f); + rtl839x_exec_tbl2_cmd(cmd); +} + +static void rtl838x_storm_enable(struct rtl838x_switch_priv *priv, int port, bool enable) +{ + // Enable Storm control for that port for UC, MC, and BC + if (enable) + sw_w32(0x7, RTL838X_STORM_CTRL_LB_CTRL(port)); + else + sw_w32(0x0, RTL838X_STORM_CTRL_LB_CTRL(port)); +} + +u32 rtl838x_get_egress_rate(struct rtl838x_switch_priv *priv, int port) +{ + u32 rate; + + if (port > priv->cpu_port) + return 0; + rate = sw_r32(RTL838X_SCHED_P_EGR_RATE_CTRL(port)) & 0x3fff; + return rate; +} + +/* Sets the rate limit, 10MBit/s is equal to a rate value of 625 */ +int rtl838x_set_egress_rate(struct rtl838x_switch_priv *priv, int port, u32 rate) +{ + u32 old_rate; + + if (port > priv->cpu_port) + return -1; + + old_rate = sw_r32(RTL838X_SCHED_P_EGR_RATE_CTRL(port)); + sw_w32(rate, RTL838X_SCHED_P_EGR_RATE_CTRL(port)); + + return old_rate; +} + +/* Set the rate limit for a particular queue in Bits/s + * units of the rate is 16Kbps + */ +void rtl838x_egress_rate_queue_limit(struct rtl838x_switch_priv *priv, int port, + int queue, u32 rate) +{ + if (port > priv->cpu_port) + return; + if (queue > 7) + return; + sw_w32(rate, RTL838X_SCHED_Q_EGR_RATE_CTRL(port, queue)); +} + +static void rtl838x_rate_control_init(struct rtl838x_switch_priv *priv) +{ + int i; + + pr_info("Enabling Storm control\n"); + // TICK_PERIOD_PPS + if (priv->id == 0x8380) + sw_w32_mask(0x3ff << 20, 434 << 20, RTL838X_SCHED_LB_TICK_TKN_CTRL_0); + + // Set burst rate + 
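+ // (Assumed register layout: one 16-bit burst-size field per traffic class, BURST_0 for unicast and BURST_1 packing multicast and broadcast, hence 0x8000 in both halves below.)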
sw_w32(0x00008000, RTL838X_STORM_CTRL_BURST_0); // UC + sw_w32(0x80008000, RTL838X_STORM_CTRL_BURST_1); // MC and BC + + // Set burst Packets per Second to 32 + sw_w32(0x00000020, RTL838X_STORM_CTRL_BURST_PPS_0); // UC + sw_w32(0x00200020, RTL838X_STORM_CTRL_BURST_PPS_1); // MC and BC + + // Include IFG in storm control, rate based on bytes/s (0 = packets) + sw_w32_mask(0, 1 << 6 | 1 << 5, RTL838X_STORM_CTRL); + // Bandwidth control includes preamble and IFG (10 Bytes) + sw_w32_mask(0, 1, RTL838X_SCHED_CTRL); + + // On SoCs except RTL8382M, set burst size of port egress + if (priv->id != 0x8382) + sw_w32_mask(0xffff, 0x800, RTL838X_SCHED_LB_THR); + + /* Enable storm control on all ports with a PHY and limit rates, + * for UC and MC for both known and unknown addresses */ + for (i = 0; i < priv->cpu_port; i++) { + if (priv->ports[i].phy) { + sw_w32((1 << 18) | 0x8000, RTL838X_STORM_CTRL_PORT_UC(i)); + sw_w32((1 << 18) | 0x8000, RTL838X_STORM_CTRL_PORT_MC(i)); + sw_w32(0x8000, RTL838X_STORM_CTRL_PORT_BC(i)); + rtl838x_storm_enable(priv, i, true); + } + } + + // Attack prevention, enable all attack prevention measures + //sw_w32(0x1ffff, RTL838X_ATK_PRVNT_CTRL); + /* Attack prevention, drop (bit = 0) problematic packets on all ports. + * Setting bit = 1 means: trap to CPU + */ + //sw_w32(0, RTL838X_ATK_PRVNT_ACT); + // Enable attack prevention on all ports + //sw_w32(0x0fffffff, RTL838X_ATK_PRVNT_PORT_EN); +} + +/* Sets the rate limit, 10MBit/s is equal to a rate value of 625 */ +u32 rtl839x_get_egress_rate(struct rtl838x_switch_priv *priv, int port) +{ + u32 rate; + + pr_debug("%s: Getting egress rate on port %d to %d\n", __func__, port, rate); + if (port >= priv->cpu_port) + return 0; + + mutex_lock(&priv->reg_mutex); + + rtl839x_read_scheduling_table(port); + + rate = sw_r32(RTL839X_TBL_ACCESS_DATA_2(7)); + rate <<= 12; + rate |= sw_r32(RTL839X_TBL_ACCESS_DATA_2(8)) >> 20; + + mutex_unlock(&priv->reg_mutex); + + return rate; +} + +/* Sets the rate limit, 10MBit/s is equal to a rate value of 625, returns previous rate */ +int rtl839x_set_egress_rate(struct rtl838x_switch_priv *priv, int port, u32 rate) +{ + u32 old_rate; + + pr_debug("%s: Setting egress rate on port %d to %d\n", __func__, port, rate); + if (port >= priv->cpu_port) + return -1; + + mutex_lock(&priv->reg_mutex); + + rtl839x_read_scheduling_table(port); + + old_rate = sw_r32(RTL839X_TBL_ACCESS_DATA_2(7)) & 0xff; + old_rate <<= 12; + old_rate |= sw_r32(RTL839X_TBL_ACCESS_DATA_2(8)) >> 20; + sw_w32_mask(0xff, (rate >> 12) & 0xff, RTL839X_TBL_ACCESS_DATA_2(7)); + sw_w32_mask(0xfff << 20, rate << 20, RTL839X_TBL_ACCESS_DATA_2(8)); + + rtl839x_write_scheduling_table(port); + + mutex_unlock(&priv->reg_mutex); + + return old_rate; +} + +/* Set the rate limit for a particular queue in Bits/s + * units of the rate is 16Kbps + */ +void rtl839x_egress_rate_queue_limit(struct rtl838x_switch_priv *priv, int port, + int queue, u32 rate) +{ + int lsb = 128 + queue * 20; + int low_byte = 8 - (lsb >> 5); + int start_bit = lsb - (low_byte << 5); + u32 high_mask = 0xfffff >> (32 - start_bit); + + pr_debug("%s: Setting egress rate on port %d, queue %d to %d\n", + __func__, port, queue, rate); + if (port >= priv->cpu_port) + return; + if (queue > 7) + return; + + mutex_lock(&priv->reg_mutex); + + rtl839x_read_scheduling_table(port); + + sw_w32_mask(0xfffff << start_bit, (rate & 0xfffff) << start_bit, + RTL839X_TBL_ACCESS_DATA_2(low_byte)); + if (high_mask) + sw_w32_mask(high_mask, (rate & 0xfffff) >> (32- start_bit), + 
RTL839X_TBL_ACCESS_DATA_2(low_byte - 1)); + + rtl839x_write_scheduling_table(port); + + mutex_unlock(&priv->reg_mutex); +} + +static void rtl839x_rate_control_init(struct rtl838x_switch_priv *priv) +{ + int p, q; + + pr_info("%s: enabling rate control\n", __func__); + /* Tick length and token size settings for SoC with 250MHz, + * RTL8350 family would use 50MHz + */ + // Set the special tick period + sw_w32(976563, RTL839X_STORM_CTRL_SPCL_LB_TICK_TKN_CTRL); + // Ingress tick period and token length 10G + sw_w32(18 << 11 | 151, RTL839X_IGR_BWCTRL_LB_TICK_TKN_CTRL_0); + // Ingress tick period and token length 1G + sw_w32(245 << 11 | 129, RTL839X_IGR_BWCTRL_LB_TICK_TKN_CTRL_1); + // Egress tick period 10G, bytes/token 10G and tick period 1G, bytes/token 1G + sw_w32(18 << 24 | 151 << 16 | 185 << 8 | 97, RTL839X_SCHED_LB_TICK_TKN_CTRL); + // Set the tick period of the CPU and the Token Len + sw_w32(3815 << 8 | 1, RTL839X_SCHED_LB_TICK_TKN_PPS_CTRL); + + // Set the Weighted Fair Queueing burst size + sw_w32_mask(0xffff, 4500, RTL839X_SCHED_LB_THR); + + // Storm-rate calculation is based on bytes/sec (bit 5), include IFG (bit 6) + sw_w32_mask(0, 1 << 5 | 1 << 6, RTL839X_STORM_CTRL); + + /* Based on the rate control mode being bytes/s + * set tick period and token length for 10G + */ + sw_w32(18 << 10 | 151, RTL839X_STORM_CTRL_LB_TICK_TKN_CTRL_0); + /* and for 1G ports */ + sw_w32(246 << 10 | 129, RTL839X_STORM_CTRL_LB_TICK_TKN_CTRL_1); + + /* Set default burst rates on all ports (the same for 1G / 10G) with a PHY + * for UC, MC and BC + * For 1G port, the minimum burst rate is 1700, maximum 65535, + * For 10G ports it is 2650 and 1048575 respectively */ + for (p = 0; p < priv->cpu_port; p++) { + if (priv->ports[p].phy && !priv->ports[p].is10G) { + sw_w32_mask(0xffff, 0x8000, RTL839X_STORM_CTRL_PORT_UC_1(p)); + sw_w32_mask(0xffff, 0x8000, RTL839X_STORM_CTRL_PORT_MC_1(p)); + sw_w32_mask(0xffff, 0x8000, RTL839X_STORM_CTRL_PORT_BC_1(p)); + } + } + + /* Setup ingress/egress per-port rate control */ + for (p = 0; p < priv->cpu_port; p++) { + if (!priv->ports[p].phy) + continue; + + if (priv->ports[p].is10G) + rtl839x_set_egress_rate(priv, p, 625000); // 10GB/s + else + rtl839x_set_egress_rate(priv, p, 62500); // 1GB/s + + // Setup queues: all RTL83XX SoCs have 8 queues, maximum rate + for (q = 0; q < 8; q++) + rtl839x_egress_rate_queue_limit(priv, p, q, 0xfffff); + + if (priv->ports[p].is10G) { + // Set high threshold to maximum + sw_w32_mask(0xffff, 0xffff, RTL839X_IGR_BWCTRL_PORT_CTRL_10G_0(p)); + } else { + // Set high threshold to maximum + sw_w32_mask(0xffff, 0xffff, RTL839X_IGR_BWCTRL_PORT_CTRL_1(p)); + } + } + + // Set global ingress low watermark rate + sw_w32(65532, RTL839X_IGR_BWCTRL_CTRL_LB_THR); +} + + + +void rtl838x_setup_prio2queue_matrix(int *min_queues) +{ + int i; + u32 v; + + pr_info("Current Intprio2queue setting: %08x\n", sw_r32(RTL838X_QM_INTPRI2QID_CTRL)); + for (i = 0; i < MAX_PRIOS; i++) + v |= i << (min_queues[i] * 3); + sw_w32(v, RTL838X_QM_INTPRI2QID_CTRL); +} + +void rtl839x_setup_prio2queue_matrix(int *min_queues) +{ + int i, q; + + pr_info("Current Intprio2queue setting: %08x\n", sw_r32(RTL839X_QM_INTPRI2QID_CTRL(0))); + for (i = 0; i < MAX_PRIOS; i++) { + q = min_queues[i]; + sw_w32(i << (q * 3), RTL839X_QM_INTPRI2QID_CTRL(q)); + } +} + +/* Sets the CPU queue depending on the internal priority of a packet */ +void rtl83xx_setup_prio2queue_cpu_matrix(int *max_queues) +{ + int reg = soc_info.family == RTL8380_FAMILY_ID ? 
RTL838X_QM_PKT2CPU_INTPRI_MAP + : RTL839X_QM_PKT2CPU_INTPRI_MAP; + int i; + u32 v; + + pr_info("QM_PKT2CPU_INTPRI_MAP: %08x\n", sw_r32(reg)); + for (i = 0; i < MAX_PRIOS; i++) + v |= max_queues[i] << (i * 3); + sw_w32(v, reg); +} + +void rtl83xx_setup_default_prio2queue(void) +{ + if (soc_info.family == RTL8380_FAMILY_ID) { + rtl838x_setup_prio2queue_matrix(max_available_queue); + } else { + rtl839x_setup_prio2queue_matrix(max_available_queue); + } + rtl83xx_setup_prio2queue_cpu_matrix(max_available_queue); +} + +/* Sets the output queue assigned to a port, the port can be the CPU-port */ +void rtl839x_set_egress_queue(int port, int queue) +{ + sw_w32(queue << ((port % 10) *3), RTL839X_QM_PORT_QNUM(port)); +} + +/* Sets the priority assigned of an ingress port, the port can be the CPU-port */ +void rtl83xx_set_ingress_priority(int port, int priority) +{ + if (soc_info.family == RTL8380_FAMILY_ID) + sw_w32(priority << ((port % 10) *3), RTL838X_PRI_SEL_PORT_PRI(port)); + else + sw_w32(priority << ((port % 10) *3), RTL839X_PRI_SEL_PORT_PRI(port)); + +} + +int rtl839x_get_scheduling_algorithm(struct rtl838x_switch_priv *priv, int port) +{ + u32 v; + + mutex_lock(&priv->reg_mutex); + + rtl839x_read_scheduling_table(port); + v = sw_r32(RTL839X_TBL_ACCESS_DATA_2(8)); + + mutex_unlock(&priv->reg_mutex); + + if (v & BIT(19)) + return WEIGHTED_ROUND_ROBIN; + return WEIGHTED_FAIR_QUEUE; +} + +void rtl839x_set_scheduling_algorithm(struct rtl838x_switch_priv *priv, int port, + enum scheduler_type sched) +{ + enum scheduler_type t = rtl839x_get_scheduling_algorithm(priv, port); + u32 v, oam_state, oam_port_state; + u32 count; + int i, egress_rate; + + mutex_lock(&priv->reg_mutex); + /* Check whether we need to empty the egress queue of that port due to Errata E0014503 */ + if (sched == WEIGHTED_FAIR_QUEUE && t == WEIGHTED_ROUND_ROBIN && port != priv->cpu_port) { + // Read Operations, Adminstatrion and Management control register + oam_state = sw_r32(RTL839X_OAM_CTRL); + + // Get current OAM state + oam_port_state = sw_r32(RTL839X_OAM_PORT_ACT_CTRL(port)); + + // Disable OAM to block traffice + v = sw_r32(RTL839X_OAM_CTRL); + sw_w32_mask(0, 1, RTL839X_OAM_CTRL); + v = sw_r32(RTL839X_OAM_CTRL); + + // Set to trap action OAM forward (bits 1, 2) and OAM Mux Action Drop (bit 0) + sw_w32(0x2, RTL839X_OAM_PORT_ACT_CTRL(port)); + + // Set port egress rate to unlimited + egress_rate = rtl839x_set_egress_rate(priv, port, 0xFFFFF); + + // Wait until the egress used page count of that port is 0 + i = 0; + do { + usleep_range(100, 200); + rtl839x_read_out_q_table(port); + count = sw_r32(RTL839X_TBL_ACCESS_DATA_2(6)); + count >>= 20; + i++; + } while (i < 3500 && count > 0); + } + + // Actually set the scheduling algorithm + rtl839x_read_scheduling_table(port); + sw_w32_mask(BIT(19), sched ? 
BIT(19) : 0, RTL839X_TBL_ACCESS_DATA_2(8)); + rtl839x_write_scheduling_table(port); + + if (sched == WEIGHTED_FAIR_QUEUE && t == WEIGHTED_ROUND_ROBIN && port != priv->cpu_port) { + // Restore OAM state to control register + sw_w32(oam_state, RTL839X_OAM_CTRL); + + // Restore trap action state + sw_w32(oam_port_state, RTL839X_OAM_PORT_ACT_CTRL(port)); + + // Restore port egress rate + rtl839x_set_egress_rate(priv, port, egress_rate); + } + + mutex_unlock(&priv->reg_mutex); +} + +void rtl839x_set_scheduling_queue_weights(struct rtl838x_switch_priv *priv, int port, + int *queue_weights) +{ + int i, lsb, low_byte, start_bit, high_mask; + + mutex_lock(&priv->reg_mutex); + + rtl839x_read_scheduling_table(port); + + for (i = 0; i < 8; i++) { + lsb = 48 + i * 8; + low_byte = 8 - (lsb >> 5); + start_bit = lsb - (low_byte << 5); + high_mask = 0x3ff >> (32 - start_bit); + sw_w32_mask(0x3ff << start_bit, (queue_weights[i] & 0x3ff) << start_bit, + RTL839X_TBL_ACCESS_DATA_2(low_byte)); + if (high_mask) + sw_w32_mask(high_mask, (queue_weights[i] & 0x3ff) >> (32- start_bit), + RTL839X_TBL_ACCESS_DATA_2(low_byte - 1)); + } + + rtl839x_write_scheduling_table(port); + mutex_unlock(&priv->reg_mutex); +} + +void rtl838x_config_qos(void) +{ + int i, p; + u32 v; + + pr_info("Setting up RTL838X QoS\n"); + pr_info("RTL838X_PRI_SEL_TBL_CTRL(i): %08x\n", sw_r32(RTL838X_PRI_SEL_TBL_CTRL(0))); + rtl83xx_setup_default_prio2queue(); + + // Enable inner (bit 12) and outer (bit 13) priority remapping from DSCP + sw_w32_mask(0, BIT(12) | BIT(13), RTL838X_PRI_DSCP_INVLD_CTRL0); + + /* Set default weight for calculating internal priority, in prio selection group 0 + * Port based (prio 3), Port outer-tag (4), DSCP (5), Inner Tag (6), Outer Tag (7) + */ + v = 3 | (4 << 3) | (5 << 6) | (6 << 9) | (7 << 12); + sw_w32(v, RTL838X_PRI_SEL_TBL_CTRL(0)); + + // Set the inner and outer priority one-to-one to re-marked outer dot1p priority + v = 0; + for (p = 0; p < 8; p++) + v |= p << (3 * p); + sw_w32(v, RTL838X_RMK_OPRI_CTRL); + sw_w32(v, RTL838X_RMK_IPRI_CTRL); + + v = 0; + for (p = 0; p < 8; p++) + v |= (dot1p_priority_remapping[p] & 0x7) << (p * 3); + sw_w32(v, RTL838X_PRI_SEL_IPRI_REMAP); + + // On all ports set scheduler type to WFQ + for (i = 0; i <= soc_info.cpu_port; i++) + sw_w32(0, RTL838X_SCHED_P_TYPE_CTRL(i)); + + // Enable egress scheduler for CPU-Port + sw_w32_mask(0, BIT(8), RTL838X_SCHED_LB_CTRL(soc_info.cpu_port)); + + // Enable egress drop allways on + sw_w32_mask(0, BIT(11), RTL838X_FC_P_EGR_DROP_CTRL(soc_info.cpu_port)); + + // Give special trap frames priority 7 (BPDUs) and routing exceptions: + sw_w32_mask(0, 7 << 3 | 7, RTL838X_QM_PKT2CPU_INTPRI_2); + // Give RMA frames priority 7: + sw_w32_mask(0, 7, RTL838X_QM_PKT2CPU_INTPRI_1); +} + +void rtl839x_config_qos(void) +{ + int port, p, q; + u32 v; + struct rtl838x_switch_priv *priv = switch_priv; + + pr_info("Setting up RTL839X QoS\n"); + pr_info("RTL839X_PRI_SEL_TBL_CTRL(i): %08x\n", sw_r32(RTL839X_PRI_SEL_TBL_CTRL(0))); + rtl83xx_setup_default_prio2queue(); + + for (port = 0; port < soc_info.cpu_port; port++) + sw_w32(7, RTL839X_QM_PORT_QNUM(port)); + + // CPU-port gets queue number 7 + sw_w32(7, RTL839X_QM_PORT_QNUM(soc_info.cpu_port)); + + for (port = 0; port <= soc_info.cpu_port; port++) { + rtl83xx_set_ingress_priority(port, 0); + rtl839x_set_scheduling_algorithm(priv, port, WEIGHTED_FAIR_QUEUE); + rtl839x_set_scheduling_queue_weights(priv, port, default_queue_weights); + // Do re-marking based on outer tag + sw_w32_mask(0, BIT(port % 32), 
RTL839X_RMK_PORT_DEI_TAG_CTRL(port)); + } + + // Remap dot1p priorities to internal priority, for this the outer tag needs be re-marked + v = 0; + for (p = 0; p < 8; p++) + v |= (dot1p_priority_remapping[p] & 0x7) << (p * 3); + sw_w32(v, RTL839X_PRI_SEL_IPRI_REMAP); + + /* Configure Drop Precedence for Drop Eligible Indicator (DEI) + * Index 0: 0 + * Index 1: 2 + * Each indicator is 2 bits long + */ + sw_w32(2 << 2, RTL839X_PRI_SEL_DEI2DP_REMAP); + + // Re-mark DEI: 4 bit-fields of 2 bits each, field 0 is bits 0-1, ... + sw_w32((0x1 << 2) | (0x1 << 4), RTL839X_RMK_DEI_CTRL); + + /* Set Congestion avoidance drop probability to 0 for drop precedences 0-2 (bits 24-31) + * low threshold (bits 0-11) to 4095 and high threshold (bits 12-23) to 4095 + * Weighted Random Early Detection (WRED) is used + */ + sw_w32(4095 << 12| 4095, RTL839X_WRED_PORT_THR_CTRL(0)); + sw_w32(4095 << 12| 4095, RTL839X_WRED_PORT_THR_CTRL(1)); + sw_w32(4095 << 12| 4095, RTL839X_WRED_PORT_THR_CTRL(2)); + + /* Set queue-based congestion avoidance properties, register fields are as + * for forward RTL839X_WRED_PORT_THR_CTRL + */ + for (q = 0; q < 8; q++) { + sw_w32(255 << 24 | 78 << 12 | 68, RTL839X_WRED_QUEUE_THR_CTRL(q, 0)); + sw_w32(255 << 24 | 74 << 12 | 64, RTL839X_WRED_QUEUE_THR_CTRL(q, 0)); + sw_w32(255 << 24 | 70 << 12 | 60, RTL839X_WRED_QUEUE_THR_CTRL(q, 0)); + } +} + +void __init rtl83xx_setup_qos(struct rtl838x_switch_priv *priv) +{ + switch_priv = priv; + + pr_info("In %s\n", __func__); + + if (priv->family_id == RTL8380_FAMILY_ID) + return rtl838x_config_qos(); + else if (priv->family_id == RTL8390_FAMILY_ID) + return rtl839x_config_qos(); + + if (priv->family_id == RTL8380_FAMILY_ID) + rtl838x_rate_control_init(priv); + else if (priv->family_id == RTL8390_FAMILY_ID) + rtl839x_rate_control_init(priv); + +} diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl838x.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl838x.c new file mode 100644 index 0000000000..9ce5098979 --- /dev/null +++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl838x.c @@ -0,0 +1,2054 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include + +#include "rtl83xx.h" + +#define RTL838X_VLAN_PORT_TAG_STS_UNTAG 0x0 +#define RTL838X_VLAN_PORT_TAG_STS_TAGGED 0x1 +#define RTL838X_VLAN_PORT_TAG_STS_PRIORITY_TAGGED 0x2 + +#define RTL838X_VLAN_PORT_TAG_STS_CTRL_BASE 0xA530 +/* port 0-28 */ +#define RTL838X_VLAN_PORT_TAG_STS_CTRL(port) \ + RTL838X_VLAN_PORT_TAG_STS_CTRL_BASE + (port << 2) + +#define RTL838X_VLAN_PORT_TAG_STS_CTRL_EGR_P_OTAG_KEEP_MASK GENMASK(11,10) +#define RTL838X_VLAN_PORT_TAG_STS_CTRL_EGR_P_ITAG_KEEP_MASK GENMASK(9,8) +#define RTL838X_VLAN_PORT_TAG_STS_CTRL_IGR_P_OTAG_KEEP_MASK GENMASK(7,6) +#define RTL838X_VLAN_PORT_TAG_STS_CTRL_IGR_P_ITAG_KEEP_MASK GENMASK(5,4) +#define RTL838X_VLAN_PORT_TAG_STS_CTRL_OTAG_STS_MASK GENMASK(3,2) +#define RTL838X_VLAN_PORT_TAG_STS_CTRL_ITAG_STS_MASK GENMASK(1,0) + +extern struct mutex smi_lock; + +// see_dal_maple_acl_log2PhyTmplteField and src/app/diag_v2/src/diag_acl.c +/* Definition of the RTL838X-specific template field IDs as used in the PIE */ +enum template_field_id { + TEMPLATE_FIELD_SPMMASK = 0, + TEMPLATE_FIELD_SPM0 = 1, // Source portmask ports 0-15 + TEMPLATE_FIELD_SPM1 = 2, // Source portmask ports 16-28 + TEMPLATE_FIELD_RANGE_CHK = 3, + TEMPLATE_FIELD_DMAC0 = 4, // Destination MAC [15:0] + TEMPLATE_FIELD_DMAC1 = 5, // Destination MAC [31:16] + TEMPLATE_FIELD_DMAC2 = 6, // Destination MAC [47:32] + TEMPLATE_FIELD_SMAC0 = 7, 
// Source MAC [15:0] + TEMPLATE_FIELD_SMAC1 = 8, // Source MAC [31:16] + TEMPLATE_FIELD_SMAC2 = 9, // Source MAC [47:32] + TEMPLATE_FIELD_ETHERTYPE = 10, // Ethernet typ + TEMPLATE_FIELD_OTAG = 11, // Outer VLAN tag + TEMPLATE_FIELD_ITAG = 12, // Inner VLAN tag + TEMPLATE_FIELD_SIP0 = 13, // IPv4 or IPv6 source IP[15:0] or ARP/RARP + // source protocol address in header + TEMPLATE_FIELD_SIP1 = 14, // IPv4 or IPv6 source IP[31:16] or ARP/RARP + TEMPLATE_FIELD_DIP0 = 15, // IPv4 or IPv6 destination IP[15:0] + TEMPLATE_FIELD_DIP1 = 16, // IPv4 or IPv6 destination IP[31:16] + TEMPLATE_FIELD_IP_TOS_PROTO = 17, // IPv4 TOS/IPv6 traffic class and + // IPv4 proto/IPv6 next header fields + TEMPLATE_FIELD_L34_HEADER = 18, // packet with extra tag and IPv6 with auth, dest, + // frag, route, hop-by-hop option header, + // IGMP type, TCP flag + TEMPLATE_FIELD_L4_SPORT = 19, // TCP/UDP source port + TEMPLATE_FIELD_L4_DPORT = 20, // TCP/UDP destination port + TEMPLATE_FIELD_ICMP_IGMP = 21, + TEMPLATE_FIELD_IP_RANGE = 22, + TEMPLATE_FIELD_FIELD_SELECTOR_VALID = 23, // Field selector mask + TEMPLATE_FIELD_FIELD_SELECTOR_0 = 24, + TEMPLATE_FIELD_FIELD_SELECTOR_1 = 25, + TEMPLATE_FIELD_FIELD_SELECTOR_2 = 26, + TEMPLATE_FIELD_FIELD_SELECTOR_3 = 27, + TEMPLATE_FIELD_SIP2 = 28, // IPv6 source IP[47:32] + TEMPLATE_FIELD_SIP3 = 29, // IPv6 source IP[63:48] + TEMPLATE_FIELD_SIP4 = 30, // IPv6 source IP[79:64] + TEMPLATE_FIELD_SIP5 = 31, // IPv6 source IP[95:80] + TEMPLATE_FIELD_SIP6 = 32, // IPv6 source IP[111:96] + TEMPLATE_FIELD_SIP7 = 33, // IPv6 source IP[127:112] + TEMPLATE_FIELD_DIP2 = 34, // IPv6 destination IP[47:32] + TEMPLATE_FIELD_DIP3 = 35, // IPv6 destination IP[63:48] + TEMPLATE_FIELD_DIP4 = 36, // IPv6 destination IP[79:64] + TEMPLATE_FIELD_DIP5 = 37, // IPv6 destination IP[95:80] + TEMPLATE_FIELD_DIP6 = 38, // IPv6 destination IP[111:96] + TEMPLATE_FIELD_DIP7 = 39, // IPv6 destination IP[127:112] + TEMPLATE_FIELD_FWD_VID = 40, // Forwarding VLAN-ID + TEMPLATE_FIELD_FLOW_LABEL = 41, +}; + +/* + * The RTL838X SoCs use 5 fixed templates with definitions for which data fields are to + * be copied from the Ethernet Frame header into the 12 User-definable fields of the Packet + * Inspection Engine's buffer. The following defines the field contents for each of the fixed + * templates. Additionally, 3 user-definable templates can be set up via the definitions + * in RTL838X_ACL_TMPLTE_CTRL control registers. 
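+ * As an example, the first fixed template below is a pure L2 pattern: source-port masks, outer and inner tag, source and destination MAC, ethertype and the range-check result.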
+ * TODO: See all src/app/diag_v2/src/diag_pie.c + */ +#define N_FIXED_TEMPLATES 5 +static enum template_field_id fixed_templates[N_FIXED_TEMPLATES][N_FIXED_FIELDS] = +{ + { + TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1, TEMPLATE_FIELD_OTAG, + TEMPLATE_FIELD_SMAC0, TEMPLATE_FIELD_SMAC1, TEMPLATE_FIELD_SMAC2, + TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2, + TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_ITAG, TEMPLATE_FIELD_RANGE_CHK + }, { + TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0, + TEMPLATE_FIELD_DIP1,TEMPLATE_FIELD_IP_TOS_PROTO, TEMPLATE_FIELD_L4_SPORT, + TEMPLATE_FIELD_L4_DPORT, TEMPLATE_FIELD_ICMP_IGMP, TEMPLATE_FIELD_ITAG, + TEMPLATE_FIELD_RANGE_CHK, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1 + }, { + TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2, + TEMPLATE_FIELD_ITAG, TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_IP_TOS_PROTO, + TEMPLATE_FIELD_L4_DPORT, TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_SIP0, + TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0, TEMPLATE_FIELD_DIP1 + }, { + TEMPLATE_FIELD_DIP0, TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_DIP2, + TEMPLATE_FIELD_DIP3, TEMPLATE_FIELD_DIP4, TEMPLATE_FIELD_DIP5, + TEMPLATE_FIELD_DIP6, TEMPLATE_FIELD_DIP7, TEMPLATE_FIELD_L4_DPORT, + TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_ICMP_IGMP, TEMPLATE_FIELD_IP_TOS_PROTO + }, { + TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_SIP2, + TEMPLATE_FIELD_SIP3, TEMPLATE_FIELD_SIP4, TEMPLATE_FIELD_SIP5, + TEMPLATE_FIELD_SIP6, TEMPLATE_FIELD_SIP7, TEMPLATE_FIELD_ITAG, + TEMPLATE_FIELD_RANGE_CHK, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1 + }, +}; + +void rtl838x_print_matrix(void) +{ + unsigned volatile int *ptr8; + int i; + + ptr8 = RTL838X_SW_BASE + RTL838X_PORT_ISO_CTRL(0); + for (i = 0; i < 28; i += 8) + pr_debug("> %8x %8x %8x %8x %8x %8x %8x %8x\n", + ptr8[i + 0], ptr8[i + 1], ptr8[i + 2], ptr8[i + 3], + ptr8[i + 4], ptr8[i + 5], ptr8[i + 6], ptr8[i + 7]); + pr_debug("CPU_PORT> %8x\n", ptr8[28]); +} + +static inline int rtl838x_port_iso_ctrl(int p) +{ + return RTL838X_PORT_ISO_CTRL(p); +} + +static inline void rtl838x_exec_tbl0_cmd(u32 cmd) +{ + sw_w32(cmd, RTL838X_TBL_ACCESS_CTRL_0); + do { } while (sw_r32(RTL838X_TBL_ACCESS_CTRL_0) & BIT(15)); +} + +static inline void rtl838x_exec_tbl1_cmd(u32 cmd) +{ + sw_w32(cmd, RTL838X_TBL_ACCESS_CTRL_1); + do { } while (sw_r32(RTL838X_TBL_ACCESS_CTRL_1) & BIT(15)); +} + +static inline int rtl838x_tbl_access_data_0(int i) +{ + return RTL838X_TBL_ACCESS_DATA_0(i); +} + +static void rtl838x_vlan_tables_read(u32 vlan, struct rtl838x_vlan_info *info) +{ + u32 v; + // Read VLAN table (0) via register 0 + struct table_reg *r = rtl_table_get(RTL8380_TBL_0, 0); + + rtl_table_read(r, vlan); + info->tagged_ports = sw_r32(rtl_table_data(r, 0)); + v = sw_r32(rtl_table_data(r, 1)); + pr_debug("VLAN_READ %d: %016llx %08x\n", vlan, info->tagged_ports, v); + rtl_table_release(r); + + info->profile_id = v & 0x7; + info->hash_mc_fid = !!(v & 0x8); + info->hash_uc_fid = !!(v & 0x10); + info->fid = (v >> 5) & 0x3f; + + // Read UNTAG table (0) via table register 1 + r = rtl_table_get(RTL8380_TBL_1, 0); + rtl_table_read(r, vlan); + info->untagged_ports = sw_r32(rtl_table_data(r, 0)); + rtl_table_release(r); +} + +static void rtl838x_vlan_set_tagged(u32 vlan, struct rtl838x_vlan_info *info) +{ + u32 v; + // Access VLAN table (0) via register 0 + struct table_reg *r = rtl_table_get(RTL8380_TBL_0, 0); + + sw_w32(info->tagged_ports, rtl_table_data(r, 0)); + + v = info->profile_id; + v |= info->hash_mc_fid ? 0x8 : 0; + v |= info->hash_uc_fid ? 
0x10 : 0; + v |= ((u32)info->fid) << 5; + sw_w32(v, rtl_table_data(r, 1)); + + rtl_table_write(r, vlan); + rtl_table_release(r); +} + +static void rtl838x_vlan_set_untagged(u32 vlan, u64 portmask) +{ + // Access UNTAG table (0) via register 1 + struct table_reg *r = rtl_table_get(RTL8380_TBL_1, 0); + + sw_w32(portmask & 0x1fffffff, rtl_table_data(r, 0)); + rtl_table_write(r, vlan); + rtl_table_release(r); +} + +/* Sets the L2 forwarding to be based on either the inner VLAN tag or the outer + */ +static void rtl838x_vlan_fwd_on_inner(int port, bool is_set) +{ + if (is_set) + sw_w32_mask(BIT(port), 0, RTL838X_VLAN_PORT_FWD); + else + sw_w32_mask(0, BIT(port), RTL838X_VLAN_PORT_FWD); +} + +static u64 rtl838x_l2_hash_seed(u64 mac, u32 vid) +{ + return mac << 12 | vid; +} + +/* + * Applies the same hash algorithm as the one used currently by the ASIC to the seed + * and returns a key into the L2 hash table + */ +static u32 rtl838x_l2_hash_key(struct rtl838x_switch_priv *priv, u64 seed) +{ + u32 h1, h2, h3, h; + + if (sw_r32(priv->r->l2_ctrl_0) & 1) { + h1 = (seed >> 11) & 0x7ff; + h1 = ((h1 & 0x1f) << 6) | ((h1 >> 5) & 0x3f); + + h2 = (seed >> 33) & 0x7ff; + h2 = ((h2 & 0x3f) << 5) | ((h2 >> 6) & 0x1f); + + h3 = (seed >> 44) & 0x7ff; + h3 = ((h3 & 0x7f) << 4) | ((h3 >> 7) & 0xf); + + h = h1 ^ h2 ^ h3 ^ ((seed >> 55) & 0x1ff); + h ^= ((seed >> 22) & 0x7ff) ^ (seed & 0x7ff); + } else { + h = ((seed >> 55) & 0x1ff) ^ ((seed >> 44) & 0x7ff) + ^ ((seed >> 33) & 0x7ff) ^ ((seed >> 22) & 0x7ff) + ^ ((seed >> 11) & 0x7ff) ^ (seed & 0x7ff); + } + + return h; +} + +static inline int rtl838x_mac_force_mode_ctrl(int p) +{ + return RTL838X_MAC_FORCE_MODE_CTRL + (p << 2); +} + +static inline int rtl838x_mac_port_ctrl(int p) +{ + return RTL838X_MAC_PORT_CTRL(p); +} + +static inline int rtl838x_l2_port_new_salrn(int p) +{ + return RTL838X_L2_PORT_NEW_SALRN(p); +} + +static inline int rtl838x_l2_port_new_sa_fwd(int p) +{ + return RTL838X_L2_PORT_NEW_SA_FWD(p); +} + +static inline int rtl838x_mac_link_spd_sts(int p) +{ + return RTL838X_MAC_LINK_SPD_STS(p); +} + +inline static int rtl838x_trk_mbr_ctr(int group) +{ + return RTL838X_TRK_MBR_CTR + (group << 2); +} + +/* + * Fills an L2 entry structure from the SoC registers + */ +static void rtl838x_fill_l2_entry(u32 r[], struct rtl838x_l2_entry *e) +{ + /* Table contains different entry types, we need to identify the right one: + * Check for MC entries, first + * In contrast to the RTL93xx SoCs, there is no valid bit, use heuristics to + * identify valid entries + */ + e->is_ip_mc = !!(r[0] & BIT(22)); + e->is_ipv6_mc = !!(r[0] & BIT(21)); + e->type = L2_INVALID; + + if (!e->is_ip_mc && !e->is_ipv6_mc) { + e->mac[0] = (r[1] >> 20); + e->mac[1] = (r[1] >> 12); + e->mac[2] = (r[1] >> 4); + e->mac[3] = (r[1] & 0xf) << 4 | (r[2] >> 28); + e->mac[4] = (r[2] >> 20); + e->mac[5] = (r[2] >> 12); + + e->rvid = r[2] & 0xfff; + e->vid = r[0] & 0xfff; + + /* Is it a unicast entry? 
check multicast bit */ + if (!(e->mac[0] & 1)) { + e->is_static = !!((r[0] >> 19) & 1); + e->port = (r[0] >> 12) & 0x1f; + e->block_da = !!(r[1] & BIT(30)); + e->block_sa = !!(r[1] & BIT(31)); + e->suspended = !!(r[1] & BIT(29)); + e->next_hop = !!(r[1] & BIT(28)); + if (e->next_hop) { + pr_debug("Found next hop entry, need to read extra data\n"); + e->nh_vlan_target = !!(r[0] & BIT(9)); + e->nh_route_id = r[0] & 0x1ff; + e->vid = e->rvid; + } + e->age = (r[0] >> 17) & 0x3; + e->valid = true; + + /* A valid entry has one of mutli-cast, aging, sa/da-blocking, + * next-hop or static entry bit set */ + if (!(r[0] & 0x007c0000) && !(r[1] & 0xd0000000)) + e->valid = false; + else + e->type = L2_UNICAST; + } else { // L2 multicast + pr_debug("Got L2 MC entry: %08x %08x %08x\n", r[0], r[1], r[2]); + e->valid = true; + e->type = L2_MULTICAST; + e->mc_portmask_index = (r[0] >> 12) & 0x1ff; + } + } else { // IPv4 and IPv6 multicast + e->valid = true; + e->mc_portmask_index = (r[0] >> 12) & 0x1ff; + e->mc_gip = (r[1] << 20) | (r[2] >> 12); + e->rvid = r[2] & 0xfff; + } + if (e->is_ip_mc) + e->type = IP4_MULTICAST; + if (e->is_ipv6_mc) + e->type = IP6_MULTICAST; +} + +/* + * Fills the 3 SoC table registers r[] with the information of in the rtl838x_l2_entry + */ +static void rtl838x_fill_l2_row(u32 r[], struct rtl838x_l2_entry *e) +{ + u64 mac = ether_addr_to_u64(e->mac); + + if (!e->valid) { + r[0] = r[1] = r[2] = 0; + return; + } + + r[0] = e->is_ip_mc ? BIT(22) : 0; + r[0] |= e->is_ipv6_mc ? BIT(21) : 0; + + if (!e->is_ip_mc && !e->is_ipv6_mc) { + r[1] = mac >> 20; + r[2] = (mac & 0xfffff) << 12; + + /* Is it a unicast entry? check multicast bit */ + if (!(e->mac[0] & 1)) { + r[0] |= e->is_static ? BIT(19) : 0; + r[0] |= (e->port & 0x3f) << 12; + r[0] |= e->vid; + r[1] |= e->block_da ? BIT(30) : 0; + r[1] |= e->block_sa ? BIT(31) : 0; + r[1] |= e->suspended ? BIT(29) : 0; + r[2] |= e->rvid & 0xfff; + if (e->next_hop) { + r[1] |= BIT(28); + r[0] |= e->nh_vlan_target ? 
BIT(9) : 0; + r[0] |= e->nh_route_id & 0x1ff; + } + r[0] |= (e->age & 0x3) << 17; + } else { // L2 Multicast + r[0] |= (e->mc_portmask_index & 0x1ff) << 12; + r[2] |= e->rvid & 0xfff; + r[0] |= e->vid & 0xfff; + pr_debug("FILL MC: %08x %08x %08x\n", r[0], r[1], r[2]); + } + } else { // IPv4 and IPv6 multicast + r[0] |= (e->mc_portmask_index & 0x1ff) << 12; + r[1] = e->mc_gip >> 20; + r[2] = e->mc_gip << 12; + r[2] |= e->rvid; + } +} + +/* + * Read an L2 UC or MC entry out of a hash bucket of the L2 forwarding table + * hash is the id of the bucket and pos is the position of the entry in that bucket + * The data read from the SoC is filled into rtl838x_l2_entry + */ +static u64 rtl838x_read_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e) +{ + u64 entry; + u32 r[3]; + struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 0); // Access L2 Table 0 + u32 idx = (0 << 14) | (hash << 2) | pos; // Search SRAM, with hash and at pos in bucket + int i; + + rtl_table_read(q, idx); + for (i= 0; i < 3; i++) + r[i] = sw_r32(rtl_table_data(q, i)); + + rtl_table_release(q); + + rtl838x_fill_l2_entry(r, e); + if (!e->valid) + return 0; + + entry = (((u64) r[1]) << 32) | (r[2]); // mac and vid concatenated as hash seed + return entry; +} + +static void rtl838x_write_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e) +{ + u32 r[3]; + struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 0); + int i; + + u32 idx = (0 << 14) | (hash << 2) | pos; // Access SRAM, with hash and at pos in bucket + + rtl838x_fill_l2_row(r, e); + + for (i= 0; i < 3; i++) + sw_w32(r[i], rtl_table_data(q, i)); + + rtl_table_write(q, idx); + rtl_table_release(q); +} + +static u64 rtl838x_read_cam(int idx, struct rtl838x_l2_entry *e) +{ + u64 entry; + u32 r[3]; + struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 1); // Access L2 Table 1 + int i; + + rtl_table_read(q, idx); + for (i= 0; i < 3; i++) + r[i] = sw_r32(rtl_table_data(q, i)); + + rtl_table_release(q); + + rtl838x_fill_l2_entry(r, e); + if (!e->valid) + return 0; + + pr_debug("Found in CAM: R1 %x R2 %x R3 %x\n", r[0], r[1], r[2]); + + // Return MAC with concatenated VID ac concatenated ID + entry = (((u64) r[1]) << 32) | r[2]; + return entry; +} + +static void rtl838x_write_cam(int idx, struct rtl838x_l2_entry *e) +{ + u32 r[3]; + struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 1); // Access L2 Table 1 + int i; + + rtl838x_fill_l2_row(r, e); + + for (i= 0; i < 3; i++) + sw_w32(r[i], rtl_table_data(q, i)); + + rtl_table_write(q, idx); + rtl_table_release(q); +} + +static u64 rtl838x_read_mcast_pmask(int idx) +{ + u32 portmask; + // Read MC_PMSK (2) via register RTL8380_TBL_L2 + struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 2); + + rtl_table_read(q, idx); + portmask = sw_r32(rtl_table_data(q, 0)); + rtl_table_release(q); + + return portmask; +} + +static void rtl838x_write_mcast_pmask(int idx, u64 portmask) +{ + // Access MC_PMSK (2) via register RTL8380_TBL_L2 + struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 2); + + sw_w32(((u32)portmask) & 0x1fffffff, rtl_table_data(q, 0)); + rtl_table_write(q, idx); + rtl_table_release(q); +} + +static void rtl838x_vlan_profile_setup(int profile) +{ + u32 pmask_id = UNKNOWN_MC_PMASK; + // Enable L2 Learning BIT 0, portmask UNKNOWN_MC_PMASK for unknown MC traffic flooding + u32 p = 1 | pmask_id << 1 | pmask_id << 10 | pmask_id << 19; + + sw_w32(p, RTL838X_VLAN_PROFILE(profile)); + + /* RTL8380 and RTL8390 use an index into the portmask table to set the + * unknown multicast portmask, setup a default 
at a safe location + * On RTL93XX, the portmask is directly set in the profile, + * see e.g. rtl9300_vlan_profile_setup + */ + rtl838x_write_mcast_pmask(UNKNOWN_MC_PMASK, 0x1fffffff); +} + +static void rtl838x_l2_learning_setup(void) +{ + /* Set portmask for broadcast traffic and unknown unicast address flooding + * to the reserved entry in the portmask table used also for + * multicast flooding */ + sw_w32(UNKNOWN_MC_PMASK << 12 | UNKNOWN_MC_PMASK, RTL838X_L2_FLD_PMSK); + + /* Enable learning constraint system-wide (bit 0), per-port (bit 1) + * and per vlan (bit 2) */ + sw_w32(0x7, RTL838X_L2_LRN_CONSTRT_EN); + + // Limit learning to maximum: 16k entries, after that just flood (bits 0-1) + sw_w32((0x3fff << 2) | 0, RTL838X_L2_LRN_CONSTRT); + + // Do not trap ARP packets to CPU_PORT + sw_w32(0, RTL838X_SPCL_TRAP_ARP_CTRL); +} + +static void rtl838x_enable_learning(int port, bool enable) +{ + // Limit learning to maximum: 16k entries + + sw_w32_mask(0x3fff << 2, enable ? (0x3fff << 2) : 0, + RTL838X_L2_PORT_LRN_CONSTRT + (port << 2)); +} + +static void rtl838x_enable_flood(int port, bool enable) +{ + /* + * 0: Forward + * 1: Disable + * 2: to CPU + * 3: Copy to CPU + */ + sw_w32_mask(0x3, enable ? 0 : 1, + RTL838X_L2_PORT_LRN_CONSTRT + (port << 2)); +} + +static void rtl838x_enable_mcast_flood(int port, bool enable) +{ + +} + +static void rtl838x_enable_bcast_flood(int port, bool enable) +{ + +} + +static void rtl838x_stp_get(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]) +{ + int i; + u32 cmd = 1 << 15 /* Execute cmd */ + | 1 << 14 /* Read */ + | 2 << 12 /* Table type 0b10 */ + | (msti & 0xfff); + priv->r->exec_tbl0_cmd(cmd); + + for (i = 0; i < 2; i++) + port_state[i] = sw_r32(priv->r->tbl_access_data_0(i)); +} + +static void rtl838x_stp_set(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]) +{ + int i; + u32 cmd = 1 << 15 /* Execute cmd */ + | 0 << 14 /* Write */ + | 2 << 12 /* Table type 0b10 */ + | (msti & 0xfff); + + for (i = 0; i < 2; i++) + sw_w32(port_state[i], priv->r->tbl_access_data_0(i)); + priv->r->exec_tbl0_cmd(cmd); +} + +u64 rtl838x_traffic_get(int source) +{ + return rtl838x_get_port_reg(rtl838x_port_iso_ctrl(source)); +} + +void rtl838x_traffic_set(int source, u64 dest_matrix) +{ + rtl838x_set_port_reg(dest_matrix, rtl838x_port_iso_ctrl(source)); +} + +void rtl838x_traffic_enable(int source, int dest) +{ + rtl838x_mask_port_reg(0, BIT(dest), rtl838x_port_iso_ctrl(source)); +} + +void rtl838x_traffic_disable(int source, int dest) +{ + rtl838x_mask_port_reg(BIT(dest), 0, rtl838x_port_iso_ctrl(source)); +} + +/* + * Enables or disables the EEE/EEEP capability of a port + */ +static void rtl838x_port_eee_set(struct rtl838x_switch_priv *priv, int port, bool enable) +{ + u32 v; + + // This works only for Ethernet ports, and on the RTL838X, ports from 24 are SFP + if (port >= 24) + return; + + pr_debug("In %s: setting port %d to %d\n", __func__, port, enable); + v = enable ? 
0x3 : 0x0; + + // Set EEE state for 100 (bit 9) & 1000MBit (bit 10) + sw_w32_mask(0x3 << 9, v << 9, priv->r->mac_force_mode_ctrl(port)); + + // Set TX/RX EEE state + if (enable) { + sw_w32_mask(0, BIT(port), RTL838X_EEE_PORT_TX_EN); + sw_w32_mask(0, BIT(port), RTL838X_EEE_PORT_RX_EN); + } else { + sw_w32_mask(BIT(port), 0, RTL838X_EEE_PORT_TX_EN); + sw_w32_mask(BIT(port), 0, RTL838X_EEE_PORT_RX_EN); + } + priv->ports[port].eee_enabled = enable; +} + + +/* + * Get EEE own capabilities and negotiation result + */ +static int rtl838x_eee_port_ability(struct rtl838x_switch_priv *priv, + struct ethtool_eee *e, int port) +{ + u64 link; + + if (port >= 24) + return 0; + + link = rtl839x_get_port_reg_le(RTL838X_MAC_LINK_STS); + if (!(link & BIT(port))) + return 0; + + if (sw_r32(rtl838x_mac_force_mode_ctrl(port)) & BIT(9)) + e->advertised |= ADVERTISED_100baseT_Full; + + if (sw_r32(rtl838x_mac_force_mode_ctrl(port)) & BIT(10)) + e->advertised |= ADVERTISED_1000baseT_Full; + + if (sw_r32(RTL838X_MAC_EEE_ABLTY) & BIT(port)) { + e->lp_advertised = ADVERTISED_100baseT_Full; + e->lp_advertised |= ADVERTISED_1000baseT_Full; + return 1; + } + + return 0; +} + +static void rtl838x_init_eee(struct rtl838x_switch_priv *priv, bool enable) +{ + int i; + + pr_info("Setting up EEE, state: %d\n", enable); + sw_w32_mask(0x4, 0, RTL838X_SMI_GLB_CTRL); + + /* Set timers for EEE */ + sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL); + sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL); + + // Enable EEE MAC support on ports + for (i = 0; i < priv->cpu_port; i++) { + if (priv->ports[i].phy) + rtl838x_port_eee_set(priv, i, enable); + } + priv->eee_enabled = enable; +} + +static void rtl838x_pie_lookup_enable(struct rtl838x_switch_priv *priv, int index) +{ + int block = index / PIE_BLOCK_SIZE; + u32 block_state = sw_r32(RTL838X_ACL_BLK_LOOKUP_CTRL); + + // Make sure rule-lookup is enabled in the block + if (!(block_state & BIT(block))) + sw_w32(block_state | BIT(block), RTL838X_ACL_BLK_LOOKUP_CTRL); +} + +static void rtl838x_pie_rule_del(struct rtl838x_switch_priv *priv, int index_from, int index_to) +{ + int block_from = index_from / PIE_BLOCK_SIZE; + int block_to = index_to / PIE_BLOCK_SIZE; + u32 v = (index_from << 1)| (index_to << 12 ) | BIT(0); + int block; + u32 block_state; + + pr_debug("%s: from %d to %d\n", __func__, index_from, index_to); + mutex_lock(&priv->reg_mutex); + + // Remember currently active blocks + block_state = sw_r32(RTL838X_ACL_BLK_LOOKUP_CTRL); + + // Make sure rule-lookup is disabled in the relevant blocks + for (block = block_from; block <= block_to; block++) { + if (block_state & BIT(block)) + sw_w32(block_state & (~BIT(block)), RTL838X_ACL_BLK_LOOKUP_CTRL); + } + + // Write from-to and execute bit into control register + sw_w32(v, RTL838X_ACL_CLR_CTRL); + + // Wait until command has completed + do { + } while (sw_r32(RTL838X_ACL_CLR_CTRL) & BIT(0)); + + // Re-enable rule lookup + for (block = block_from; block <= block_to; block++) { + if (!(block_state & BIT(block))) + sw_w32(block_state | BIT(block), RTL838X_ACL_BLK_LOOKUP_CTRL); + } + + mutex_unlock(&priv->reg_mutex); +} + +/* + * Reads the intermediate representation of the templated match-fields of the + * PIE rule in the pie_rule structure and fills in the raw data fields in the + * raw register space r[]. + * The register space configuration size is identical for the RTL8380/90 and RTL9300, + * however the RTL9310 has 2 more registers / fields and the physical field-ids + * are specific to every platform. 
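+ * On the RTL838x the template holds N_FIXED_FIELDS (12) 16-bit fields;
+ * field i is packed into match word r[5 - i/2] and mask word r[12 - i/2],
+ * with even-numbered fields in the lower half-word and odd-numbered fields
+ * in the upper half-word (see the end of the loop below).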
+ */ +static void rtl838x_write_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[]) +{ + int i; + enum template_field_id field_type; + u16 data, data_m; + + for (i = 0; i < N_FIXED_FIELDS; i++) { + field_type = t[i]; + data = data_m = 0; + + switch (field_type) { + case TEMPLATE_FIELD_SPM0: + data = pr->spm; + data_m = pr->spm_m; + break; + case TEMPLATE_FIELD_SPM1: + data = pr->spm >> 16; + data_m = pr->spm_m >> 16; + break; + case TEMPLATE_FIELD_OTAG: + data = pr->otag; + data_m = pr->otag_m; + break; + case TEMPLATE_FIELD_SMAC0: + data = pr->smac[4]; + data = (data << 8) | pr->smac[5]; + data_m = pr->smac_m[4]; + data_m = (data_m << 8) | pr->smac_m[5]; + break; + case TEMPLATE_FIELD_SMAC1: + data = pr->smac[2]; + data = (data << 8) | pr->smac[3]; + data_m = pr->smac_m[2]; + data_m = (data_m << 8) | pr->smac_m[3]; + break; + case TEMPLATE_FIELD_SMAC2: + data = pr->smac[0]; + data = (data << 8) | pr->smac[1]; + data_m = pr->smac_m[0]; + data_m = (data_m << 8) | pr->smac_m[1]; + break; + case TEMPLATE_FIELD_DMAC0: + data = pr->dmac[4]; + data = (data << 8) | pr->dmac[5]; + data_m = pr->dmac_m[4]; + data_m = (data_m << 8) | pr->dmac_m[5]; + break; + case TEMPLATE_FIELD_DMAC1: + data = pr->dmac[2]; + data = (data << 8) | pr->dmac[3]; + data_m = pr->dmac_m[2]; + data_m = (data_m << 8) | pr->dmac_m[3]; + break; + case TEMPLATE_FIELD_DMAC2: + data = pr->dmac[0]; + data = (data << 8) | pr->dmac[1]; + data_m = pr->dmac_m[0]; + data_m = (data_m << 8) | pr->dmac_m[1]; + break; + case TEMPLATE_FIELD_ETHERTYPE: + data = pr->ethertype; + data_m = pr->ethertype_m; + break; + case TEMPLATE_FIELD_ITAG: + data = pr->itag; + data_m = pr->itag_m; + break; + case TEMPLATE_FIELD_RANGE_CHK: + data = pr->field_range_check; + data_m = pr->field_range_check_m; + break; + case TEMPLATE_FIELD_SIP0: + if (pr->is_ipv6) { + data = pr->sip6.s6_addr16[7]; + data_m = pr->sip6_m.s6_addr16[7]; + } else { + data = pr->sip; + data_m = pr->sip_m; + } + break; + case TEMPLATE_FIELD_SIP1: + if (pr->is_ipv6) { + data = pr->sip6.s6_addr16[6]; + data_m = pr->sip6_m.s6_addr16[6]; + } else { + data = pr->sip >> 16; + data_m = pr->sip_m >> 16; + } + break; + + case TEMPLATE_FIELD_SIP2: + case TEMPLATE_FIELD_SIP3: + case TEMPLATE_FIELD_SIP4: + case TEMPLATE_FIELD_SIP5: + case TEMPLATE_FIELD_SIP6: + case TEMPLATE_FIELD_SIP7: + data = pr->sip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)]; + data_m = pr->sip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)]; + break; + + case TEMPLATE_FIELD_DIP0: + if (pr->is_ipv6) { + data = pr->dip6.s6_addr16[7]; + data_m = pr->dip6_m.s6_addr16[7]; + } else { + data = pr->dip; + data_m = pr->dip_m; + } + break; + + case TEMPLATE_FIELD_DIP1: + if (pr->is_ipv6) { + data = pr->dip6.s6_addr16[6]; + data_m = pr->dip6_m.s6_addr16[6]; + } else { + data = pr->dip >> 16; + data_m = pr->dip_m >> 16; + } + break; + + case TEMPLATE_FIELD_DIP2: + case TEMPLATE_FIELD_DIP3: + case TEMPLATE_FIELD_DIP4: + case TEMPLATE_FIELD_DIP5: + case TEMPLATE_FIELD_DIP6: + case TEMPLATE_FIELD_DIP7: + data = pr->dip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)]; + data_m = pr->dip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)]; + break; + + case TEMPLATE_FIELD_IP_TOS_PROTO: + data = pr->tos_proto; + data_m = pr->tos_proto_m; + break; + case TEMPLATE_FIELD_L4_SPORT: + data = pr->sport; + data_m = pr->sport_m; + break; + case TEMPLATE_FIELD_L4_DPORT: + data = pr->dport; + data_m = pr->dport_m; + break; + case TEMPLATE_FIELD_ICMP_IGMP: + data = pr->icmp_igmp; + data_m = pr->icmp_igmp_m; + break; + 
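+		/* Field ids not handled above are reported and the
+		 * corresponding template slot is left empty */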
default: + pr_info("%s: unknown field %d\n", __func__, field_type); + continue; + } + if (!(i % 2)) { + r[5 - i / 2] = data; + r[12 - i / 2] = data_m; + } else { + r[5 - i / 2] |= ((u32)data) << 16; + r[12 - i / 2] |= ((u32)data_m) << 16; + } + } +} + +/* + * Creates the intermediate representation of the templated match-fields of the + * PIE rule in the pie_rule structure by reading the raw data fields in the + * raw register space r[]. + * The register space configuration size is identical for the RTL8380/90 and RTL9300, + * however the RTL9310 has 2 more registers / fields and the physical field-ids + */ +static void rtl838x_read_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[]) +{ + int i; + enum template_field_id field_type; + u16 data, data_m; + + for (i = 0; i < N_FIXED_FIELDS; i++) { + field_type = t[i]; + if (!(i % 2)) { + data = r[5 - i / 2]; + data_m = r[12 - i / 2]; + } else { + data = r[5 - i / 2] >> 16; + data_m = r[12 - i / 2] >> 16; + } + + switch (field_type) { + case TEMPLATE_FIELD_SPM0: + pr->spm = (pr->spn << 16) | data; + pr->spm_m = (pr->spn << 16) | data_m; + break; + case TEMPLATE_FIELD_SPM1: + pr->spm = data; + pr->spm_m = data_m; + break; + case TEMPLATE_FIELD_OTAG: + pr->otag = data; + pr->otag_m = data_m; + break; + case TEMPLATE_FIELD_SMAC0: + pr->smac[4] = data >> 8; + pr->smac[5] = data; + pr->smac_m[4] = data >> 8; + pr->smac_m[5] = data; + break; + case TEMPLATE_FIELD_SMAC1: + pr->smac[2] = data >> 8; + pr->smac[3] = data; + pr->smac_m[2] = data >> 8; + pr->smac_m[3] = data; + break; + case TEMPLATE_FIELD_SMAC2: + pr->smac[0] = data >> 8; + pr->smac[1] = data; + pr->smac_m[0] = data >> 8; + pr->smac_m[1] = data; + break; + case TEMPLATE_FIELD_DMAC0: + pr->dmac[4] = data >> 8; + pr->dmac[5] = data; + pr->dmac_m[4] = data >> 8; + pr->dmac_m[5] = data; + break; + case TEMPLATE_FIELD_DMAC1: + pr->dmac[2] = data >> 8; + pr->dmac[3] = data; + pr->dmac_m[2] = data >> 8; + pr->dmac_m[3] = data; + break; + case TEMPLATE_FIELD_DMAC2: + pr->dmac[0] = data >> 8; + pr->dmac[1] = data; + pr->dmac_m[0] = data >> 8; + pr->dmac_m[1] = data; + break; + case TEMPLATE_FIELD_ETHERTYPE: + pr->ethertype = data; + pr->ethertype_m = data_m; + break; + case TEMPLATE_FIELD_ITAG: + pr->itag = data; + pr->itag_m = data_m; + break; + case TEMPLATE_FIELD_RANGE_CHK: + pr->field_range_check = data; + pr->field_range_check_m = data_m; + break; + case TEMPLATE_FIELD_SIP0: + pr->sip = data; + pr->sip_m = data_m; + break; + case TEMPLATE_FIELD_SIP1: + pr->sip = (pr->sip << 16) | data; + pr->sip_m = (pr->sip << 16) | data_m; + break; + case TEMPLATE_FIELD_SIP2: + pr->is_ipv6 = true; + // Make use of limitiations on the position of the match values + ipv6_addr_set(&pr->sip6, pr->sip, r[5 - i / 2], + r[4 - i / 2], r[3 - i / 2]); + ipv6_addr_set(&pr->sip6_m, pr->sip_m, r[5 - i / 2], + r[4 - i / 2], r[3 - i / 2]); + case TEMPLATE_FIELD_SIP3: + case TEMPLATE_FIELD_SIP4: + case TEMPLATE_FIELD_SIP5: + case TEMPLATE_FIELD_SIP6: + case TEMPLATE_FIELD_SIP7: + break; + + case TEMPLATE_FIELD_DIP0: + pr->dip = data; + pr->dip_m = data_m; + break; + case TEMPLATE_FIELD_DIP1: + pr->dip = (pr->dip << 16) | data; + pr->dip_m = (pr->dip << 16) | data_m; + break; + case TEMPLATE_FIELD_DIP2: + pr->is_ipv6 = true; + ipv6_addr_set(&pr->dip6, pr->dip, r[5 - i / 2], + r[4 - i / 2], r[3 - i / 2]); + ipv6_addr_set(&pr->dip6_m, pr->dip_m, r[5 - i / 2], + r[4 - i / 2], r[3 - i / 2]); + case TEMPLATE_FIELD_DIP3: + case TEMPLATE_FIELD_DIP4: + case TEMPLATE_FIELD_DIP5: + case TEMPLATE_FIELD_DIP6: + case 
TEMPLATE_FIELD_DIP7: + break; + case TEMPLATE_FIELD_IP_TOS_PROTO: + pr->tos_proto = data; + pr->tos_proto_m = data_m; + break; + case TEMPLATE_FIELD_L4_SPORT: + pr->sport = data; + pr->sport_m = data_m; + break; + case TEMPLATE_FIELD_L4_DPORT: + pr->dport = data; + pr->dport_m = data_m; + break; + case TEMPLATE_FIELD_ICMP_IGMP: + pr->icmp_igmp = data; + pr->icmp_igmp_m = data_m; + break; + default: + pr_info("%s: unknown field %d\n", __func__, field_type); + } + } +} + +static void rtl838x_read_pie_fixed_fields(u32 r[], struct pie_rule *pr) +{ + pr->spmmask_fix = (r[6] >> 22) & 0x3; + pr->spn = (r[6] >> 16) & 0x3f; + pr->mgnt_vlan = (r[6] >> 15) & 1; + pr->dmac_hit_sw = (r[6] >> 14) & 1; + pr->not_first_frag = (r[6] >> 13) & 1; + pr->frame_type_l4 = (r[6] >> 10) & 7; + pr->frame_type = (r[6] >> 8) & 3; + pr->otag_fmt = (r[6] >> 7) & 1; + pr->itag_fmt = (r[6] >> 6) & 1; + pr->otag_exist = (r[6] >> 5) & 1; + pr->itag_exist = (r[6] >> 4) & 1; + pr->frame_type_l2 = (r[6] >> 2) & 3; + pr->tid = r[6] & 3; + + pr->spmmask_fix_m = (r[13] >> 22) & 0x3; + pr->spn_m = (r[13] >> 16) & 0x3f; + pr->mgnt_vlan_m = (r[13] >> 15) & 1; + pr->dmac_hit_sw_m = (r[13] >> 14) & 1; + pr->not_first_frag_m = (r[13] >> 13) & 1; + pr->frame_type_l4_m = (r[13] >> 10) & 7; + pr->frame_type_m = (r[13] >> 8) & 3; + pr->otag_fmt_m = (r[13] >> 7) & 1; + pr->itag_fmt_m = (r[13] >> 6) & 1; + pr->otag_exist_m = (r[13] >> 5) & 1; + pr->itag_exist_m = (r[13] >> 4) & 1; + pr->frame_type_l2_m = (r[13] >> 2) & 3; + pr->tid_m = r[13] & 3; + + pr->valid = r[14] & BIT(31); + pr->cond_not = r[14] & BIT(30); + pr->cond_and1 = r[14] & BIT(29); + pr->cond_and2 = r[14] & BIT(28); + pr->ivalid = r[14] & BIT(27); + + pr->drop = (r[17] >> 14) & 3; + pr->fwd_sel = r[17] & BIT(13); + pr->ovid_sel = r[17] & BIT(12); + pr->ivid_sel = r[17] & BIT(11); + pr->flt_sel = r[17] & BIT(10); + pr->log_sel = r[17] & BIT(9); + pr->rmk_sel = r[17] & BIT(8); + pr->meter_sel = r[17] & BIT(7); + pr->tagst_sel = r[17] & BIT(6); + pr->mir_sel = r[17] & BIT(5); + pr->nopri_sel = r[17] & BIT(4); + pr->cpupri_sel = r[17] & BIT(3); + pr->otpid_sel = r[17] & BIT(2); + pr->itpid_sel = r[17] & BIT(1); + pr->shaper_sel = r[17] & BIT(0); +} + +static void rtl838x_write_pie_fixed_fields(u32 r[], struct pie_rule *pr) +{ + r[6] = ((u32) (pr->spmmask_fix & 0x3)) << 22; + r[6] |= ((u32) (pr->spn & 0x3f)) << 16; + r[6] |= pr->mgnt_vlan ? BIT(15) : 0; + r[6] |= pr->dmac_hit_sw ? BIT(14) : 0; + r[6] |= pr->not_first_frag ? BIT(13) : 0; + r[6] |= ((u32) (pr->frame_type_l4 & 0x7)) << 10; + r[6] |= ((u32) (pr->frame_type & 0x3)) << 8; + r[6] |= pr->otag_fmt ? BIT(7) : 0; + r[6] |= pr->itag_fmt ? BIT(6) : 0; + r[6] |= pr->otag_exist ? BIT(5) : 0; + r[6] |= pr->itag_exist ? BIT(4) : 0; + r[6] |= ((u32) (pr->frame_type_l2 & 0x3)) << 2; + r[6] |= ((u32) (pr->tid & 0x3)); + + r[13] = ((u32) (pr->spmmask_fix_m & 0x3)) << 22; + r[13] |= ((u32) (pr->spn_m & 0x3f)) << 16; + r[13] |= pr->mgnt_vlan_m ? BIT(15) : 0; + r[13] |= pr->dmac_hit_sw_m ? BIT(14) : 0; + r[13] |= pr->not_first_frag_m ? BIT(13) : 0; + r[13] |= ((u32) (pr->frame_type_l4_m & 0x7)) << 10; + r[13] |= ((u32) (pr->frame_type_m & 0x3)) << 8; + r[13] |= pr->otag_fmt_m ? BIT(7) : 0; + r[13] |= pr->itag_fmt_m ? BIT(6) : 0; + r[13] |= pr->otag_exist_m ? BIT(5) : 0; + r[13] |= pr->itag_exist_m ? BIT(4) : 0; + r[13] |= ((u32) (pr->frame_type_l2_m & 0x3)) << 2; + r[13] |= ((u32) (pr->tid_m & 0x3)); + + r[14] = pr->valid ? BIT(31) : 0; + r[14] |= pr->cond_not ? BIT(30) : 0; + r[14] |= pr->cond_and1 ? 
BIT(29) : 0; + r[14] |= pr->cond_and2 ? BIT(28) : 0; + r[14] |= pr->ivalid ? BIT(27) : 0; + + if (pr->drop) + r[17] = 0x1 << 14; // Standard drop action + else + r[17] = 0; + r[17] |= pr->fwd_sel ? BIT(13) : 0; + r[17] |= pr->ovid_sel ? BIT(12) : 0; + r[17] |= pr->ivid_sel ? BIT(11) : 0; + r[17] |= pr->flt_sel ? BIT(10) : 0; + r[17] |= pr->log_sel ? BIT(9) : 0; + r[17] |= pr->rmk_sel ? BIT(8) : 0; + r[17] |= pr->meter_sel ? BIT(7) : 0; + r[17] |= pr->tagst_sel ? BIT(6) : 0; + r[17] |= pr->mir_sel ? BIT(5) : 0; + r[17] |= pr->nopri_sel ? BIT(4) : 0; + r[17] |= pr->cpupri_sel ? BIT(3) : 0; + r[17] |= pr->otpid_sel ? BIT(2) : 0; + r[17] |= pr->itpid_sel ? BIT(1) : 0; + r[17] |= pr->shaper_sel ? BIT(0) : 0; +} + +static int rtl838x_write_pie_action(u32 r[], struct pie_rule *pr) +{ + u16 *aif = (u16 *)&r[17]; + u16 data; + int fields_used = 0; + + aif--; + + pr_debug("%s, at %08x\n", __func__, (u32)aif); + /* Multiple actions can be linked to a match of a PIE rule, + * they have different precedence depending on their type and this precedence + * defines which Action Information Field (0-4) in the IACL table stores + * the additional data of the action (like e.g. the port number a packet is + * forwarded to) */ + // TODO: count bits in selectors to limit to a maximum number of actions + if (pr->fwd_sel) { // Forwarding action + data = pr->fwd_act << 13; + data |= pr->fwd_data; + data |= pr->bypass_all ? BIT(12) : 0; + data |= pr->bypass_ibc_sc ? BIT(11) : 0; + data |= pr->bypass_igr_stp ? BIT(10) : 0; + *aif-- = data; + fields_used++; + } + + if (pr->ovid_sel) { // Outer VID action + data = (pr->ovid_act & 0x3) << 12; + data |= pr->ovid_data; + *aif-- = data; + fields_used++; + } + + if (pr->ivid_sel) { // Inner VID action + data = (pr->ivid_act & 0x3) << 12; + data |= pr->ivid_data; + *aif-- = data; + fields_used++; + } + + if (pr->flt_sel) { // Filter action + *aif-- = pr->flt_data; + fields_used++; + } + + if (pr->log_sel) { // Log action + if (fields_used >= 4) + return -1; + *aif-- = pr->log_data; + fields_used++; + } + + if (pr->rmk_sel) { // Remark action + if (fields_used >= 4) + return -1; + *aif-- = pr->rmk_data; + fields_used++; + } + + if (pr->meter_sel) { // Meter action + if (fields_used >= 4) + return -1; + *aif-- = pr->meter_data; + fields_used++; + } + + if (pr->tagst_sel) { // Egress Tag Status action + if (fields_used >= 4) + return -1; + *aif-- = pr->tagst_data; + fields_used++; + } + + if (pr->mir_sel) { // Mirror action + if (fields_used >= 4) + return -1; + *aif-- = pr->mir_data; + fields_used++; + } + + if (pr->nopri_sel) { // Normal Priority action + if (fields_used >= 4) + return -1; + *aif-- = pr->nopri_data; + fields_used++; + } + + if (pr->cpupri_sel) { // CPU Priority action + if (fields_used >= 4) + return -1; + *aif-- = pr->nopri_data; + fields_used++; + } + + if (pr->otpid_sel) { // OTPID action + if (fields_used >= 4) + return -1; + *aif-- = pr->otpid_data; + fields_used++; + } + + if (pr->itpid_sel) { // ITPID action + if (fields_used >= 4) + return -1; + *aif-- = pr->itpid_data; + fields_used++; + } + + if (pr->shaper_sel) { // Traffic shaper action + if (fields_used >= 4) + return -1; + *aif-- = pr->shaper_data; + fields_used++; + } + + return 0; +} + +static void rtl838x_read_pie_action(u32 r[], struct pie_rule *pr) +{ + u16 *aif = (u16 *)&r[17]; + + aif--; + + pr_debug("%s, at %08x\n", __func__, (u32)aif); + if (pr->drop) + pr_debug("%s: Action Drop: %d", __func__, pr->drop); + + if (pr->fwd_sel){ // Forwarding action + pr->fwd_act = *aif >> 13; + 
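+		/* The same 16-bit slot also carries the forwarding data and the
+		 * bypass flags (bits 12-10); aif is only decremented after the
+		 * last read of the slot */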
pr->fwd_data = *aif--; + pr->bypass_all = pr->fwd_data & BIT(12); + pr->bypass_ibc_sc = pr->fwd_data & BIT(11); + pr->bypass_igr_stp = pr->fwd_data & BIT(10); + if (pr->bypass_all || pr->bypass_ibc_sc || pr->bypass_igr_stp) + pr->bypass_sel = true; + } + if (pr->ovid_sel) // Outer VID action + pr->ovid_data = *aif--; + if (pr->ivid_sel) // Inner VID action + pr->ivid_data = *aif--; + if (pr->flt_sel) // Filter action + pr->flt_data = *aif--; + if (pr->log_sel) // Log action + pr->log_data = *aif--; + if (pr->rmk_sel) // Remark action + pr->rmk_data = *aif--; + if (pr->meter_sel) // Meter action + pr->meter_data = *aif--; + if (pr->tagst_sel) // Egress Tag Status action + pr->tagst_data = *aif--; + if (pr->mir_sel) // Mirror action + pr->mir_data = *aif--; + if (pr->nopri_sel) // Normal Priority action + pr->nopri_data = *aif--; + if (pr->cpupri_sel) // CPU Priority action + pr->nopri_data = *aif--; + if (pr->otpid_sel) // OTPID action + pr->otpid_data = *aif--; + if (pr->itpid_sel) // ITPID action + pr->itpid_data = *aif--; + if (pr->shaper_sel) // Traffic shaper action + pr->shaper_data = *aif--; +} + +static void rtl838x_pie_rule_dump_raw(u32 r[]) +{ + pr_info("Raw IACL table entry:\n"); + pr_info("Match : %08x %08x %08x %08x %08x %08x\n", r[0], r[1], r[2], r[3], r[4], r[5]); + pr_info("Fixed : %08x\n", r[6]); + pr_info("Match M: %08x %08x %08x %08x %08x %08x\n", r[7], r[8], r[9], r[10], r[11], r[12]); + pr_info("Fixed M: %08x\n", r[13]); + pr_info("AIF : %08x %08x %08x\n", r[14], r[15], r[16]); + pr_info("Sel : %08x\n", r[17]); +} + +static void rtl838x_pie_rule_dump(struct pie_rule *pr) +{ + pr_info("Drop: %d, fwd: %d, ovid: %d, ivid: %d, flt: %d, log: %d, rmk: %d, meter: %d tagst: %d, mir: %d, nopri: %d, cpupri: %d, otpid: %d, itpid: %d, shape: %d\n", + pr->drop, pr->fwd_sel, pr->ovid_sel, pr->ivid_sel, pr->flt_sel, pr->log_sel, pr->rmk_sel, pr->log_sel, pr->tagst_sel, pr->mir_sel, pr->nopri_sel, + pr->cpupri_sel, pr->otpid_sel, pr->itpid_sel, pr->shaper_sel); + if (pr->fwd_sel) + pr_info("FWD: %08x\n", pr->fwd_data); + pr_info("TID: %x, %x\n", pr->tid, pr->tid_m); +} + +static int rtl838x_pie_rule_read(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr) +{ + // Read IACL table (1) via register 0 + struct table_reg *q = rtl_table_get(RTL8380_TBL_0, 1); + u32 r[18]; + int i; + int block = idx / PIE_BLOCK_SIZE; + u32 t_select = sw_r32(RTL838X_ACL_BLK_TMPLTE_CTRL(block)); + + memset(pr, 0, sizeof(*pr)); + rtl_table_read(q, idx); + for (i = 0; i < 18; i++) + r[i] = sw_r32(rtl_table_data(q, i)); + + rtl_table_release(q); + + rtl838x_read_pie_fixed_fields(r, pr); + if (!pr->valid) + return 0; + + pr_info("%s: template_selectors %08x, tid: %d\n", __func__, t_select, pr->tid); + rtl838x_pie_rule_dump_raw(r); + + rtl838x_read_pie_templated(r, pr, fixed_templates[(t_select >> (pr->tid * 3)) & 0x7]); + + rtl838x_read_pie_action(r, pr); + + return 0; +} + +static int rtl838x_pie_rule_write(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr) +{ + // Access IACL table (1) via register 0 + struct table_reg *q = rtl_table_get(RTL8380_TBL_0, 1); + u32 r[18]; + int i, err = 0; + int block = idx / PIE_BLOCK_SIZE; + u32 t_select = sw_r32(RTL838X_ACL_BLK_TMPLTE_CTRL(block)); + + pr_debug("%s: %d, t_select: %08x\n", __func__, idx, t_select); + + for (i = 0; i < 18; i++) + r[i] = 0; + + if (!pr->valid) + goto err_out; + + rtl838x_write_pie_fixed_fields(r, pr); + + pr_debug("%s: template %d\n", __func__, (t_select >> (pr->tid * 3)) & 0x7); + rtl838x_write_pie_templated(r, pr, 
fixed_templates[(t_select >> (pr->tid * 3)) & 0x7]); + + if (rtl838x_write_pie_action(r, pr)) { + pr_err("Rule actions too complex\n"); + goto err_out; + } + +// rtl838x_pie_rule_dump_raw(r); + + for (i = 0; i < 18; i++) + sw_w32(r[i], rtl_table_data(q, i)); + +err_out: + rtl_table_write(q, idx); + rtl_table_release(q); + + return err; +} + +static bool rtl838x_pie_templ_has(int t, enum template_field_id field_type) +{ + int i; + enum template_field_id ft; + + for (i = 0; i < N_FIXED_FIELDS; i++) { + ft = fixed_templates[t][i]; + if (field_type == ft) + return true; + } + + return false; +} + +static int rtl838x_pie_verify_template(struct rtl838x_switch_priv *priv, + struct pie_rule *pr, int t, int block) +{ + int i; + + if (!pr->is_ipv6 && pr->sip_m && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_SIP0)) + return -1; + + if (!pr->is_ipv6 && pr->dip_m && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_DIP0)) + return -1; + + if (pr->is_ipv6) { + if ((pr->sip6_m.s6_addr32[0] || pr->sip6_m.s6_addr32[1] + || pr->sip6_m.s6_addr32[2] || pr->sip6_m.s6_addr32[3]) + && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_SIP2)) + return -1; + if ((pr->dip6_m.s6_addr32[0] || pr->dip6_m.s6_addr32[1] + || pr->dip6_m.s6_addr32[2] || pr->dip6_m.s6_addr32[3]) + && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_DIP2)) + return -1; + } + + if (ether_addr_to_u64(pr->smac) && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_SMAC0)) + return -1; + + if (ether_addr_to_u64(pr->dmac) && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_DMAC0)) + return -1; + + // TODO: Check more + + i = find_first_zero_bit(&priv->pie_use_bm[block * 4], PIE_BLOCK_SIZE); + + if (i >= PIE_BLOCK_SIZE) + return -1; + + return i + PIE_BLOCK_SIZE * block; +} + +static int rtl838x_pie_rule_add(struct rtl838x_switch_priv *priv, struct pie_rule *pr) +{ + int idx, block, j, t; + + pr_debug("In %s\n", __func__); + + mutex_lock(&priv->pie_mutex); + + for (block = 0; block < priv->n_pie_blocks; block++) { + for (j = 0; j < 3; j++) { + t = (sw_r32(RTL838X_ACL_BLK_TMPLTE_CTRL(block)) >> (j * 3)) & 0x7; + pr_debug("Testing block %d, template %d, template id %d\n", block, j, t); + idx = rtl838x_pie_verify_template(priv, pr, t, block); + if (idx >= 0) + break; + } + if (j < 3) + break; + } + + if (block >= priv->n_pie_blocks) { + mutex_unlock(&priv->pie_mutex); + return -EOPNOTSUPP; + } + + pr_debug("Using block: %d, index %d, template-id %d\n", block, idx, j); + set_bit(idx, priv->pie_use_bm); + + pr->valid = true; + pr->tid = j; // Mapped to template number + pr->tid_m = 0x3; + pr->id = idx; + + rtl838x_pie_lookup_enable(priv, idx); + rtl838x_pie_rule_write(priv, idx, pr); + + mutex_unlock(&priv->pie_mutex); + return 0; +} + +static void rtl838x_pie_rule_rm(struct rtl838x_switch_priv *priv, struct pie_rule *pr) +{ + int idx = pr->id; + + rtl838x_pie_rule_del(priv, idx, idx); + clear_bit(idx, priv->pie_use_bm); +} + +/* + * Initializes the Packet Inspection Engine: + * powers it up, enables default matching templates for all blocks + * and clears all rules possibly installed by u-boot + */ +static void rtl838x_pie_init(struct rtl838x_switch_priv *priv) +{ + int i; + u32 template_selectors; + + mutex_init(&priv->pie_mutex); + + // Enable ACL lookup on all ports, including CPU_PORT + for (i = 0; i <= priv->cpu_port; i++) + sw_w32(1, RTL838X_ACL_PORT_LOOKUP_CTRL(i)); + + // Power on all PIE blocks + for (i = 0; i < priv->n_pie_blocks; i++) + sw_w32_mask(0, BIT(i), RTL838X_ACL_BLK_PWR_CTRL); + + // Include IPG in metering + sw_w32(1, RTL838X_METER_GLB_CTRL); + + // Delete all present rules + 
rtl838x_pie_rule_del(priv, 0, priv->n_pie_blocks * PIE_BLOCK_SIZE - 1); + + // Routing bypasses source port filter: disable write-protection, first + sw_w32_mask(0, 3, RTL838X_INT_RW_CTRL); + sw_w32_mask(0, 1, RTL838X_DMY_REG27); + sw_w32_mask(3, 0, RTL838X_INT_RW_CTRL); + + // Enable predefined templates 0, 1 and 2 for even blocks + template_selectors = 0 | (1 << 3) | (2 << 6); + for (i = 0; i < 6; i += 2) + sw_w32(template_selectors, RTL838X_ACL_BLK_TMPLTE_CTRL(i)); + + // Enable predefined templates 0, 3 and 4 (IPv6 support) for odd blocks + template_selectors = 0 | (3 << 3) | (4 << 6); + for (i = 1; i < priv->n_pie_blocks; i += 2) + sw_w32(template_selectors, RTL838X_ACL_BLK_TMPLTE_CTRL(i)); + + // Group each pair of physical blocks together to a logical block + sw_w32(0b10101010101, RTL838X_ACL_BLK_GROUP_CTRL); +} + +static u32 rtl838x_packet_cntr_read(int counter) +{ + u32 v; + + // Read LOG table (3) via register RTL8380_TBL_0 + struct table_reg *r = rtl_table_get(RTL8380_TBL_0, 3); + + pr_debug("In %s, id %d\n", __func__, counter); + rtl_table_read(r, counter / 2); + + pr_debug("Registers: %08x %08x\n", + sw_r32(rtl_table_data(r, 0)), sw_r32(rtl_table_data(r, 1))); + // The table has a size of 2 registers + if (counter % 2) + v = sw_r32(rtl_table_data(r, 0)); + else + v = sw_r32(rtl_table_data(r, 1)); + + rtl_table_release(r); + + return v; +} + +static void rtl838x_packet_cntr_clear(int counter) +{ + // Access LOG table (3) via register RTL8380_TBL_0 + struct table_reg *r = rtl_table_get(RTL8380_TBL_0, 3); + + pr_debug("In %s, id %d\n", __func__, counter); + // The table has a size of 2 registers + if (counter % 2) + sw_w32(0, rtl_table_data(r, 0)); + else + sw_w32(0, rtl_table_data(r, 1)); + + rtl_table_write(r, counter / 2); + + rtl_table_release(r); +} + +static void rtl838x_route_read(int idx, struct rtl83xx_route *rt) +{ + // Read ROUTING table (2) via register RTL8380_TBL_1 + struct table_reg *r = rtl_table_get(RTL8380_TBL_1, 2); + + pr_debug("In %s, id %d\n", __func__, idx); + rtl_table_read(r, idx); + + // The table has a size of 2 registers + rt->nh.gw = sw_r32(rtl_table_data(r, 0)); + rt->nh.gw <<= 32; + rt->nh.gw |= sw_r32(rtl_table_data(r, 1)); + + rtl_table_release(r); +} + +static void rtl838x_route_write(int idx, struct rtl83xx_route *rt) +{ + // Access ROUTING table (2) via register RTL8380_TBL_1 + struct table_reg *r = rtl_table_get(RTL8380_TBL_1, 2); + + pr_debug("In %s, id %d, gw: %016llx\n", __func__, idx, rt->nh.gw); + sw_w32(rt->nh.gw >> 32, rtl_table_data(r, 0)); + sw_w32(rt->nh.gw, rtl_table_data(r, 1)); + rtl_table_write(r, idx); + + rtl_table_release(r); +} + +static int rtl838x_l3_setup(struct rtl838x_switch_priv *priv) +{ + // Nothing to be done + return 0; +} + +void rtl838x_vlan_port_keep_tag_set(int port, bool keep_outer, bool keep_inner) +{ + sw_w32(FIELD_PREP(RTL838X_VLAN_PORT_TAG_STS_CTRL_OTAG_STS_MASK, + keep_outer ? RTL838X_VLAN_PORT_TAG_STS_TAGGED : RTL838X_VLAN_PORT_TAG_STS_UNTAG) | + FIELD_PREP(RTL838X_VLAN_PORT_TAG_STS_CTRL_ITAG_STS_MASK, + keep_inner ? 
RTL838X_VLAN_PORT_TAG_STS_TAGGED : RTL838X_VLAN_PORT_TAG_STS_UNTAG), + RTL838X_VLAN_PORT_TAG_STS_CTRL(port)); +} + +void rtl838x_vlan_port_pvidmode_set(int port, enum pbvlan_type type, enum pbvlan_mode mode) +{ + if (type == PBVLAN_TYPE_INNER) + sw_w32_mask(0x3, mode, RTL838X_VLAN_PORT_PB_VLAN + (port << 2)); + else + sw_w32_mask(0x3 << 14, mode << 14, RTL838X_VLAN_PORT_PB_VLAN + (port << 2)); +} + +void rtl838x_vlan_port_pvid_set(int port, enum pbvlan_type type, int pvid) +{ + if (type == PBVLAN_TYPE_INNER) + sw_w32_mask(0xfff << 2, pvid << 2, RTL838X_VLAN_PORT_PB_VLAN + (port << 2)); + else + sw_w32_mask(0xfff << 16, pvid << 16, RTL838X_VLAN_PORT_PB_VLAN + (port << 2)); +} + +static int rtl838x_set_ageing_time(unsigned long msec) +{ + int t = sw_r32(RTL838X_L2_CTRL_1); + + t &= 0x7FFFFF; + t = t * 128 / 625; /* Aging time in seconds. 0: L2 aging disabled */ + pr_debug("L2 AGING time: %d sec\n", t); + + t = (msec * 625 + 127000) / 128000; + t = t > 0x7FFFFF ? 0x7FFFFF : t; + sw_w32_mask(0x7FFFFF, t, RTL838X_L2_CTRL_1); + pr_debug("Dynamic aging for ports: %x\n", sw_r32(RTL838X_L2_PORT_AGING_OUT)); + + return 0; +} + +static void rtl838x_set_igr_filter(int port, enum igr_filter state) +{ + sw_w32_mask(0x3 << ((port & 0xf)<<1), state << ((port & 0xf)<<1), + RTL838X_VLAN_PORT_IGR_FLTR + (((port >> 4) << 2))); +} + +static void rtl838x_set_egr_filter(int port, enum egr_filter state) +{ + sw_w32_mask(0x1 << (port % 0x1d), state << (port % 0x1d), + RTL838X_VLAN_PORT_EGR_FLTR + (((port / 29) << 2))); +} + +void rtl838x_set_distribution_algorithm(int group, int algoidx, u32 algomsk) +{ + algoidx &= 1; // RTL838X only supports 2 concurrent algorithms + sw_w32_mask(1 << (group % 8), algoidx << (group % 8), + RTL838X_TRK_HASH_IDX_CTRL + ((group >> 3) << 2)); + sw_w32(algomsk, RTL838X_TRK_HASH_CTRL + (algoidx << 2)); +} + +void rtl838x_set_receive_management_action(int port, rma_ctrl_t type, action_type_t action) +{ + switch(type) { + case BPDU: + sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1), + RTL838X_RMA_BPDU_CTRL + ((port >> 4) << 2)); + break; + case PTP: + sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1), + RTL838X_RMA_PTP_CTRL + ((port >> 4) << 2)); + break; + case LLTP: + sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1), + RTL838X_RMA_LLTP_CTRL + ((port >> 4) << 2)); + break; + default: + break; + } +} + +const struct rtl838x_reg rtl838x_reg = { + .mask_port_reg_be = rtl838x_mask_port_reg, + .set_port_reg_be = rtl838x_set_port_reg, + .get_port_reg_be = rtl838x_get_port_reg, + .mask_port_reg_le = rtl838x_mask_port_reg, + .set_port_reg_le = rtl838x_set_port_reg, + .get_port_reg_le = rtl838x_get_port_reg, + .stat_port_rst = RTL838X_STAT_PORT_RST, + .stat_rst = RTL838X_STAT_RST, + .stat_port_std_mib = RTL838X_STAT_PORT_STD_MIB, + .port_iso_ctrl = rtl838x_port_iso_ctrl, + .traffic_enable = rtl838x_traffic_enable, + .traffic_disable = rtl838x_traffic_disable, + .traffic_get = rtl838x_traffic_get, + .traffic_set = rtl838x_traffic_set, + .l2_ctrl_0 = RTL838X_L2_CTRL_0, + .l2_ctrl_1 = RTL838X_L2_CTRL_1, + .l2_port_aging_out = RTL838X_L2_PORT_AGING_OUT, + .set_ageing_time = rtl838x_set_ageing_time, + .smi_poll_ctrl = RTL838X_SMI_POLL_CTRL, + .l2_tbl_flush_ctrl = RTL838X_L2_TBL_FLUSH_CTRL, + .exec_tbl0_cmd = rtl838x_exec_tbl0_cmd, + .exec_tbl1_cmd = rtl838x_exec_tbl1_cmd, + .tbl_access_data_0 = rtl838x_tbl_access_data_0, + .isr_glb_src = RTL838X_ISR_GLB_SRC, + .isr_port_link_sts_chg = RTL838X_ISR_PORT_LINK_STS_CHG, + 
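+	/* Interrupt mask registers matching the status registers above */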
.imr_port_link_sts_chg = RTL838X_IMR_PORT_LINK_STS_CHG, + .imr_glb = RTL838X_IMR_GLB, + .vlan_tables_read = rtl838x_vlan_tables_read, + .vlan_set_tagged = rtl838x_vlan_set_tagged, + .vlan_set_untagged = rtl838x_vlan_set_untagged, + .mac_force_mode_ctrl = rtl838x_mac_force_mode_ctrl, + .vlan_profile_dump = rtl838x_vlan_profile_dump, + .vlan_profile_setup = rtl838x_vlan_profile_setup, + .vlan_fwd_on_inner = rtl838x_vlan_fwd_on_inner, + .set_vlan_igr_filter = rtl838x_set_igr_filter, + .set_vlan_egr_filter = rtl838x_set_egr_filter, + .enable_learning = rtl838x_enable_learning, + .enable_flood = rtl838x_enable_flood, + .enable_mcast_flood = rtl838x_enable_mcast_flood, + .enable_bcast_flood = rtl838x_enable_bcast_flood, + .stp_get = rtl838x_stp_get, + .stp_set = rtl838x_stp_set, + .mac_port_ctrl = rtl838x_mac_port_ctrl, + .l2_port_new_salrn = rtl838x_l2_port_new_salrn, + .l2_port_new_sa_fwd = rtl838x_l2_port_new_sa_fwd, + .mir_ctrl = RTL838X_MIR_CTRL, + .mir_dpm = RTL838X_MIR_DPM_CTRL, + .mir_spm = RTL838X_MIR_SPM_CTRL, + .mac_link_sts = RTL838X_MAC_LINK_STS, + .mac_link_dup_sts = RTL838X_MAC_LINK_DUP_STS, + .mac_link_spd_sts = rtl838x_mac_link_spd_sts, + .mac_rx_pause_sts = RTL838X_MAC_RX_PAUSE_STS, + .mac_tx_pause_sts = RTL838X_MAC_TX_PAUSE_STS, + .read_l2_entry_using_hash = rtl838x_read_l2_entry_using_hash, + .write_l2_entry_using_hash = rtl838x_write_l2_entry_using_hash, + .read_cam = rtl838x_read_cam, + .write_cam = rtl838x_write_cam, + .vlan_port_keep_tag_set = rtl838x_vlan_port_keep_tag_set, + .vlan_port_pvidmode_set = rtl838x_vlan_port_pvidmode_set, + .vlan_port_pvid_set = rtl838x_vlan_port_pvid_set, + .trk_mbr_ctr = rtl838x_trk_mbr_ctr, + .rma_bpdu_fld_pmask = RTL838X_RMA_BPDU_FLD_PMSK, + .spcl_trap_eapol_ctrl = RTL838X_SPCL_TRAP_EAPOL_CTRL, + .init_eee = rtl838x_init_eee, + .port_eee_set = rtl838x_port_eee_set, + .eee_port_ability = rtl838x_eee_port_ability, + .l2_hash_seed = rtl838x_l2_hash_seed, + .l2_hash_key = rtl838x_l2_hash_key, + .read_mcast_pmask = rtl838x_read_mcast_pmask, + .write_mcast_pmask = rtl838x_write_mcast_pmask, + .pie_init = rtl838x_pie_init, + .pie_rule_read = rtl838x_pie_rule_read, + .pie_rule_write = rtl838x_pie_rule_write, + .pie_rule_add = rtl838x_pie_rule_add, + .pie_rule_rm = rtl838x_pie_rule_rm, + .l2_learning_setup = rtl838x_l2_learning_setup, + .packet_cntr_read = rtl838x_packet_cntr_read, + .packet_cntr_clear = rtl838x_packet_cntr_clear, + .route_read = rtl838x_route_read, + .route_write = rtl838x_route_write, + .l3_setup = rtl838x_l3_setup, + .set_distribution_algorithm = rtl838x_set_distribution_algorithm, + .set_receive_management_action = rtl838x_set_receive_management_action, +}; + +irqreturn_t rtl838x_switch_irq(int irq, void *dev_id) +{ + struct dsa_switch *ds = dev_id; + u32 status = sw_r32(RTL838X_ISR_GLB_SRC); + u32 ports = sw_r32(RTL838X_ISR_PORT_LINK_STS_CHG); + u32 link; + int i; + + /* Clear status */ + sw_w32(ports, RTL838X_ISR_PORT_LINK_STS_CHG); + pr_info("RTL8380 Link change: status: %x, ports %x\n", status, ports); + + for (i = 0; i < 28; i++) { + if (ports & BIT(i)) { + link = sw_r32(RTL838X_MAC_LINK_STS); + if (link & BIT(i)) + dsa_port_phylink_mac_change(ds, i, true); + else + dsa_port_phylink_mac_change(ds, i, false); + } + } + return IRQ_HANDLED; +} + +int rtl838x_smi_wait_op(int timeout) +{ + int ret = 0; + u32 val; + + ret = readx_poll_timeout(sw_r32, RTL838X_SMI_ACCESS_PHY_CTRL_1, + val, !(val & 0x1), 20, timeout); + if (ret) + pr_err("%s: timeout\n", __func__); + + return ret; +} + +/* + * Reads a register in a page from the 
PHY + */ +int rtl838x_read_phy(u32 port, u32 page, u32 reg, u32 *val) +{ + u32 v; + u32 park_page; + + if (port > 31) { + *val = 0xffff; + return 0; + } + + if (page > 4095 || reg > 31) + return -ENOTSUPP; + + mutex_lock(&smi_lock); + + if (rtl838x_smi_wait_op(100000)) + goto timeout; + + sw_w32_mask(0xffff0000, port << 16, RTL838X_SMI_ACCESS_PHY_CTRL_2); + + park_page = sw_r32(RTL838X_SMI_ACCESS_PHY_CTRL_1) & ((0x1f << 15) | 0x2); + v = reg << 20 | page << 3; + sw_w32(v | park_page, RTL838X_SMI_ACCESS_PHY_CTRL_1); + sw_w32_mask(0, 1, RTL838X_SMI_ACCESS_PHY_CTRL_1); + + if (rtl838x_smi_wait_op(100000)) + goto timeout; + + *val = sw_r32(RTL838X_SMI_ACCESS_PHY_CTRL_2) & 0xffff; + + mutex_unlock(&smi_lock); + return 0; + +timeout: + mutex_unlock(&smi_lock); + return -ETIMEDOUT; +} + +/* + * Write to a register in a page of the PHY + */ +int rtl838x_write_phy(u32 port, u32 page, u32 reg, u32 val) +{ + u32 v; + u32 park_page; + + val &= 0xffff; + if (port > 31 || page > 4095 || reg > 31) + return -ENOTSUPP; + + mutex_lock(&smi_lock); + if (rtl838x_smi_wait_op(100000)) + goto timeout; + + sw_w32(BIT(port), RTL838X_SMI_ACCESS_PHY_CTRL_0); + mdelay(10); + + sw_w32_mask(0xffff0000, val << 16, RTL838X_SMI_ACCESS_PHY_CTRL_2); + + park_page = sw_r32(RTL838X_SMI_ACCESS_PHY_CTRL_1) & ((0x1f << 15) | 0x2); + v = reg << 20 | page << 3 | 0x4; + sw_w32(v | park_page, RTL838X_SMI_ACCESS_PHY_CTRL_1); + sw_w32_mask(0, 1, RTL838X_SMI_ACCESS_PHY_CTRL_1); + + if (rtl838x_smi_wait_op(100000)) + goto timeout; + + mutex_unlock(&smi_lock); + return 0; + +timeout: + mutex_unlock(&smi_lock); + return -ETIMEDOUT; +} + +/* + * Read an mmd register of a PHY + */ +int rtl838x_read_mmd_phy(u32 port, u32 addr, u32 reg, u32 *val) +{ + u32 v; + + mutex_lock(&smi_lock); + + if (rtl838x_smi_wait_op(100000)) + goto timeout; + + sw_w32(1 << port, RTL838X_SMI_ACCESS_PHY_CTRL_0); + mdelay(10); + + sw_w32_mask(0xffff0000, port << 16, RTL838X_SMI_ACCESS_PHY_CTRL_2); + + v = addr << 16 | reg; + sw_w32(v, RTL838X_SMI_ACCESS_PHY_CTRL_3); + + /* mmd-access | read | cmd-start */ + v = 1 << 1 | 0 << 2 | 1; + sw_w32(v, RTL838X_SMI_ACCESS_PHY_CTRL_1); + + if (rtl838x_smi_wait_op(100000)) + goto timeout; + + *val = sw_r32(RTL838X_SMI_ACCESS_PHY_CTRL_2) & 0xffff; + + mutex_unlock(&smi_lock); + return 0; + +timeout: + mutex_unlock(&smi_lock); + return -ETIMEDOUT; +} + +/* + * Write to an mmd register of a PHY + */ +int rtl838x_write_mmd_phy(u32 port, u32 addr, u32 reg, u32 val) +{ + u32 v; + + pr_debug("MMD write: port %d, dev %d, reg %d, val %x\n", port, addr, reg, val); + val &= 0xffff; + mutex_lock(&smi_lock); + + if (rtl838x_smi_wait_op(100000)) + goto timeout; + + sw_w32(1 << port, RTL838X_SMI_ACCESS_PHY_CTRL_0); + mdelay(10); + + sw_w32_mask(0xffff0000, val << 16, RTL838X_SMI_ACCESS_PHY_CTRL_2); + + sw_w32_mask(0x1f << 16, addr << 16, RTL838X_SMI_ACCESS_PHY_CTRL_3); + sw_w32_mask(0xffff, reg, RTL838X_SMI_ACCESS_PHY_CTRL_3); + /* mmd-access | write | cmd-start */ + v = 1 << 1 | 1 << 2 | 1; + sw_w32(v, RTL838X_SMI_ACCESS_PHY_CTRL_1); + + if (rtl838x_smi_wait_op(100000)) + goto timeout; + + mutex_unlock(&smi_lock); + return 0; + +timeout: + mutex_unlock(&smi_lock); + return -ETIMEDOUT; +} + +void rtl8380_get_version(struct rtl838x_switch_priv *priv) +{ + u32 rw_save, info_save; + u32 info; + + rw_save = sw_r32(RTL838X_INT_RW_CTRL); + sw_w32(rw_save | 0x3, RTL838X_INT_RW_CTRL); + + info_save = sw_r32(RTL838X_CHIP_INFO); + sw_w32(info_save | 0xA0000000, RTL838X_CHIP_INFO); + + info = sw_r32(RTL838X_CHIP_INFO); + sw_w32(info_save, 
RTL838X_CHIP_INFO); + sw_w32(rw_save, RTL838X_INT_RW_CTRL); + + if ((info & 0xFFFF) == 0x6275) { + if (((info >> 16) & 0x1F) == 0x1) + priv->version = RTL8380_VERSION_A; + else if (((info >> 16) & 0x1F) == 0x2) + priv->version = RTL8380_VERSION_B; + else + priv->version = RTL8380_VERSION_B; + } else { + priv->version = '-'; + } +} + +void rtl838x_vlan_profile_dump(int profile) +{ + u32 p; + + if (profile < 0 || profile > 7) + return; + + p = sw_r32(RTL838X_VLAN_PROFILE(profile)); + + pr_info("VLAN profile %d: L2 learning: %d, UNKN L2MC FLD PMSK %d, \ + UNKN IPMC FLD PMSK %d, UNKN IPv6MC FLD PMSK: %d", + profile, p & 1, (p >> 1) & 0x1ff, (p >> 10) & 0x1ff, (p >> 19) & 0x1ff); +} + +void rtl8380_sds_rst(int mac) +{ + u32 offset = (mac == 24) ? 0 : 0x100; + + sw_w32_mask(1 << 11, 0, RTL838X_SDS4_FIB_REG0 + offset); + sw_w32_mask(0x3, 0, RTL838X_SDS4_REG28 + offset); + sw_w32_mask(0x3, 0x3, RTL838X_SDS4_REG28 + offset); + sw_w32_mask(0, 0x1 << 6, RTL838X_SDS4_DUMMY0 + offset); + sw_w32_mask(0x1 << 6, 0, RTL838X_SDS4_DUMMY0 + offset); + pr_debug("SERDES reset: %d\n", mac); +} + +int rtl8380_sds_power(int mac, int val) +{ + u32 mode = (val == 1) ? 0x4 : 0x9; + u32 offset = (mac == 24) ? 5 : 0; + + if ((mac != 24) && (mac != 26)) { + pr_err("%s: not a fibre port: %d\n", __func__, mac); + return -1; + } + + sw_w32_mask(0x1f << offset, mode << offset, RTL838X_SDS_MODE_SEL); + + rtl8380_sds_rst(mac); + + return 0; +} diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl838x.h b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl838x.h new file mode 100644 index 0000000000..19049e4c95 --- /dev/null +++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl838x.h @@ -0,0 +1,1093 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _RTL838X_H +#define _RTL838X_H + +#include + +/* + * Register definition + */ +#define RTL838X_MAC_PORT_CTRL(port) (0xd560 + (((port) << 7))) +#define RTL839X_MAC_PORT_CTRL(port) (0x8004 + (((port) << 7))) +#define RTL930X_MAC_PORT_CTRL(port) (0x3260 + (((port) << 6))) +#define RTL931X_MAC_PORT_CTRL (0x6004) + +#define RTL930X_MAC_L2_PORT_CTRL(port) (0x3268 + (((port) << 6))) +#define RTL931X_MAC_L2_PORT_CTRL (0x6000) + +#define RTL838X_RST_GLB_CTRL_0 (0x003c) + +#define RTL838X_MAC_FORCE_MODE_CTRL (0xa104) +#define RTL839X_MAC_FORCE_MODE_CTRL (0x02bc) +#define RTL930X_MAC_FORCE_MODE_CTRL (0xCA1C) +#define RTL931X_MAC_FORCE_MODE_CTRL (0x0DCC) + +#define RTL838X_DMY_REG31 (0x3b28) +#define RTL838X_SDS_MODE_SEL (0x0028) +#define RTL838X_SDS_CFG_REG (0x0034) +#define RTL838X_INT_MODE_CTRL (0x005c) +#define RTL838X_CHIP_INFO (0x00d8) +#define RTL839X_CHIP_INFO (0x0ff4) +#define RTL838X_PORT_ISO_CTRL(port) (0x4100 + ((port) << 2)) +#define RTL839X_PORT_ISO_CTRL(port) (0x1400 + ((port) << 3)) + +/* Packet statistics */ +#define RTL838X_STAT_PORT_STD_MIB (0x1200) +#define RTL839X_STAT_PORT_STD_MIB (0xC000) +#define RTL930X_STAT_PORT_MIB_CNTR (0x0664) +#define RTL838X_STAT_RST (0x3100) +#define RTL839X_STAT_RST (0xF504) +#define RTL930X_STAT_RST (0x3240) +#define RTL931X_STAT_RST (0x7ef4) +#define RTL838X_STAT_PORT_RST (0x3104) +#define RTL839X_STAT_PORT_RST (0xF508) +#define RTL930X_STAT_PORT_RST (0x3244) +#define RTL931X_STAT_PORT_RST (0x7ef8) +#define RTL838X_STAT_CTRL (0x3108) +#define RTL839X_STAT_CTRL (0x04cc) +#define RTL930X_STAT_CTRL (0x3248) +#define RTL931X_STAT_CTRL (0x5720) + +/* Registers of the internal Serdes of the 8390 */ +#define RTL8390_SDS0_1_XSG0 (0xA000) +#define RTL8390_SDS0_1_XSG1 (0xA100) +#define 
RTL839X_SDS12_13_XSG0 (0xB800) +#define RTL839X_SDS12_13_XSG1 (0xB900) +#define RTL839X_SDS12_13_PWR0 (0xb880) +#define RTL839X_SDS12_13_PWR1 (0xb980) + +/* Registers of the internal Serdes of the 8380 */ +#define RTL838X_SDS4_FIB_REG0 (0xF800) +#define RTL838X_SDS4_REG28 (0xef80) +#define RTL838X_SDS4_DUMMY0 (0xef8c) +#define RTL838X_SDS5_EXT_REG6 (0xf18c) + +/* VLAN registers */ +#define RTL838X_VLAN_CTRL (0x3A74) +#define RTL838X_VLAN_PROFILE(idx) (0x3A88 + ((idx) << 2)) +#define RTL838X_VLAN_PORT_EGR_FLTR (0x3A84) +#define RTL838X_VLAN_PORT_PB_VLAN (0x3C00) +#define RTL838X_VLAN_PORT_IGR_FLTR (0x3A7C) + +#define RTL839X_VLAN_PROFILE(idx) (0x25C0 + (((idx) << 3))) +#define RTL839X_VLAN_CTRL (0x26D4) +#define RTL839X_VLAN_PORT_PB_VLAN (0x26D8) +#define RTL839X_VLAN_PORT_IGR_FLTR (0x27B4) +#define RTL839X_VLAN_PORT_EGR_FLTR (0x27C4) + +#define RTL930X_VLAN_PROFILE_SET(idx) (0x9c60 + (((idx) * 20))) +#define RTL930X_VLAN_CTRL (0x82D4) +#define RTL930X_VLAN_PORT_PB_VLAN (0x82D8) +#define RTL930X_VLAN_PORT_IGR_FLTR (0x83C0) +#define RTL930X_VLAN_PORT_EGR_FLTR (0x83C8) + +#define RTL931X_VLAN_PROFILE_SET(idx) (0x9800 + (((idx) * 28))) +#define RTL931X_VLAN_CTRL (0x94E4) +#define RTL931X_VLAN_PORT_IGR_CTRL (0x94E8) +#define RTL931X_VLAN_PORT_IGR_FLTR (0x96B4) +#define RTL931X_VLAN_PORT_EGR_FLTR (0x96C4) + +/* Table access registers */ +#define RTL838X_TBL_ACCESS_CTRL_0 (0x6914) +#define RTL838X_TBL_ACCESS_DATA_0(idx) (0x6918 + ((idx) << 2)) +#define RTL838X_TBL_ACCESS_CTRL_1 (0xA4C8) +#define RTL838X_TBL_ACCESS_DATA_1(idx) (0xA4CC + ((idx) << 2)) + +#define RTL839X_TBL_ACCESS_CTRL_0 (0x1190) +#define RTL839X_TBL_ACCESS_DATA_0(idx) (0x1194 + ((idx) << 2)) +#define RTL839X_TBL_ACCESS_CTRL_1 (0x6b80) +#define RTL839X_TBL_ACCESS_DATA_1(idx) (0x6b84 + ((idx) << 2)) +#define RTL839X_TBL_ACCESS_CTRL_2 (0x611C) +#define RTL839X_TBL_ACCESS_DATA_2(i) (0x6120 + (((i) << 2))) + +#define RTL930X_TBL_ACCESS_CTRL_0 (0xB340) +#define RTL930X_TBL_ACCESS_DATA_0(idx) (0xB344 + ((idx) << 2)) +#define RTL930X_TBL_ACCESS_CTRL_1 (0xB3A0) +#define RTL930X_TBL_ACCESS_DATA_1(idx) (0xB3A4 + ((idx) << 2)) +#define RTL930X_TBL_ACCESS_CTRL_2 (0xCE04) +#define RTL930X_TBL_ACCESS_DATA_2(i) (0xCE08 + (((i) << 2))) + +#define RTL931X_TBL_ACCESS_CTRL_0 (0x8500) +#define RTL931X_TBL_ACCESS_DATA_0(idx) (0x8508 + ((idx) << 2)) +#define RTL931X_TBL_ACCESS_CTRL_1 (0x40C0) +#define RTL931X_TBL_ACCESS_DATA_1(idx) (0x40C4 + ((idx) << 2)) +#define RTL931X_TBL_ACCESS_CTRL_2 (0x8528) +#define RTL931X_TBL_ACCESS_DATA_2(i) (0x852C + (((i) << 2))) +#define RTL931X_TBL_ACCESS_CTRL_3 (0x0200) +#define RTL931X_TBL_ACCESS_DATA_3(i) (0x0204 + (((i) << 2))) +#define RTL931X_TBL_ACCESS_CTRL_4 (0x20DC) +#define RTL931X_TBL_ACCESS_DATA_4(i) (0x20E0 + (((i) << 2))) +#define RTL931X_TBL_ACCESS_CTRL_5 (0x7E1C) +#define RTL931X_TBL_ACCESS_DATA_5(i) (0x7E20 + (((i) << 2))) + +/* MAC handling */ +#define RTL838X_MAC_LINK_STS (0xa188) +#define RTL839X_MAC_LINK_STS (0x0390) +#define RTL930X_MAC_LINK_STS (0xCB10) +#define RTL931X_MAC_LINK_STS (0x0EC0) +#define RTL838X_MAC_LINK_SPD_STS(p) (0xa190 + (((p >> 4) << 2))) +#define RTL839X_MAC_LINK_SPD_STS(p) (0x03a0 + (((p >> 4) << 2))) +#define RTL930X_MAC_LINK_SPD_STS(p) (0xCB18 + (((p >> 3) << 2))) +#define RTL931X_MAC_LINK_SPD_STS (0x0ED0) +#define RTL838X_MAC_LINK_DUP_STS (0xa19c) +#define RTL839X_MAC_LINK_DUP_STS (0x03b0) +#define RTL930X_MAC_LINK_DUP_STS (0xCB28) +#define RTL931X_MAC_LINK_DUP_STS (0x0EF0) +#define RTL838X_MAC_TX_PAUSE_STS (0xa1a0) +#define RTL839X_MAC_TX_PAUSE_STS (0x03b8) +#define 
RTL930X_MAC_TX_PAUSE_STS (0xCB2C) +#define RTL931X_MAC_TX_PAUSE_STS (0x0EF8) +#define RTL838X_MAC_RX_PAUSE_STS (0xa1a4) +#define RTL839X_MAC_RX_PAUSE_STS (0x03c0) +#define RTL930X_MAC_RX_PAUSE_STS (0xCB30) +#define RTL931X_MAC_RX_PAUSE_STS (0x0F00) +#define RTL930X_MAC_LINK_MEDIA_STS (0xCB14) +#define RTL931X_MAC_LINK_MEDIA_STS (0x0EC8) + +/* MAC link state bits */ +#define RTL838X_FORCE_EN (1 << 0) +#define RTL838X_FORCE_LINK_EN (1 << 1) +#define RTL838X_NWAY_EN (1 << 2) +#define RTL838X_DUPLEX_MODE (1 << 3) +#define RTL838X_TX_PAUSE_EN (1 << 6) +#define RTL838X_RX_PAUSE_EN (1 << 7) +#define RTL838X_MAC_FORCE_FC_EN (1 << 8) + +#define RTL839X_FORCE_EN (1 << 0) +#define RTL839X_FORCE_LINK_EN (1 << 1) +#define RTL839X_DUPLEX_MODE (1 << 2) +#define RTL839X_TX_PAUSE_EN (1 << 5) +#define RTL839X_RX_PAUSE_EN (1 << 6) +#define RTL839X_MAC_FORCE_FC_EN (1 << 7) + +#define RTL930X_FORCE_EN (1 << 0) +#define RTL930X_FORCE_LINK_EN (1 << 1) +#define RTL930X_DUPLEX_MODE (1 << 2) +#define RTL930X_TX_PAUSE_EN (1 << 7) +#define RTL930X_RX_PAUSE_EN (1 << 8) +#define RTL930X_MAC_FORCE_FC_EN (1 << 9) + +#define RTL931X_FORCE_EN (1 << 9) +#define RTL931X_FORCE_LINK_EN (1 << 0) +#define RTL931X_DUPLEX_MODE (1 << 2) +#define RTL931X_MAC_FORCE_FC_EN (1 << 4) +#define RTL931X_TX_PAUSE_EN (1 << 16) +#define RTL931X_RX_PAUSE_EN (1 << 17) + +/* EEE */ +#define RTL838X_MAC_EEE_ABLTY (0xa1a8) +#define RTL838X_EEE_PORT_TX_EN (0x014c) +#define RTL838X_EEE_PORT_RX_EN (0x0150) +#define RTL838X_EEE_CLK_STOP_CTRL (0x0148) +#define RTL838X_EEE_TX_TIMER_GIGA_CTRL (0xaa04) +#define RTL838X_EEE_TX_TIMER_GELITE_CTRL (0xaa08) + +#define RTL839X_EEE_TX_TIMER_GELITE_CTRL (0x042C) +#define RTL839X_EEE_TX_TIMER_GIGA_CTRL (0x0430) +#define RTL839X_EEE_TX_TIMER_10G_CTRL (0x0434) +#define RTL839X_EEE_CTRL(p) (0x8008 + ((p) << 7)) +#define RTL839X_MAC_EEE_ABLTY (0x03C8) + +#define RTL930X_MAC_EEE_ABLTY (0xCB34) +#define RTL930X_EEE_CTRL(p) (0x3274 + ((p) << 6)) +#define RTL930X_EEEP_PORT_CTRL(p) (0x3278 + ((p) << 6)) + +/* L2 functionality */ +#define RTL838X_L2_CTRL_0 (0x3200) +#define RTL839X_L2_CTRL_0 (0x3800) +#define RTL930X_L2_CTRL (0x8FD8) +#define RTL931X_L2_CTRL (0xC800) +#define RTL838X_L2_CTRL_1 (0x3204) +#define RTL839X_L2_CTRL_1 (0x3804) +#define RTL930X_L2_AGE_CTRL (0x8FDC) +#define RTL931X_L2_AGE_CTRL (0xC804) +#define RTL838X_L2_PORT_AGING_OUT (0x3358) +#define RTL839X_L2_PORT_AGING_OUT (0x3b74) +#define RTL930X_L2_PORT_AGE_CTRL (0x8FE0) +#define RTL931X_L2_PORT_AGE_CTRL (0xc808) +#define RTL838X_TBL_ACCESS_L2_CTRL (0x6900) +#define RTL839X_TBL_ACCESS_L2_CTRL (0x1180) +#define RTL930X_TBL_ACCESS_L2_CTRL (0xB320) +#define RTL930X_TBL_ACCESS_L2_METHOD_CTRL (0xB324) +#define RTL838X_TBL_ACCESS_L2_DATA(idx) (0x6908 + ((idx) << 2)) +#define RTL839X_TBL_ACCESS_L2_DATA(idx) (0x1184 + ((idx) << 2)) +#define RTL930X_TBL_ACCESS_L2_DATA(idx) (0xab08 + ((idx) << 2)) + +#define RTL838X_L2_TBL_FLUSH_CTRL (0x3370) +#define RTL839X_L2_TBL_FLUSH_CTRL (0x3ba0) +#define RTL930X_L2_TBL_FLUSH_CTRL (0x9404) +#define RTL931X_L2_TBL_FLUSH_CTRL (0xCD9C) + +#define RTL838X_L2_LRN_CONSTRT (0x329C) +#define RTL839X_L2_LRN_CONSTRT (0x3910) +#define RTL930X_L2_LRN_CONSTRT_CTRL (0x909c) +#define RTL931X_L2_LRN_CONSTRT_CTRL (0xC964) + +#define RTL838X_L2_FLD_PMSK (0x3288) +#define RTL839X_L2_FLD_PMSK (0x38EC) +#define RTL930X_L2_BC_FLD_PMSK (0x9068) +#define RTL931X_L2_BC_FLD_PMSK (0xC8FC) + +#define RTL930X_L2_UNKN_UC_FLD_PMSK (0x9064) +#define RTL931X_L2_UNKN_UC_FLD_PMSK (0xC8F4) + +#define RTL838X_L2_LRN_CONSTRT_EN (0x3368) +#define 
RTL838X_L2_PORT_LRN_CONSTRT (0x32A0) +#define RTL839X_L2_PORT_LRN_CONSTRT (0x3914) + +#define RTL838X_L2_PORT_NEW_SALRN(p) (0x328c + (((p >> 4) << 2))) +#define RTL839X_L2_PORT_NEW_SALRN(p) (0x38F0 + (((p >> 4) << 2))) +#define RTL930X_L2_PORT_SALRN(p) (0x8FEC + (((p >> 4) << 2))) +#define RTL931X_L2_PORT_NEW_SALRN(p) (0xC820 + (((p >> 4) << 2))) + +#define SALRN_PORT_SHIFT(p) ((p % 16) * 2) +#define SALRN_MODE_MASK 0x3 +#define SALRN_MODE_HARDWARE 0 +#define SALRN_MODE_DISABLED 2 + +#define RTL838X_L2_PORT_NEW_SA_FWD(p) (0x3294 + (((p >> 4) << 2))) +#define RTL839X_L2_PORT_NEW_SA_FWD(p) (0x3900 + (((p >> 4) << 2))) +#define RTL930X_L2_PORT_NEW_SA_FWD(p) (0x8FF4 + (((p / 10) << 2))) +#define RTL931X_L2_PORT_NEW_SA_FWD(p) (0xC830 + (((p / 10) << 2))) + +#define RTL930X_ST_CTRL (0x8798) + +#define RTL930X_L2_PORT_SABLK_CTRL (0x905c) +#define RTL930X_L2_PORT_DABLK_CTRL (0x9060) + +#define RTL838X_L2_PORT_LM_ACT(p) (0x3208 + ((p) << 2)) +#define RTL838X_VLAN_PORT_FWD (0x3A78) +#define RTL839X_VLAN_PORT_FWD (0x27AC) +#define RTL930X_VLAN_PORT_FWD (0x834C) +#define RTL931X_VLAN_PORT_FWD (0x95CC) +#define RTL838X_VLAN_FID_CTRL (0x3aa8) + +/* Port Mirroring */ +#define RTL838X_MIR_CTRL (0x5D00) +#define RTL838X_MIR_DPM_CTRL (0x5D20) +#define RTL838X_MIR_SPM_CTRL (0x5D10) + +#define RTL839X_MIR_CTRL (0x2500) +#define RTL839X_MIR_DPM_CTRL (0x2530) +#define RTL839X_MIR_SPM_CTRL (0x2510) + +#define RTL930X_MIR_CTRL (0xA2A0) +#define RTL930X_MIR_DPM_CTRL (0xA2C0) +#define RTL930X_MIR_SPM_CTRL (0xA2B0) + +#define RTL931X_MIR_CTRL (0xAF00) +#define RTL931X_MIR_DPM_CTRL (0xAF30) +#define RTL931X_MIR_SPM_CTRL (0xAF10) + +/* Storm/rate control and scheduling */ +#define RTL838X_STORM_CTRL (0x4700) +#define RTL839X_STORM_CTRL (0x1800) +#define RTL838X_STORM_CTRL_LB_CTRL(p) (0x4884 + (((p) << 2))) +#define RTL838X_STORM_CTRL_BURST_PPS_0 (0x4874) +#define RTL838X_STORM_CTRL_BURST_PPS_1 (0x4878) +#define RTL838X_STORM_CTRL_BURST_0 (0x487c) +#define RTL838X_STORM_CTRL_BURST_1 (0x4880) +#define RTL839X_STORM_CTRL_LB_TICK_TKN_CTRL_0 (0x1804) +#define RTL839X_STORM_CTRL_LB_TICK_TKN_CTRL_1 (0x1808) +#define RTL838X_SCHED_CTRL (0xB980) +#define RTL839X_SCHED_CTRL (0x60F4) +#define RTL838X_SCHED_LB_TICK_TKN_CTRL_0 (0xAD58) +#define RTL838X_SCHED_LB_TICK_TKN_CTRL_1 (0xAD5C) +#define RTL839X_SCHED_LB_TICK_TKN_CTRL_0 (0x1804) +#define RTL839X_SCHED_LB_TICK_TKN_CTRL_1 (0x1808) +#define RTL839X_STORM_CTRL_SPCL_LB_TICK_TKN_CTRL (0x2000) +#define RTL839X_IGR_BWCTRL_LB_TICK_TKN_CTRL_0 (0x1604) +#define RTL839X_IGR_BWCTRL_LB_TICK_TKN_CTRL_1 (0x1608) +#define RTL839X_SCHED_LB_TICK_TKN_CTRL (0x60F8) +#define RTL839X_SCHED_LB_TICK_TKN_PPS_CTRL (0x6200) +#define RTL838X_SCHED_LB_THR (0xB984) +#define RTL839X_SCHED_LB_THR (0x60FC) +#define RTL838X_SCHED_P_EGR_RATE_CTRL(p) (0xC008 + (((p) << 7))) +#define RTL838X_SCHED_Q_EGR_RATE_CTRL(p, q) (0xC00C + (p << 7) + (((q) << 2))) +#define RTL838X_STORM_CTRL_PORT_BC_EXCEED (0x470C) +#define RTL838X_STORM_CTRL_PORT_MC_EXCEED (0x4710) +#define RTL838X_STORM_CTRL_PORT_UC_EXCEED (0x4714) +#define RTL839X_STORM_CTRL_PORT_BC_EXCEED(p) (0x180c + (((p >> 5) << 2))) +#define RTL839X_STORM_CTRL_PORT_MC_EXCEED(p) (0x1814 + (((p >> 5) << 2))) +#define RTL839X_STORM_CTRL_PORT_UC_EXCEED(p) (0x181c + (((p >> 5) << 2))) +#define RTL838X_STORM_CTRL_PORT_UC(p) (0x4718 + (((p) << 2))) +#define RTL838X_STORM_CTRL_PORT_MC(p) (0x478c + (((p) << 2))) +#define RTL838X_STORM_CTRL_PORT_BC(p) (0x4800 + (((p) << 2))) +#define RTL839X_STORM_CTRL_PORT_UC_0(p) (0x185C + (((p) << 3))) +#define 
RTL839X_STORM_CTRL_PORT_UC_1(p) (0x1860 + (((p) << 3))) +#define RTL839X_STORM_CTRL_PORT_MC_0(p) (0x19FC + (((p) << 3))) +#define RTL839X_STORM_CTRL_PORT_MC_1(p) (0x1a00 + (((p) << 3))) +#define RTL839X_STORM_CTRL_PORT_BC_0(p) (0x1B9C + (((p) << 3))) +#define RTL839X_STORM_CTRL_PORT_BC_1(p) (0x1BA0 + (((p) << 3))) +#define RTL839X_TBL_ACCESS_CTRL_2 (0x611C) +#define RTL839X_TBL_ACCESS_DATA_2(i) (0x6120 + (((i) << 2))) +#define RTL839X_IGR_BWCTRL_PORT_CTRL_10G_0(p) (0x1618 + (((p) << 3))) +#define RTL839X_IGR_BWCTRL_PORT_CTRL_10G_1(p) (0x161C + (((p) << 3))) +#define RTL839X_IGR_BWCTRL_PORT_CTRL_0(p) (0x1640 + (((p) << 3))) +#define RTL839X_IGR_BWCTRL_PORT_CTRL_1(p) (0x1644 + (((p) << 3))) +#define RTL839X_IGR_BWCTRL_CTRL_LB_THR (0x1614) + +/* Link aggregation (Trunking) */ +#define TRUNK_DISTRIBUTION_ALGO_SPA_BIT 0x01 +#define TRUNK_DISTRIBUTION_ALGO_SMAC_BIT 0x02 +#define TRUNK_DISTRIBUTION_ALGO_DMAC_BIT 0x04 +#define TRUNK_DISTRIBUTION_ALGO_SIP_BIT 0x08 +#define TRUNK_DISTRIBUTION_ALGO_DIP_BIT 0x10 +#define TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT 0x20 +#define TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT 0x40 +#define TRUNK_DISTRIBUTION_ALGO_MASKALL 0x7F + +#define TRUNK_DISTRIBUTION_ALGO_L2_SPA_BIT 0x01 +#define TRUNK_DISTRIBUTION_ALGO_L2_SMAC_BIT 0x02 +#define TRUNK_DISTRIBUTION_ALGO_L2_DMAC_BIT 0x04 +#define TRUNK_DISTRIBUTION_ALGO_L2_VLAN_BIT 0x08 +#define TRUNK_DISTRIBUTION_ALGO_L2_MASKALL 0xF + +#define TRUNK_DISTRIBUTION_ALGO_L3_SPA_BIT 0x01 +#define TRUNK_DISTRIBUTION_ALGO_L3_SMAC_BIT 0x02 +#define TRUNK_DISTRIBUTION_ALGO_L3_DMAC_BIT 0x04 +#define TRUNK_DISTRIBUTION_ALGO_L3_VLAN_BIT 0x08 +#define TRUNK_DISTRIBUTION_ALGO_L3_SIP_BIT 0x10 +#define TRUNK_DISTRIBUTION_ALGO_L3_DIP_BIT 0x20 +#define TRUNK_DISTRIBUTION_ALGO_L3_SRC_L4PORT_BIT 0x40 +#define TRUNK_DISTRIBUTION_ALGO_L3_DST_L4PORT_BIT 0x80 +#define TRUNK_DISTRIBUTION_ALGO_L3_PROTO_BIT 0x100 +#define TRUNK_DISTRIBUTION_ALGO_L3_FLOW_LABEL_BIT 0x200 +#define TRUNK_DISTRIBUTION_ALGO_L3_MASKALL 0x3FF + +#define RTL838X_TRK_MBR_CTR (0x3E00) +#define RTL838X_TRK_HASH_IDX_CTRL (0x3E20) +#define RTL838X_TRK_HASH_CTRL (0x3E24) + +#define RTL839X_TRK_MBR_CTR (0x2200) +#define RTL839X_TRK_HASH_IDX_CTRL (0x2280) +#define RTL839X_TRK_HASH_CTRL (0x2284) + +#define RTL930X_TRK_MBR_CTRL (0xA41C) +#define RTL930X_TRK_HASH_CTRL (0x9F80) + +#define RTL931X_TRK_MBR_CTRL (0xB8D0) +#define RTL931X_TRK_HASH_CTRL (0xBA70) + +/* Attack prevention */ +#define RTL838X_ATK_PRVNT_PORT_EN (0x5B00) +#define RTL838X_ATK_PRVNT_CTRL (0x5B04) +#define RTL838X_ATK_PRVNT_ACT (0x5B08) +#define RTL838X_ATK_PRVNT_STS (0x5B1C) + +/* 802.1X */ +#define RTL838X_RMA_BPDU_FLD_PMSK (0x4348) +#define RTL930X_RMA_BPDU_FLD_PMSK (0x9F18) +#define RTL931X_RMA_BPDU_FLD_PMSK (0x8950) +#define RTL839X_RMA_BPDU_FLD_PMSK (0x125C) + +#define RTL838X_SPCL_TRAP_CTRL (0x6980) +#define RTL838X_SPCL_TRAP_EAPOL_CTRL (0x6988) +#define RTL838X_SPCL_TRAP_ARP_CTRL (0x698C) +#define RTL838X_SPCL_TRAP_IGMP_CTRL (0x6984) +#define RTL838X_SPCL_TRAP_IPV6_CTRL (0x6994) +#define RTL838X_SPCL_TRAP_SWITCH_MAC_CTRL (0x6998) + +#define RTL839X_SPCL_TRAP_CTRL (0x1054) +#define RTL839X_SPCL_TRAP_EAPOL_CTRL (0x105C) +#define RTL839X_SPCL_TRAP_ARP_CTRL (0x1060) +#define RTL839X_SPCL_TRAP_IGMP_CTRL (0x1058) +#define RTL839X_SPCL_TRAP_IPV6_CTRL (0x1064) +#define RTL839X_SPCL_TRAP_SWITCH_MAC_CTRL (0x1068) +#define RTL839X_SPCL_TRAP_SWITCH_IPV4_ADDR_CTRL (0x106C) +#define RTL839X_SPCL_TRAP_CRC_CTRL (0x1070) +/* special port action controls */ +/* + values: + 0 = FORWARD (default) + 1 = DROP + 2 = TRAP2CPU + 3 = FLOOD 
IN ALL PORT + + Register encoding. + offset = CTRL + (port >> 4) << 2 + value/mask = 3 << ((port&0xF) << 1) +*/ + +typedef enum { + BPDU = 0, + PTP, + PTP_UDP, + PTP_ETH2, + LLTP, + EAPOL, + GRATARP, +} rma_ctrl_t; + +typedef enum { + FORWARD = 0, + DROP, + TRAP2CPU, + FLOODALL, + TRAP2MASTERCPU, + COPY2CPU, +} action_type_t; + +#define RTL838X_RMA_BPDU_CTRL (0x4330) +#define RTL839X_RMA_BPDU_CTRL (0x122C) +#define RTL930X_RMA_BPDU_CTRL (0x9E7C) +#define RTL931X_RMA_BPDU_CTRL (0x881C) + +#define RTL838X_RMA_PTP_CTRL (0x4338) +#define RTL839X_RMA_PTP_CTRL (0x123C) +#define RTL930X_RMA_PTP_CTRL (0x9E88) +#define RTL931X_RMA_PTP_CTRL (0x8834) + +#define RTL838X_RMA_LLTP_CTRL (0x4340) +#define RTL839X_RMA_LLTP_CTRL (0x124C) +#define RTL930X_RMA_LLTP_CTRL (0x9EFC) +#define RTL931X_RMA_LLTP_CTRL (0x8918) + +#define RTL930X_RMA_EAPOL_CTRL (0x9F08) +#define RTL931X_RMA_EAPOL_CTRL (0x8930) +#define RTL931X_TRAP_ARP_GRAT_PORT_ACT (0x8C04) + +/* QoS */ +#define RTL838X_QM_INTPRI2QID_CTRL (0x5F00) +#define RTL839X_QM_INTPRI2QID_CTRL(q) (0x1110 + (q << 2)) +#define RTL839X_QM_PORT_QNUM(p) (0x1130 + (((p / 10) << 2))) +#define RTL838X_PRI_SEL_PORT_PRI(p) (0x5FB8 + (((p / 10) << 2))) +#define RTL839X_PRI_SEL_PORT_PRI(p) (0x10A8 + (((p / 10) << 2))) +#define RTL838X_QM_PKT2CPU_INTPRI_MAP (0x5F10) +#define RTL839X_QM_PKT2CPU_INTPRI_MAP (0x1154) +#define RTL838X_PRI_SEL_CTRL (0x10E0) +#define RTL839X_PRI_SEL_CTRL (0x10E0) +#define RTL838X_PRI_SEL_TBL_CTRL(i) (0x5FD8 + (((i) << 2))) +#define RTL839X_PRI_SEL_TBL_CTRL(i) (0x10D0 + (((i) << 2))) +#define RTL838X_QM_PKT2CPU_INTPRI_0 (0x5F04) +#define RTL838X_QM_PKT2CPU_INTPRI_1 (0x5F08) +#define RTL838X_QM_PKT2CPU_INTPRI_2 (0x5F0C) +#define RTL839X_OAM_CTRL (0x2100) +#define RTL839X_OAM_PORT_ACT_CTRL(p) (0x2104 + (((p) << 2))) +#define RTL839X_RMK_PORT_DEI_TAG_CTRL(p) (0x6A9C + (((p >> 5) << 2))) +#define RTL839X_PRI_SEL_IPRI_REMAP (0x1080) +#define RTL838X_PRI_SEL_IPRI_REMAP (0x5F8C) +#define RTL839X_PRI_SEL_DEI2DP_REMAP (0x10EC) +#define RTL839X_PRI_SEL_DSCP2DP_REMAP_ADDR(i) (0x10F0 + (((i >> 4) << 2))) +#define RTL839X_RMK_DEI_CTRL (0x6AA4) +#define RTL839X_WRED_PORT_THR_CTRL(i) (0x6084 + ((i) << 2)) +#define RTL839X_WRED_QUEUE_THR_CTRL(q, i) (0x6090 + ((q) * 12) + ((i) << 2)) +#define RTL838X_PRI_DSCP_INVLD_CTRL0 (0x5FE8) +#define RTL838X_RMK_IPRI_CTRL (0xA460) +#define RTL838X_RMK_OPRI_CTRL (0xA464) +#define RTL838X_SCHED_P_TYPE_CTRL(p) (0xC04C + (((p) << 7))) +#define RTL838X_SCHED_LB_CTRL(p) (0xC004 + (((p) << 7))) +#define RTL838X_FC_P_EGR_DROP_CTRL(p) (0x6B1C + (((p) << 2))) + +/* Debug features */ +#define RTL930X_STAT_PRVTE_DROP_COUNTER0 (0xB5B8) + +/* Packet Inspection Engine */ +#define RTL838X_METER_GLB_CTRL (0x4B08) +#define RTL839X_METER_GLB_CTRL (0x1300) +#define RTL930X_METER_GLB_CTRL (0xa0a0) +#define RTL931X_METER_GLB_CTRL (0x411C) + +#define RTL839X_ACL_CTRL (0x1288) + +#define RTL838X_ACL_BLK_LOOKUP_CTRL (0x6100) +#define RTL839X_ACL_BLK_LOOKUP_CTRL (0x1280) +#define RTL930X_PIE_BLK_LOOKUP_CTRL (0xa5a0) +#define RTL931X_PIE_BLK_LOOKUP_CTRL (0x4180) + +#define RTL838X_ACL_BLK_PWR_CTRL (0x6104) +#define RTL839X_PS_ACL_PWR_CTRL (0x049c) + +#define RTL838X_ACL_BLK_TMPLTE_CTRL(block) (0x6108 + ((block) << 2)) +#define RTL839X_ACL_BLK_TMPLTE_CTRL(block) (0x128c + ((block) << 2)) +#define RTL930X_PIE_BLK_TMPLTE_CTRL(block) (0xa624 + ((block) << 2)) +#define RTL931X_PIE_BLK_TMPLTE_CTRL(block) (0x4214 + ((block) << 2)) + +#define RTL838X_ACL_BLK_GROUP_CTRL (0x615C) +#define RTL839X_ACL_BLK_GROUP_CTRL (0x12ec) + +#define RTL838X_ACL_CLR_CTRL (0x6168) 
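The per-port action encoding documented in the comment above (offset = CTRL + ((port >> 4) << 2), value/mask = 3 << ((port & 0xF) << 1)) is what rtl838x_set_receive_management_action() earlier in rtl838x.c applies to the RMA_*_CTRL registers. A minimal standalone sketch of that address/shift arithmetic follows; the helper names are illustrative and not part of the driver:

#include <stdio.h>

typedef unsigned int u32;

/* Sixteen ports share one 32-bit control register, two bits per port. */
static u32 rma_ctrl_offset(u32 base, int port)
{
	return base + ((port >> 4) << 2);
}

/* Bit position of the port's two-bit action field within that register. */
static int rma_ctrl_shift(int port)
{
	return (port & 0xf) << 1;
}

int main(void)
{
	/* Example: BPDU action for port 18, RTL838X_RMA_BPDU_CTRL = 0x4330.
	 * An action such as TRAP2CPU (value 2) would be written as 2 << shift
	 * under the mask printed below. */
	u32 off = rma_ctrl_offset(0x4330, 18);	/* 0x4334 */
	int shift = rma_ctrl_shift(18);		/* field occupies bits 5:4 */

	printf("offset 0x%04x, mask 0x%08x\n", off, 3u << shift);
	return 0;
}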
+#define RTL839X_ACL_CLR_CTRL (0x12fc) +#define RTL930X_PIE_CLR_CTRL (0xa66c) +#define RTL931X_PIE_CLR_CTRL (0x42D8) + +#define RTL838X_DMY_REG27 (0x3378) + +#define RTL838X_ACL_PORT_LOOKUP_CTRL(p) (0x616C + (((p) << 2))) +#define RTL930X_ACL_PORT_LOOKUP_CTRL(p) (0xA784 + (((p) << 2))) +#define RTL931X_ACL_PORT_LOOKUP_CTRL(p) (0x44F8 + (((p) << 2))) + +#define RTL930X_PIE_BLK_PHASE_CTRL (0xA5A4) +#define RTL931X_PIE_BLK_PHASE_CTRL (0x4184) + +// PIE actions +#define PIE_ACT_COPY_TO_PORT 2 +#define PIE_ACT_REDIRECT_TO_PORT 4 +#define PIE_ACT_ROUTE_UC 6 +#define PIE_ACT_VID_ASSIGN 0 + +// L3 actions +#define L3_FORWARD 0 +#define L3_DROP 1 +#define L3_TRAP2CPU 2 +#define L3_COPY2CPU 3 +#define L3_TRAP2MASTERCPU 4 +#define L3_COPY2MASTERCPU 5 +#define L3_HARDDROP 6 + +// Route actions +#define ROUTE_ACT_FORWARD 0 +#define ROUTE_ACT_TRAP2CPU 1 +#define ROUTE_ACT_COPY2CPU 2 +#define ROUTE_ACT_DROP 3 + +/* L3 Routing */ +#define RTL839X_ROUTING_SA_CTRL 0x6afc +#define RTL930X_L3_HOST_TBL_CTRL (0xAB48) +#define RTL930X_L3_IPUC_ROUTE_CTRL (0xAB4C) +#define RTL930X_L3_IP6UC_ROUTE_CTRL (0xAB50) +#define RTL930X_L3_IPMC_ROUTE_CTRL (0xAB54) +#define RTL930X_L3_IP6MC_ROUTE_CTRL (0xAB58) +#define RTL930X_L3_IP_MTU_CTRL(i) (0xAB5C + ((i >> 1) << 2)) +#define RTL930X_L3_IP6_MTU_CTRL(i) (0xAB6C + ((i >> 1) << 2)) +#define RTL930X_L3_HW_LU_KEY_CTRL (0xAC9C) +#define RTL930X_L3_HW_LU_KEY_IP_CTRL (0xACA0) +#define RTL930X_L3_HW_LU_CTRL (0xACC0) +#define RTL930X_L3_IP_ROUTE_CTRL 0xab44 + +/* Port LED Control */ +#define RTL930X_LED_PORT_NUM_CTRL(p) (0xCC04 + (((p >> 4) << 2))) +#define RTL930X_LED_SET0_0_CTRL (0xCC28) +#define RTL930X_LED_PORT_COPR_SET_SEL_CTRL(p) (0xCC2C + (((p >> 4) << 2))) +#define RTL930X_LED_PORT_FIB_SET_SEL_CTRL(p) (0xCC34 + (((p >> 4) << 2))) +#define RTL930X_LED_PORT_COPR_MASK_CTRL (0xCC3C) +#define RTL930X_LED_PORT_FIB_MASK_CTRL (0xCC40) +#define RTL930X_LED_PORT_COMBO_MASK_CTRL (0xCC44) + +#define RTL931X_LED_PORT_NUM_CTRL(p) (0x0604 + (((p >> 4) << 2))) +#define RTL931X_LED_SET0_0_CTRL (0x0630) +#define RTL931X_LED_PORT_COPR_SET_SEL_CTRL(p) (0x0634 + (((p >> 4) << 2))) +#define RTL931X_LED_PORT_FIB_SET_SEL_CTRL(p) (0x0644 + (((p >> 4) << 2))) +#define RTL931X_LED_PORT_COPR_MASK_CTRL (0x0654) +#define RTL931X_LED_PORT_FIB_MASK_CTRL (0x065c) +#define RTL931X_LED_PORT_COMBO_MASK_CTRL (0x0664) + +#define MAX_VLANS 4096 +#define MAX_LAGS 16 +#define MAX_PRIOS 8 +#define RTL930X_PORT_IGNORE 0x3f +#define MAX_MC_GROUPS 512 +#define UNKNOWN_MC_PMASK (MAX_MC_GROUPS - 1) +#define PIE_BLOCK_SIZE 128 +#define MAX_PIE_ENTRIES (18 * PIE_BLOCK_SIZE) +#define N_FIXED_FIELDS 12 +#define N_FIXED_FIELDS_RTL931X 14 +#define MAX_COUNTERS 2048 +#define MAX_ROUTES 512 +#define MAX_HOST_ROUTES 1536 +#define MAX_INTF_MTUS 8 +#define DEFAULT_MTU 1536 +#define MAX_INTERFACES 100 +#define MAX_ROUTER_MACS 64 +#define L3_EGRESS_DMACS 2048 +#define MAX_SMACS 64 + +enum phy_type { + PHY_NONE = 0, + PHY_RTL838X_SDS = 1, + PHY_RTL8218B_INT = 2, + PHY_RTL8218B_EXT = 3, + PHY_RTL8214FC = 4, + PHY_RTL839X_SDS = 5, + PHY_RTL930X_SDS = 6, +}; + +enum pbvlan_type { + PBVLAN_TYPE_INNER = 0, + PBVLAN_TYPE_OUTER, +}; + +enum pbvlan_mode { + PBVLAN_MODE_UNTAG_AND_PRITAG = 0, + PBVLAN_MODE_UNTAG_ONLY, + PBVLAN_MODE_ALL_PKT, +}; + +struct rtl838x_port { + bool enable; + u64 pm; + u16 pvid; + bool eee_enabled; + enum phy_type phy; + bool phy_is_integrated; + bool is10G; + bool is2G5; + int sds_num; + int led_set; + const struct dsa_port *dp; +}; + +struct rtl838x_vlan_info { + u64 untagged_ports; + u64 tagged_ports; + u8 
profile_id; + bool hash_mc_fid; + bool hash_uc_fid; + u8 fid; // AKA MSTI + + // The following fields are used only by the RTL931X + int if_id; // Interface (index in L3_EGR_INTF_IDX) + u16 multicast_grp_mask; + int l2_tunnel_list_id; +}; + +enum l2_entry_type { + L2_INVALID = 0, + L2_UNICAST = 1, + L2_MULTICAST = 2, + IP4_MULTICAST = 3, + IP6_MULTICAST = 4, +}; + +struct rtl838x_l2_entry { + u8 mac[6]; + u16 vid; + u16 rvid; + u8 port; + bool valid; + enum l2_entry_type type; + bool is_static; + bool is_ip_mc; + bool is_ipv6_mc; + bool block_da; + bool block_sa; + bool suspended; + bool next_hop; + int age; + u8 trunk; + bool is_trunk; + u8 stack_dev; + u16 mc_portmask_index; + u32 mc_gip; + u32 mc_sip; + u16 mc_mac_index; + u16 nh_route_id; + bool nh_vlan_target; // Only RTL83xx: VLAN used for next hop + + // The following is only valid on RTL931x + bool is_open_flow; + bool is_pe_forward; + bool is_local_forward; + bool is_remote_forward; + bool is_l2_tunnel; + int l2_tunnel_id; + int l2_tunnel_list_id; +}; + +enum fwd_rule_action { + FWD_RULE_ACTION_NONE = 0, + FWD_RULE_ACTION_FWD = 1, +}; + +enum pie_phase { + PHASE_VACL = 0, + PHASE_IACL = 1, +}; + +enum igr_filter { + IGR_FORWARD = 0, + IGR_DROP = 1, + IGR_TRAP = 2, +}; + +enum egr_filter { + EGR_DISABLE = 0, + EGR_ENABLE = 1, +}; + +/* Intermediate representation of a Packet Inspection Engine Rule + * as suggested by the Kernel's tc flower offload subsystem + * Field meaning is universal across SoC families, but data content is specific + * to SoC family (e.g. because of different port ranges) */ +struct pie_rule { + int id; + enum pie_phase phase; // Phase in which this template is applied + int packet_cntr; // ID of a packet counter assigned to this rule + int octet_cntr; // ID of a byte counter assigned to this rule + u32 last_packet_cnt; + u64 last_octet_cnt; + + // The following are requirements for the pie template + bool is_egress; + bool is_ipv6; // This is a rule with IPv6 fields + + // Fixed fields that are always matched against on RTL8380 + u8 spmmask_fix; + u8 spn; // Source port number + bool stacking_port; // Source port is stacking port + bool mgnt_vlan; // Packet arrived on management VLAN + bool dmac_hit_sw; // The packet's destination MAC matches one of the device's + bool content_too_deep; // The content of the packet cannot be parsed: too many layers + bool not_first_frag; // Not the first IP fragment + u8 frame_type_l4; // 0: UDP, 1: TCP, 2: ICMP/ICMPv6, 3: IGMP + u8 frame_type; // 0: ARP, 1: L2 only, 2: IPv4, 3: IPv6 + bool otag_fmt; // 0: outer tag packet, 1: outer priority tag or untagged + bool itag_fmt; // 0: inner tag packet, 1: inner priority tag or untagged + bool otag_exist; // packet with outer tag + bool itag_exist; // packet with inner tag + bool frame_type_l2; // 0: Ethernet, 1: LLC_SNAP, 2: LLC_Other, 3: Reserved + bool igr_normal_port; // Ingress port is not cpu or stacking port + u8 tid; // The template ID defining the what the templated fields mean + + // Masks for the fields that are always matched against on RTL8380 + u8 spmmask_fix_m; + u8 spn_m; + bool stacking_port_m; + bool mgnt_vlan_m; + bool dmac_hit_sw_m; + bool content_too_deep_m; + bool not_first_frag_m; + u8 frame_type_l4_m; + u8 frame_type_m; + bool otag_fmt_m; + bool itag_fmt_m; + bool otag_exist_m; + bool itag_exist_m; + bool frame_type_l2_m; + bool igr_normal_port_m; + u8 tid_m; + + // Logical operations between rules, special rules for rule numbers apply + bool valid; + bool cond_not; // Matches when conditions not match + 
bool cond_and1; // And this rule 2n with the next rule 2n+1 in same block + bool cond_and2; // And this rule m in block 2n with rule m in block 2n+1 + bool ivalid; + + // Actions to be performed + bool drop; // Drop the packet + bool fwd_sel; // Forward packet: to port, portmask, dest route, next rule, drop + bool ovid_sel; // So something to outer vlan-id: shift, re-assign + bool ivid_sel; // Do something to inner vlan-id: shift, re-assign + bool flt_sel; // Filter the packet when sending to certain ports + bool log_sel; // Log the packet in one of the LOG-table counters + bool rmk_sel; // Re-mark the packet, i.e. change the priority-tag + bool meter_sel; // Meter the packet, i.e. limit rate of this type of packet + bool tagst_sel; // Change the ergress tag + bool mir_sel; // Mirror the packet to a Link Aggregation Group + bool nopri_sel; // Change the normal priority + bool cpupri_sel; // Change the CPU priority + bool otpid_sel; // Change Outer Tag Protocol Identifier (802.1q) + bool itpid_sel; // Change Inner Tag Protocol Identifier (802.1q) + bool shaper_sel; // Apply traffic shaper + bool mpls_sel; // MPLS actions + bool bypass_sel; // Bypass actions + bool fwd_sa_lrn; // Learn the source address when forwarding + bool fwd_mod_to_cpu; // Forward the modified VLAN tag format to CPU-port + + // Fields used in predefined templates 0-2 on RTL8380 / 90 / 9300 + u64 spm; // Source Port Matrix + u16 otag; // Outer VLAN-ID + u8 smac[ETH_ALEN]; // Source MAC address + u8 dmac[ETH_ALEN]; // Destination MAC address + u16 ethertype; // Ethernet frame type field in ethernet header + u16 itag; // Inner VLAN-ID + u16 field_range_check; + u32 sip; // Source IP + struct in6_addr sip6; // IPv6 Source IP + u32 dip; // Destination IP + struct in6_addr dip6; // IPv6 Destination IP + u16 tos_proto; // IPv4: TOS + Protocol fields, IPv6: Traffic class + next header + u16 sport; // TCP/UDP source port + u16 dport; // TCP/UDP destination port + u16 icmp_igmp; + u16 tcp_info; + u16 dsap_ssap; // Destination / Source Service Access Point bytes (802.3) + + u64 spm_m; + u16 otag_m; + u8 smac_m[ETH_ALEN]; + u8 dmac_m[ETH_ALEN]; + u8 ethertype_m; + u16 itag_m; + u16 field_range_check_m; + u32 sip_m; + struct in6_addr sip6_m; // IPv6 Source IP mask + u32 dip_m; + struct in6_addr dip6_m; // IPv6 Destination IP mask + u16 tos_proto_m; + u16 sport_m; + u16 dport_m; + u16 icmp_igmp_m; + u16 tcp_info_m; + u16 dsap_ssap_m; + + // Data associated with actions + u8 fwd_act; // Type of forwarding action + // 0: permit, 1: drop, 2: copy to port id, 4: copy to portmask + // 4: redirect to portid, 5: redirect to portmask + // 6: route, 7: vlan leaky (only 8380) + u16 fwd_data; // Additional data for forwarding action, e.g. 
destination port + u8 ovid_act; + u16 ovid_data; // Outer VLAN ID + u8 ivid_act; + u16 ivid_data; // Inner VLAN ID + u16 flt_data; // Filtering data + u16 log_data; // ID of packet or octet counter in LOG table, on RTL93xx + // unnecessary since PIE-Rule-ID == LOG-counter-ID + bool log_octets; + u8 mpls_act; // MPLS action type + u16 mpls_lib_idx; // MPLS action data + + u16 rmk_data; // Data for remarking + u16 meter_data; // ID of meter for bandwidth control + u16 tagst_data; + u16 mir_data; + u16 nopri_data; + u16 cpupri_data; + u16 otpid_data; + u16 itpid_data; + u16 shaper_data; + + // Bypass actions, ignored on RTL8380 + bool bypass_all; // Not clear + bool bypass_igr_stp; // Bypass Ingress STP state + bool bypass_ibc_sc; // Bypass Ingress Bandwidth Control and Storm Control +}; + +struct rtl838x_l3_intf { + u16 vid; + u8 smac_idx; + u8 ip4_mtu_id; + u8 ip6_mtu_id; + u16 ip4_mtu; + u16 ip6_mtu; + u8 ttl_scope; + u8 hl_scope; + u8 ip4_icmp_redirect; + u8 ip6_icmp_redirect; + u8 ip4_pbr_icmp_redirect; + u8 ip6_pbr_icmp_redirect; +}; + +/* + * An entry in the RTL93XX SoC's ROUTER_MAC tables setting up a termination point + * for the L3 routing system. Packets arriving and matching an entry in this table + * will be considered for routing. + * Mask fields state whether the corresponding data fields matter for matching + */ +struct rtl93xx_rt_mac { + bool valid; // Valid or not + bool p_type; // Individual (0) or trunk (1) port + bool p_mask; // Whether the port type is used + u8 p_id; + u8 p_id_mask; // Mask for the port + u8 action; // Routing action performed: 0: FORWARD, 1: DROP, 2: TRAP2CPU + // 3: COPY2CPU, 4: TRAP2MASTERCPU, 5: COPY2MASTERCPU, 6: HARDDROP + u16 vid; + u16 vid_mask; + u64 mac; // MAC address used as source MAC in the routed packet + u64 mac_mask; +}; + +struct rtl83xx_nexthop { + u16 id; // ID: L3_NEXT_HOP table-index or route-index set in L2_NEXT_HOP + u32 dev_id; + u16 port; + u16 vid; // VLAN-ID for L2 table entry (saved from L2-UC entry) + u16 rvid; // Relay VID/FID for the L2 table entry + u64 mac; // The MAC address of the entry in the L2_NEXT_HOP table + u16 mac_id; + u16 l2_id; // Index of this next hop forwarding entry in L2 FIB table + u64 gw; // The gateway MAC address packets are forwarded to + int if_id; // Interface (into L3_EGR_INTF_IDX) +}; + +struct rtl838x_switch_priv; + +struct rtl83xx_flow { + unsigned long cookie; + struct rhash_head node; + struct rcu_head rcu_head; + struct rtl838x_switch_priv *priv; + struct pie_rule rule; + u32 flags; +}; + +struct rtl93xx_route_attr { + bool valid; + bool hit; + bool ttl_dec; + bool ttl_check; + bool dst_null; + bool qos_as; + u8 qos_prio; + u8 type; + u8 action; +}; + +struct rtl83xx_route { + u32 gw_ip; // IP of the route's gateway + u32 dst_ip; // IP of the destination net + struct in6_addr dst_ip6; + int prefix_len; // Network prefix len of the destination net + bool is_host_route; + int id; // ID number of this route + struct rhlist_head linkage; + u16 switch_mac_id; // Index into switch's own MACs, RTL839X only + struct rtl83xx_nexthop nh; + struct pie_rule pr; + struct rtl93xx_route_attr attr; +}; + +struct rtl838x_reg { + void (*mask_port_reg_be)(u64 clear, u64 set, int reg); + void (*set_port_reg_be)(u64 set, int reg); + u64 (*get_port_reg_be)(int reg); + void (*mask_port_reg_le)(u64 clear, u64 set, int reg); + void (*set_port_reg_le)(u64 set, int reg); + u64 (*get_port_reg_le)(int reg); + int stat_port_rst; + int stat_rst; + int stat_port_std_mib; + int (*port_iso_ctrl)(int p); + void 
(*traffic_enable)(int source, int dest); + void (*traffic_disable)(int source, int dest); + void (*traffic_set)(int source, u64 dest_matrix); + u64 (*traffic_get)(int source); + int l2_ctrl_0; + int l2_ctrl_1; + int smi_poll_ctrl; + u32 l2_port_aging_out; + int l2_tbl_flush_ctrl; + void (*exec_tbl0_cmd)(u32 cmd); + void (*exec_tbl1_cmd)(u32 cmd); + int (*tbl_access_data_0)(int i); + int isr_glb_src; + int isr_port_link_sts_chg; + int imr_port_link_sts_chg; + int imr_glb; + void (*vlan_tables_read)(u32 vlan, struct rtl838x_vlan_info *info); + void (*vlan_set_tagged)(u32 vlan, struct rtl838x_vlan_info *info); + void (*vlan_set_untagged)(u32 vlan, u64 portmask); + void (*vlan_profile_dump)(int index); + void (*vlan_profile_setup)(int profile); + void (*vlan_port_pvidmode_set)(int port, enum pbvlan_type type, enum pbvlan_mode mode); + void (*vlan_port_pvid_set)(int port, enum pbvlan_type type, int pvid); + void (*vlan_port_keep_tag_set)(int port, bool keep_outer, bool keep_inner); + void (*set_vlan_igr_filter)(int port, enum igr_filter state); + void (*set_vlan_egr_filter)(int port, enum egr_filter state); + void (*enable_learning)(int port, bool enable); + void (*enable_flood)(int port, bool enable); + void (*enable_mcast_flood)(int port, bool enable); + void (*enable_bcast_flood)(int port, bool enable); + void (*stp_get)(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]); + void (*stp_set)(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]); + int (*mac_force_mode_ctrl)(int port); + int (*mac_port_ctrl)(int port); + int (*l2_port_new_salrn)(int port); + int (*l2_port_new_sa_fwd)(int port); + int (*set_ageing_time)(unsigned long msec); + int mir_ctrl; + int mir_dpm; + int mir_spm; + int mac_link_sts; + int mac_link_dup_sts; + int (*mac_link_spd_sts)(int port); + int mac_rx_pause_sts; + int mac_tx_pause_sts; + u64 (*read_l2_entry_using_hash)(u32 hash, u32 position, struct rtl838x_l2_entry *e); + void (*write_l2_entry_using_hash)(u32 hash, u32 pos, struct rtl838x_l2_entry *e); + u64 (*read_cam)(int idx, struct rtl838x_l2_entry *e); + void (*write_cam)(int idx, struct rtl838x_l2_entry *e); + int (*trk_mbr_ctr)(int group); + int rma_bpdu_fld_pmask; + int spcl_trap_eapol_ctrl; + void (*init_eee)(struct rtl838x_switch_priv *priv, bool enable); + void (*port_eee_set)(struct rtl838x_switch_priv *priv, int port, bool enable); + int (*eee_port_ability)(struct rtl838x_switch_priv *priv, + struct ethtool_eee *e, int port); + u64 (*l2_hash_seed)(u64 mac, u32 vid); + u32 (*l2_hash_key)(struct rtl838x_switch_priv *priv, u64 seed); + u64 (*read_mcast_pmask)(int idx); + void (*write_mcast_pmask)(int idx, u64 portmask); + void (*vlan_fwd_on_inner)(int port, bool is_set); + void (*pie_init)(struct rtl838x_switch_priv *priv); + int (*pie_rule_read)(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr); + int (*pie_rule_write)(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr); + int (*pie_rule_add)(struct rtl838x_switch_priv *priv, struct pie_rule *rule); + void (*pie_rule_rm)(struct rtl838x_switch_priv *priv, struct pie_rule *rule); + void (*l2_learning_setup)(void); + u32 (*packet_cntr_read)(int counter); + void (*packet_cntr_clear)(int counter); + void (*route_read)(int idx, struct rtl83xx_route *rt); + void (*route_write)(int idx, struct rtl83xx_route *rt); + void (*host_route_write)(int idx, struct rtl83xx_route *rt); + int (*l3_setup)(struct rtl838x_switch_priv *priv); + void (*set_l3_nexthop)(int idx, u16 dmac_id, u16 interface); + void (*get_l3_nexthop)(int 
idx, u16 *dmac_id, u16 *interface); + u64 (*get_l3_egress_mac)(u32 idx); + void (*set_l3_egress_mac)(u32 idx, u64 mac); + int (*find_l3_slot)(struct rtl83xx_route *rt, bool must_exist); + int (*route_lookup_hw)(struct rtl83xx_route *rt); + void (*get_l3_router_mac)(u32 idx, struct rtl93xx_rt_mac *m); + void (*set_l3_router_mac)(u32 idx, struct rtl93xx_rt_mac *m); + void (*set_l3_egress_intf)(int idx, struct rtl838x_l3_intf *intf); + void (*set_distribution_algorithm)(int group, int algoidx, u32 algomask); + void (*set_receive_management_action)(int port, rma_ctrl_t type, action_type_t action); + void (*led_init)(struct rtl838x_switch_priv *priv); +}; + +struct rtl838x_switch_priv { + /* Switch operation */ + struct dsa_switch *ds; + struct device *dev; + u16 id; + u16 family_id; + char version; + struct rtl838x_port ports[57]; + struct mutex reg_mutex; // Mutex for individual register manipulations + struct mutex pie_mutex; // Mutex for Packet Inspection Engine + int link_state_irq; + int mirror_group_ports[4]; + struct mii_bus *mii_bus; + const struct rtl838x_reg *r; + u8 cpu_port; + u8 port_mask; + u8 port_width; + u8 port_ignore; + u64 irq_mask; + u32 fib_entries; + int l2_bucket_size; + struct dentry *dbgfs_dir; + int n_lags; + u64 lags_port_members[MAX_LAGS]; + struct net_device *lag_devs[MAX_LAGS]; + u32 lag_primary[MAX_LAGS]; + u32 is_lagmember[57]; + u64 lagmembers; + struct notifier_block nb; // TODO: change to different name + struct notifier_block ne_nb; + struct notifier_block fib_nb; + bool eee_enabled; + unsigned long int mc_group_bm[MAX_MC_GROUPS >> 5]; + int mc_group_saves[MAX_MC_GROUPS]; + int n_pie_blocks; + struct rhashtable tc_ht; + unsigned long int pie_use_bm[MAX_PIE_ENTRIES >> 5]; + int n_counters; + unsigned long int octet_cntr_use_bm[MAX_COUNTERS >> 5]; + unsigned long int packet_cntr_use_bm[MAX_COUNTERS >> 4]; + struct rhltable routes; + unsigned long int route_use_bm[MAX_ROUTES >> 5]; + unsigned long int host_route_use_bm[MAX_HOST_ROUTES >> 5]; + struct rtl838x_l3_intf *interfaces[MAX_INTERFACES]; + u16 intf_mtus[MAX_INTF_MTUS]; + int intf_mtu_count[MAX_INTF_MTUS]; +}; + +void rtl838x_dbgfs_init(struct rtl838x_switch_priv *priv); +void rtl930x_dbgfs_init(struct rtl838x_switch_priv *priv); + +#endif /* _RTL838X_H */ diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl839x.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl839x.c new file mode 100644 index 0000000000..986a4b5f45 --- /dev/null +++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl839x.c @@ -0,0 +1,1937 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include "rtl83xx.h" + +#define RTL839X_VLAN_PORT_TAG_STS_UNTAG 0x0 +#define RTL839X_VLAN_PORT_TAG_STS_TAGGED 0x1 +#define RTL839X_VLAN_PORT_TAG_STS_PRIORITY_TAGGED 0x2 + +#define RTL839X_VLAN_PORT_TAG_STS_CTRL_BASE 0x6828 +/* port 0-52 */ +#define RTL839X_VLAN_PORT_TAG_STS_CTRL(port) \ + RTL839X_VLAN_PORT_TAG_STS_CTRL_BASE + (port << 2) +#define RTL839X_VLAN_PORT_TAG_STS_CTRL_OTAG_STS_MASK GENMASK(7,6) +#define RTL839X_VLAN_PORT_TAG_STS_CTRL_ITAG_STS_MASK GENMASK(5,4) +#define RTL839X_VLAN_PORT_TAG_STS_CTRL_EGR_P_OTAG_KEEP_MASK GENMASK(3,3) +#define RTL839X_VLAN_PORT_TAG_STS_CTRL_EGR_P_ITAG_KEEP_MASK GENMASK(2,2) +#define RTL839X_VLAN_PORT_TAG_STS_CTRL_IGR_P_OTAG_KEEP_MASK GENMASK(1,1) +#define RTL839X_VLAN_PORT_TAG_STS_CTRL_IGR_P_ITAG_KEEP_MASK GENMASK(0,0) + +extern struct mutex smi_lock; +extern struct rtl83xx_soc_info soc_info; + +/* Definition of the RTL839X-specific template field IDs as used in 
the PIE */ +enum template_field_id { + TEMPLATE_FIELD_SPMMASK = 0, + TEMPLATE_FIELD_SPM0 = 1, // Source portmask ports 0-15 + TEMPLATE_FIELD_SPM1 = 2, // Source portmask ports 16-31 + TEMPLATE_FIELD_SPM2 = 3, // Source portmask ports 32-47 + TEMPLATE_FIELD_SPM3 = 4, // Source portmask ports 48-56 + TEMPLATE_FIELD_DMAC0 = 5, // Destination MAC [15:0] + TEMPLATE_FIELD_DMAC1 = 6, // Destination MAC [31:16] + TEMPLATE_FIELD_DMAC2 = 7, // Destination MAC [47:32] + TEMPLATE_FIELD_SMAC0 = 8, // Source MAC [15:0] + TEMPLATE_FIELD_SMAC1 = 9, // Source MAC [31:16] + TEMPLATE_FIELD_SMAC2 = 10, // Source MAC [47:32] + TEMPLATE_FIELD_ETHERTYPE = 11, // Ethernet frame type field + // Field-ID 12 is not used + TEMPLATE_FIELD_OTAG = 13, + TEMPLATE_FIELD_ITAG = 14, + TEMPLATE_FIELD_SIP0 = 15, + TEMPLATE_FIELD_SIP1 = 16, + TEMPLATE_FIELD_DIP0 = 17, + TEMPLATE_FIELD_DIP1 = 18, + TEMPLATE_FIELD_IP_TOS_PROTO = 19, + TEMPLATE_FIELD_IP_FLAG = 20, + TEMPLATE_FIELD_L4_SPORT = 21, + TEMPLATE_FIELD_L4_DPORT = 22, + TEMPLATE_FIELD_L34_HEADER = 23, + TEMPLATE_FIELD_ICMP_IGMP = 24, + TEMPLATE_FIELD_VID_RANG0 = 25, + TEMPLATE_FIELD_VID_RANG1 = 26, + TEMPLATE_FIELD_L4_PORT_RANG = 27, + TEMPLATE_FIELD_FIELD_SELECTOR_VALID = 28, + TEMPLATE_FIELD_FIELD_SELECTOR_0 = 29, + TEMPLATE_FIELD_FIELD_SELECTOR_1 = 30, + TEMPLATE_FIELD_FIELD_SELECTOR_2 = 31, + TEMPLATE_FIELD_FIELD_SELECTOR_3 = 32, + TEMPLATE_FIELD_FIELD_SELECTOR_4 = 33, + TEMPLATE_FIELD_FIELD_SELECTOR_5 = 34, + TEMPLATE_FIELD_SIP2 = 35, + TEMPLATE_FIELD_SIP3 = 36, + TEMPLATE_FIELD_SIP4 = 37, + TEMPLATE_FIELD_SIP5 = 38, + TEMPLATE_FIELD_SIP6 = 39, + TEMPLATE_FIELD_SIP7 = 40, + TEMPLATE_FIELD_OLABEL = 41, + TEMPLATE_FIELD_ILABEL = 42, + TEMPLATE_FIELD_OILABEL = 43, + TEMPLATE_FIELD_DPMMASK = 44, + TEMPLATE_FIELD_DPM0 = 45, + TEMPLATE_FIELD_DPM1 = 46, + TEMPLATE_FIELD_DPM2 = 47, + TEMPLATE_FIELD_DPM3 = 48, + TEMPLATE_FIELD_L2DPM0 = 49, + TEMPLATE_FIELD_L2DPM1 = 50, + TEMPLATE_FIELD_L2DPM2 = 51, + TEMPLATE_FIELD_L2DPM3 = 52, + TEMPLATE_FIELD_IVLAN = 53, + TEMPLATE_FIELD_OVLAN = 54, + TEMPLATE_FIELD_FWD_VID = 55, + TEMPLATE_FIELD_DIP2 = 56, + TEMPLATE_FIELD_DIP3 = 57, + TEMPLATE_FIELD_DIP4 = 58, + TEMPLATE_FIELD_DIP5 = 59, + TEMPLATE_FIELD_DIP6 = 60, + TEMPLATE_FIELD_DIP7 = 61, +}; + +// Number of fixed templates predefined in the SoC +#define N_FIXED_TEMPLATES 5 +static enum template_field_id fixed_templates[N_FIXED_TEMPLATES][N_FIXED_FIELDS] = +{ + { + TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1, TEMPLATE_FIELD_ITAG, + TEMPLATE_FIELD_SMAC0, TEMPLATE_FIELD_SMAC1, TEMPLATE_FIELD_SMAC2, + TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2, + TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_SPM2, TEMPLATE_FIELD_SPM3 + }, { + TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0, + TEMPLATE_FIELD_DIP1,TEMPLATE_FIELD_IP_TOS_PROTO, TEMPLATE_FIELD_L4_SPORT, + TEMPLATE_FIELD_L4_DPORT, TEMPLATE_FIELD_ICMP_IGMP, TEMPLATE_FIELD_SPM0, + TEMPLATE_FIELD_SPM1, TEMPLATE_FIELD_SPM2, TEMPLATE_FIELD_SPM3 + }, { + TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2, + TEMPLATE_FIELD_ITAG, TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_IP_TOS_PROTO, + TEMPLATE_FIELD_L4_DPORT, TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_SIP0, + TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0, TEMPLATE_FIELD_DIP1 + }, { + TEMPLATE_FIELD_DIP0, TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_DIP2, + TEMPLATE_FIELD_DIP3, TEMPLATE_FIELD_DIP4, TEMPLATE_FIELD_DIP5, + TEMPLATE_FIELD_DIP6, TEMPLATE_FIELD_DIP7, TEMPLATE_FIELD_L4_DPORT, + TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_ICMP_IGMP, TEMPLATE_FIELD_IP_TOS_PROTO + }, { + 
TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_SIP2, + TEMPLATE_FIELD_SIP3, TEMPLATE_FIELD_SIP4, TEMPLATE_FIELD_SIP5, + TEMPLATE_FIELD_SIP6, TEMPLATE_FIELD_SIP7, TEMPLATE_FIELD_SPM0, + TEMPLATE_FIELD_SPM1, TEMPLATE_FIELD_SPM2, TEMPLATE_FIELD_SPM3 + }, +}; + +void rtl839x_print_matrix(void) +{ + volatile u64 *ptr9; + int i; + + ptr9 = RTL838X_SW_BASE + RTL839X_PORT_ISO_CTRL(0); + for (i = 0; i < 52; i += 4) + pr_debug("> %16llx %16llx %16llx %16llx\n", + ptr9[i + 0], ptr9[i + 1], ptr9[i + 2], ptr9[i + 3]); + pr_debug("CPU_PORT> %16llx\n", ptr9[52]); +} + +static inline int rtl839x_port_iso_ctrl(int p) +{ + return RTL839X_PORT_ISO_CTRL(p); +} + +static inline void rtl839x_exec_tbl0_cmd(u32 cmd) +{ + sw_w32(cmd, RTL839X_TBL_ACCESS_CTRL_0); + do { } while (sw_r32(RTL839X_TBL_ACCESS_CTRL_0) & BIT(16)); +} + +static inline void rtl839x_exec_tbl1_cmd(u32 cmd) +{ + sw_w32(cmd, RTL839X_TBL_ACCESS_CTRL_1); + do { } while (sw_r32(RTL839X_TBL_ACCESS_CTRL_1) & BIT(16)); +} + +inline void rtl839x_exec_tbl2_cmd(u32 cmd) +{ + sw_w32(cmd, RTL839X_TBL_ACCESS_CTRL_2); + do { } while (sw_r32(RTL839X_TBL_ACCESS_CTRL_2) & (1 << 9)); +} + +static inline int rtl839x_tbl_access_data_0(int i) +{ + return RTL839X_TBL_ACCESS_DATA_0(i); +} + +static void rtl839x_vlan_tables_read(u32 vlan, struct rtl838x_vlan_info *info) +{ + u32 u, v, w; + // Read VLAN table (0) via register 0 + struct table_reg *r = rtl_table_get(RTL8390_TBL_0, 0); + + rtl_table_read(r, vlan); + u = sw_r32(rtl_table_data(r, 0)); + v = sw_r32(rtl_table_data(r, 1)); + w = sw_r32(rtl_table_data(r, 2)); + rtl_table_release(r); + + info->tagged_ports = u; + info->tagged_ports = (info->tagged_ports << 21) | ((v >> 11) & 0x1fffff); + info->profile_id = w >> 30 | ((v & 1) << 2); + info->hash_mc_fid = !!(w & BIT(2)); + info->hash_uc_fid = !!(w & BIT(3)); + info->fid = (v >> 3) & 0xff; + + // Read UNTAG table (0) via table register 1 + r = rtl_table_get(RTL8390_TBL_1, 0); + rtl_table_read(r, vlan); + u = sw_r32(rtl_table_data(r, 0)); + v = sw_r32(rtl_table_data(r, 1)); + rtl_table_release(r); + + info->untagged_ports = u; + info->untagged_ports = (info->untagged_ports << 21) | ((v >> 11) & 0x1fffff); +} + +static void rtl839x_vlan_set_tagged(u32 vlan, struct rtl838x_vlan_info *info) +{ + u32 u, v, w; + // Access VLAN table (0) via register 0 + struct table_reg *r = rtl_table_get(RTL8390_TBL_0, 0); + + u = info->tagged_ports >> 21; + v = info->tagged_ports << 11; + v |= ((u32)info->fid) << 3; + v |= info->hash_uc_fid ? BIT(2) : 0; + v |= info->hash_mc_fid ? BIT(1) : 0; + v |= (info->profile_id & 0x4) ? 
1 : 0; + w = ((u32)(info->profile_id & 3)) << 30; + + sw_w32(u, rtl_table_data(r, 0)); + sw_w32(v, rtl_table_data(r, 1)); + sw_w32(w, rtl_table_data(r, 2)); + + rtl_table_write(r, vlan); + rtl_table_release(r); +} + +static void rtl839x_vlan_set_untagged(u32 vlan, u64 portmask) +{ + u32 u, v; + + // Access UNTAG table (0) via table register 1 + struct table_reg *r = rtl_table_get(RTL8390_TBL_1, 0); + + u = portmask >> 21; + v = portmask << 11; + + sw_w32(u, rtl_table_data(r, 0)); + sw_w32(v, rtl_table_data(r, 1)); + rtl_table_write(r, vlan); + + rtl_table_release(r); +} + +/* Sets the L2 forwarding to be based on either the inner VLAN tag or the outer + */ +static void rtl839x_vlan_fwd_on_inner(int port, bool is_set) +{ + if (is_set) + rtl839x_mask_port_reg_be(BIT_ULL(port), 0ULL, RTL839X_VLAN_PORT_FWD); + else + rtl839x_mask_port_reg_be(0ULL, BIT_ULL(port), RTL839X_VLAN_PORT_FWD); +} + +/* + * Hash seed is vid (actually rvid) concatenated with the MAC address + */ +static u64 rtl839x_l2_hash_seed(u64 mac, u32 vid) +{ + u64 v = vid; + + v <<= 48; + v |= mac; + + return v; +} + +/* + * Applies the same hash algorithm as the one used currently by the ASIC to the seed + * and returns a key into the L2 hash table + */ +static u32 rtl839x_l2_hash_key(struct rtl838x_switch_priv *priv, u64 seed) +{ + u32 h1, h2, h; + + if (sw_r32(priv->r->l2_ctrl_0) & 1) { + h1 = (u32) (((seed >> 60) & 0x3f) ^ ((seed >> 54) & 0x3f) + ^ ((seed >> 36) & 0x3f) ^ ((seed >> 30) & 0x3f) + ^ ((seed >> 12) & 0x3f) ^ ((seed >> 6) & 0x3f)); + h2 = (u32) (((seed >> 48) & 0x3f) ^ ((seed >> 42) & 0x3f) + ^ ((seed >> 24) & 0x3f) ^ ((seed >> 18) & 0x3f) + ^ (seed & 0x3f)); + h = (h1 << 6) | h2; + } else { + h = (seed >> 60) + ^ ((((seed >> 48) & 0x3f) << 6) | ((seed >> 54) & 0x3f)) + ^ ((seed >> 36) & 0xfff) ^ ((seed >> 24) & 0xfff) + ^ ((seed >> 12) & 0xfff) ^ (seed & 0xfff); + } + + return h; +} + +static inline int rtl839x_mac_force_mode_ctrl(int p) +{ + return RTL839X_MAC_FORCE_MODE_CTRL + (p << 2); +} + +static inline int rtl839x_mac_port_ctrl(int p) +{ + return RTL839X_MAC_PORT_CTRL(p); +} + +static inline int rtl839x_l2_port_new_salrn(int p) +{ + return RTL839X_L2_PORT_NEW_SALRN(p); +} + +static inline int rtl839x_l2_port_new_sa_fwd(int p) +{ + return RTL839X_L2_PORT_NEW_SA_FWD(p); +} + +static inline int rtl839x_mac_link_spd_sts(int p) +{ + return RTL839X_MAC_LINK_SPD_STS(p); +} + +static inline int rtl839x_trk_mbr_ctr(int group) +{ + return RTL839X_TRK_MBR_CTR + (group << 3); +} + +static void rtl839x_fill_l2_entry(u32 r[], struct rtl838x_l2_entry *e) +{ + /* Table contains different entry types, we need to identify the right one: + * Check for MC entries, first + */ + e->is_ip_mc = !!(r[2] & BIT(31)); + e->is_ipv6_mc = !!(r[2] & BIT(30)); + e->type = L2_INVALID; + if (!e->is_ip_mc && !e->is_ipv6_mc) { + e->mac[0] = (r[0] >> 12); + e->mac[1] = (r[0] >> 4); + e->mac[2] = ((r[1] >> 28) | (r[0] << 4)); + e->mac[3] = (r[1] >> 20); + e->mac[4] = (r[1] >> 12); + e->mac[5] = (r[1] >> 4); + + e->vid = (r[2] >> 4) & 0xfff; + e->rvid = (r[0] >> 20) & 0xfff; + + /* Is it a unicast entry? 
check multicast bit */ + if (!(e->mac[0] & 1)) { + e->is_static = !!((r[2] >> 18) & 1); + e->port = (r[2] >> 24) & 0x3f; + e->block_da = !!(r[2] & (1 << 19)); + e->block_sa = !!(r[2] & (1 << 20)); + e->suspended = !!(r[2] & (1 << 17)); + e->next_hop = !!(r[2] & (1 << 16)); + if (e->next_hop) { + pr_debug("Found next hop entry, need to read data\n"); + e->nh_vlan_target = !!(r[2] & BIT(15)); + e->nh_route_id = (r[2] >> 4) & 0x1ff; + e->vid = e->rvid; + } + e->age = (r[2] >> 21) & 3; + e->valid = true; + if (!(r[2] & 0xc0fd0000)) /* Check for valid entry */ + e->valid = false; + else + e->type = L2_UNICAST; + } else { + e->valid = true; + e->type = L2_MULTICAST; + e->mc_portmask_index = (r[2] >> 6) & 0xfff; + e->vid = e->rvid; + } + } else { // IPv4 and IPv6 multicast + e->vid = e->rvid = (r[0] << 20) & 0xfff; + e->mc_gip = r[1]; + e->mc_portmask_index = (r[2] >> 6) & 0xfff; + } + if (e->is_ip_mc) { + e->valid = true; + e->type = IP4_MULTICAST; + } + if (e->is_ipv6_mc) { + e->valid = true; + e->type = IP6_MULTICAST; + } + // pr_info("%s: vid %d, rvid: %d\n", __func__, e->vid, e->rvid); +} + +/* + * Fills the 3 SoC table registers r[] with the information in the rtl838x_l2_entry + */ +static void rtl839x_fill_l2_row(u32 r[], struct rtl838x_l2_entry *e) +{ + if (!e->valid) { + r[0] = r[1] = r[2] = 0; + return; + } + + r[2] = e->is_ip_mc ? BIT(31) : 0; + r[2] |= e->is_ipv6_mc ? BIT(30) : 0; + + if (!e->is_ip_mc && !e->is_ipv6_mc) { + r[0] = ((u32)e->mac[0]) << 12; + r[0] |= ((u32)e->mac[1]) << 4; + r[0] |= ((u32)e->mac[2]) >> 4; + r[1] = ((u32)e->mac[2]) << 28; + r[1] |= ((u32)e->mac[3]) << 20; + r[1] |= ((u32)e->mac[4]) << 12; + r[1] |= ((u32)e->mac[5]) << 4; + + if (!(e->mac[0] & 1)) { // Not multicast + r[2] |= e->is_static ? BIT(18) : 0; + r[0] |= ((u32)e->rvid) << 20; + r[2] |= e->port << 24; + r[2] |= e->block_da ? BIT(19) : 0; + r[2] |= e->block_sa ? BIT(20) : 0; + r[2] |= e->suspended ? BIT(17) : 0; + r[2] |= ((u32)e->age) << 21; + if (e->next_hop) { + r[2] |= BIT(16); + r[2] |= e->nh_vlan_target ? 
BIT(15) : 0; + r[2] |= (e->nh_route_id & 0x7ff) << 4; + } else { + r[2] |= e->vid << 4; + } + pr_debug("Write L2 NH: %08x %08x %08x\n", r[0], r[1], r[2]); + } else { // L2 Multicast + r[0] |= ((u32)e->rvid) << 20; + r[2] |= ((u32)e->mc_portmask_index) << 6; + } + } else { // IPv4 or IPv6 MC entry + r[0] = ((u32)e->rvid) << 20; + r[1] = e->mc_gip; + r[2] |= ((u32)e->mc_portmask_index) << 6; + } +} + +/* + * Read an L2 UC or MC entry out of a hash bucket of the L2 forwarding table + * hash is the id of the bucket and pos is the position of the entry in that bucket + * The data read from the SoC is filled into rtl838x_l2_entry + */ +static u64 rtl839x_read_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e) +{ + u32 r[3]; + struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 0); + u32 idx = (0 << 14) | (hash << 2) | pos; // Search SRAM, with hash and at pos in bucket + int i; + + rtl_table_read(q, idx); + for (i= 0; i < 3; i++) + r[i] = sw_r32(rtl_table_data(q, i)); + + rtl_table_release(q); + + rtl839x_fill_l2_entry(r, e); + if (!e->valid) + return 0; + + return rtl839x_l2_hash_seed(ether_addr_to_u64(&e->mac[0]), e->rvid); +} + +static void rtl839x_write_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e) +{ + u32 r[3]; + struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 0); + int i; + + u32 idx = (0 << 14) | (hash << 2) | pos; // Access SRAM, with hash and at pos in bucket + + rtl839x_fill_l2_row(r, e); + + for (i= 0; i < 3; i++) + sw_w32(r[i], rtl_table_data(q, i)); + + rtl_table_write(q, idx); + rtl_table_release(q); +} + +static u64 rtl839x_read_cam(int idx, struct rtl838x_l2_entry *e) +{ + u32 r[3]; + struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 1); // Access L2 Table 1 + int i; + + rtl_table_read(q, idx); + for (i= 0; i < 3; i++) + r[i] = sw_r32(rtl_table_data(q, i)); + + rtl_table_release(q); + + rtl839x_fill_l2_entry(r, e); + if (!e->valid) + return 0; + + pr_debug("Found in CAM: R1 %x R2 %x R3 %x\n", r[0], r[1], r[2]); + + // Return MAC with concatenated VID ac concatenated ID + return rtl839x_l2_hash_seed(ether_addr_to_u64(&e->mac[0]), e->rvid); +} + +static void rtl839x_write_cam(int idx, struct rtl838x_l2_entry *e) +{ + u32 r[3]; + struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 1); // Access L2 Table 1 + int i; + + rtl839x_fill_l2_row(r, e); + + for (i= 0; i < 3; i++) + sw_w32(r[i], rtl_table_data(q, i)); + + rtl_table_write(q, idx); + rtl_table_release(q); +} + +static u64 rtl839x_read_mcast_pmask(int idx) +{ + u64 portmask; + // Read MC_PMSK (2) via register RTL8390_TBL_L2 + struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 2); + + rtl_table_read(q, idx); + portmask = sw_r32(rtl_table_data(q, 0)); + portmask <<= 32; + portmask |= sw_r32(rtl_table_data(q, 1)); + portmask >>= 11; // LSB is bit 11 in data registers + rtl_table_release(q); + + return portmask; +} + +static void rtl839x_write_mcast_pmask(int idx, u64 portmask) +{ + // Access MC_PMSK (2) via register RTL8380_TBL_L2 + struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 2); + + portmask <<= 11; // LSB is bit 11 in data registers + sw_w32((u32)(portmask >> 32), rtl_table_data(q, 0)); + sw_w32((u32)((portmask & 0xfffff800)), rtl_table_data(q, 1)); + rtl_table_write(q, idx); + rtl_table_release(q); +} + +static void rtl839x_vlan_profile_setup(int profile) +{ + u32 p[2]; + u32 pmask_id = UNKNOWN_MC_PMASK; + + p[0] = pmask_id; // Use portmaks 0xfff for unknown IPv6 MC flooding + // Enable L2 Learning BIT 0, portmask UNKNOWN_MC_PMASK for IP/L2-MC traffic flooding + p[1] = 1 | 
pmask_id << 1 | pmask_id << 13; + + sw_w32(p[0], RTL839X_VLAN_PROFILE(profile)); + sw_w32(p[1], RTL839X_VLAN_PROFILE(profile) + 4); + + rtl839x_write_mcast_pmask(UNKNOWN_MC_PMASK, 0x001fffffffffffff); +} + +u64 rtl839x_traffic_get(int source) +{ + return rtl839x_get_port_reg_be(rtl839x_port_iso_ctrl(source)); +} + +void rtl839x_traffic_set(int source, u64 dest_matrix) +{ + rtl839x_set_port_reg_be(dest_matrix, rtl839x_port_iso_ctrl(source)); +} + +void rtl839x_traffic_enable(int source, int dest) +{ + rtl839x_mask_port_reg_be(0, BIT_ULL(dest), rtl839x_port_iso_ctrl(source)); +} + +void rtl839x_traffic_disable(int source, int dest) +{ + rtl839x_mask_port_reg_be(BIT_ULL(dest), 0, rtl839x_port_iso_ctrl(source)); +} + +static void rtl839x_l2_learning_setup(void) +{ + /* Set portmask for broadcast (offset bit 12) and unknown unicast (offset 0) + * address flooding to the reserved entry in the portmask table used + * also for multicast flooding */ + sw_w32(UNKNOWN_MC_PMASK << 12 | UNKNOWN_MC_PMASK, RTL839X_L2_FLD_PMSK); + + // Limit learning to maximum: 32k entries, after that just flood (bits 0-1) + sw_w32((0x7fff << 2) | 0, RTL839X_L2_LRN_CONSTRT); + + // Do not trap ARP packets to CPU_PORT + sw_w32(0, RTL839X_SPCL_TRAP_ARP_CTRL); +} + +static void rtl839x_enable_learning(int port, bool enable) +{ + // Limit learning to maximum: 32k entries + + sw_w32_mask(0x7fff << 2, enable ? (0x7fff << 2) : 0, + RTL839X_L2_PORT_LRN_CONSTRT + (port << 2)); +} + +static void rtl839x_enable_flood(int port, bool enable) +{ + /* + * 0: Forward + * 1: Disable + * 2: to CPU + * 3: Copy to CPU + */ + sw_w32_mask(0x3, enable ? 0 : 1, + RTL839X_L2_PORT_LRN_CONSTRT + (port << 2)); +} + +static void rtl839x_enable_mcast_flood(int port, bool enable) +{ + +} + +static void rtl839x_enable_bcast_flood(int port, bool enable) +{ + +} +irqreturn_t rtl839x_switch_irq(int irq, void *dev_id) +{ + struct dsa_switch *ds = dev_id; + u32 status = sw_r32(RTL839X_ISR_GLB_SRC); + u64 ports = rtl839x_get_port_reg_le(RTL839X_ISR_PORT_LINK_STS_CHG); + u64 link; + int i; + + /* Clear status */ + rtl839x_set_port_reg_le(ports, RTL839X_ISR_PORT_LINK_STS_CHG); + pr_debug("RTL8390 Link change: status: %x, ports %llx\n", status, ports); + + for (i = 0; i < RTL839X_CPU_PORT; i++) { + if (ports & BIT_ULL(i)) { + link = rtl839x_get_port_reg_le(RTL839X_MAC_LINK_STS); + if (link & BIT_ULL(i)) + dsa_port_phylink_mac_change(ds, i, true); + else + dsa_port_phylink_mac_change(ds, i, false); + } + } + return IRQ_HANDLED; +} + +// TODO: unused +int rtl8390_sds_power(int mac, int val) +{ + u32 offset = (mac == 48) ? 0x0 : 0x100; + u32 mode = val ? 0 : 1; + + pr_debug("In %s: mac %d, set %d\n", __func__, mac, val); + + if ((mac != 48) && (mac != 49)) { + pr_err("%s: not an SFP port: %d\n", __func__, mac); + return -1; + } + + // Set bit 1003. 
1000 starts at 7c + sw_w32_mask(BIT(11), mode << 11, RTL839X_SDS12_13_PWR0 + offset); + + return 0; +} + +static int rtl839x_smi_wait_op(int timeout) +{ + int ret = 0; + u32 val; + + ret = readx_poll_timeout(sw_r32, RTL839X_PHYREG_ACCESS_CTRL, + val, !(val & 0x1), 20, timeout); + if (ret) + pr_err("%s: timeout\n", __func__); + + return ret; +} + +int rtl839x_read_phy(u32 port, u32 page, u32 reg, u32 *val) +{ + u32 v; + int err = 0; + + if (port > 63 || page > 4095 || reg > 31) + return -ENOTSUPP; + + // Take bug on RTL839x Rev <= C into account + if (port >= RTL839X_CPU_PORT) + return -EIO; + + mutex_lock(&smi_lock); + + sw_w32_mask(0xffff0000, port << 16, RTL839X_PHYREG_DATA_CTRL); + v = reg << 5 | page << 10 | ((page == 0x1fff) ? 0x1f : 0) << 23; + sw_w32(v, RTL839X_PHYREG_ACCESS_CTRL); + + sw_w32(0x1ff, RTL839X_PHYREG_CTRL); + + v |= 1; + sw_w32(v, RTL839X_PHYREG_ACCESS_CTRL); + + err = rtl839x_smi_wait_op(100000); + if (err) + goto errout; + + *val = sw_r32(RTL839X_PHYREG_DATA_CTRL) & 0xffff; + +errout: + mutex_unlock(&smi_lock); + return err; +} + +int rtl839x_write_phy(u32 port, u32 page, u32 reg, u32 val) +{ + u32 v; + int err = 0; + + val &= 0xffff; + if (port > 63 || page > 4095 || reg > 31) + return -ENOTSUPP; + + // Take bug on RTL839x Rev <= C into account + if (port >= RTL839X_CPU_PORT) + return -EIO; + + mutex_lock(&smi_lock); + + // Set PHY to access + rtl839x_set_port_reg_le(BIT_ULL(port), RTL839X_PHYREG_PORT_CTRL); + + sw_w32_mask(0xffff0000, val << 16, RTL839X_PHYREG_DATA_CTRL); + + v = reg << 5 | page << 10 | ((page == 0x1fff) ? 0x1f : 0) << 23; + sw_w32(v, RTL839X_PHYREG_ACCESS_CTRL); + + sw_w32(0x1ff, RTL839X_PHYREG_CTRL); + + v |= BIT(3) | 1; /* Write operation and execute */ + sw_w32(v, RTL839X_PHYREG_ACCESS_CTRL); + + err = rtl839x_smi_wait_op(100000); + if (err) + goto errout; + + if (sw_r32(RTL839X_PHYREG_ACCESS_CTRL) & 0x2) + err = -EIO; + +errout: + mutex_unlock(&smi_lock); + return err; +} + +/* + * Read an mmd register of the PHY + */ +int rtl839x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val) +{ + int err = 0; + u32 v; + + // Take bug on RTL839x Rev <= C into account + if (port >= RTL839X_CPU_PORT) + return -EIO; + + mutex_lock(&smi_lock); + + // Set PHY to access + sw_w32_mask(0xffff << 16, port << 16, RTL839X_PHYREG_DATA_CTRL); + + // Set MMD device number and register to write to + sw_w32(devnum << 16 | (regnum & 0xffff), RTL839X_PHYREG_MMD_CTRL); + + v = BIT(2) | BIT(0); // MMD-access | EXEC + sw_w32(v, RTL839X_PHYREG_ACCESS_CTRL); + + err = rtl839x_smi_wait_op(100000); + if (err) + goto errout; + + // There is no error-checking via BIT 1 of v, as it does not seem to be set correctly + *val = (sw_r32(RTL839X_PHYREG_DATA_CTRL) & 0xffff); + pr_debug("%s: port %d, regnum: %x, val: %x (err %d)\n", __func__, port, regnum, *val, err); + +errout: + mutex_unlock(&smi_lock); + return err; +} + +/* + * Write to an mmd register of the PHY + */ +int rtl839x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val) +{ + int err = 0; + u32 v; + + // Take bug on RTL839x Rev <= C into account + if (port >= RTL839X_CPU_PORT) + return -EIO; + + mutex_lock(&smi_lock); + + // Set PHY to access + rtl839x_set_port_reg_le(BIT_ULL(port), RTL839X_PHYREG_PORT_CTRL); + + // Set data to write + sw_w32_mask(0xffff << 16, val << 16, RTL839X_PHYREG_DATA_CTRL); + + // Set MMD device number and register to write to + sw_w32(devnum << 16 | (regnum & 0xffff), RTL839X_PHYREG_MMD_CTRL); + + v = BIT(3) | BIT(2) | BIT(0); // WRITE | MMD-access | EXEC + sw_w32(v, 
RTL839X_PHYREG_ACCESS_CTRL); + + err = rtl839x_smi_wait_op(100000); + if (err) + goto errout; + + pr_debug("%s: port %d, regnum: %x, val: %x (err %d)\n", __func__, port, regnum, val, err); + +errout: + mutex_unlock(&smi_lock); + return err; +} + +void rtl8390_get_version(struct rtl838x_switch_priv *priv) +{ + u32 info, model; + + sw_w32_mask(0xf << 28, 0xa << 28, RTL839X_CHIP_INFO); + info = sw_r32(RTL839X_CHIP_INFO); + + model = sw_r32(RTL839X_MODEL_NAME_INFO); + priv->version = RTL8390_VERSION_A + ((model & 0x3f) >> 1); + + pr_info("RTL839X Chip-Info: %x, version %c\n", info, priv->version); +} + +void rtl839x_vlan_profile_dump(int profile) +{ + u32 p[2]; + + if (profile < 0 || profile > 7) + return; + + p[0] = sw_r32(RTL839X_VLAN_PROFILE(profile)); + p[1] = sw_r32(RTL839X_VLAN_PROFILE(profile) + 4); + + pr_info("VLAN profile %d: L2 learning: %d, UNKN L2MC FLD PMSK %d, \ + UNKN IPMC FLD PMSK %d, UNKN IPv6MC FLD PMSK: %d", + profile, p[1] & 1, (p[1] >> 1) & 0xfff, (p[1] >> 13) & 0xfff, + (p[0]) & 0xfff); + pr_info("VLAN profile %d: raw %08x, %08x\n", profile, p[0], p[1]); +} + +static void rtl839x_stp_get(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]) +{ + int i; + u32 cmd = 1 << 16 /* Execute cmd */ + | 0 << 15 /* Read */ + | 5 << 12 /* Table type 0b101 */ + | (msti & 0xfff); + priv->r->exec_tbl0_cmd(cmd); + + for (i = 0; i < 4; i++) + port_state[i] = sw_r32(priv->r->tbl_access_data_0(i)); +} + +static void rtl839x_stp_set(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]) +{ + int i; + u32 cmd = 1 << 16 /* Execute cmd */ + | 1 << 15 /* Write */ + | 5 << 12 /* Table type 0b101 */ + | (msti & 0xfff); + for (i = 0; i < 4; i++) + sw_w32(port_state[i], priv->r->tbl_access_data_0(i)); + priv->r->exec_tbl0_cmd(cmd); +} + +/* + * Enables or disables the EEE/EEEP capability of a port + */ +void rtl839x_port_eee_set(struct rtl838x_switch_priv *priv, int port, bool enable) +{ + u32 v; + + // This works only for Ethernet ports, and on the RTL839X, ports above 47 are SFP + if (port >= 48) + return; + + enable = true; + pr_debug("In %s: setting port %d to %d\n", __func__, port, enable); + v = enable ? 0xf : 0x0; + + // Set EEE for 100, 500, 1000MBit and 10GBit + sw_w32_mask(0xf << 8, v << 8, rtl839x_mac_force_mode_ctrl(port)); + + // Set TX/RX EEE state + v = enable ? 
0x3 : 0x0; + sw_w32(v, RTL839X_EEE_CTRL(port)); + + priv->ports[port].eee_enabled = enable; +} + +/* + * Get EEE own capabilities and negotiation result + */ +int rtl839x_eee_port_ability(struct rtl838x_switch_priv *priv, struct ethtool_eee *e, int port) +{ + u64 link, a; + + if (port >= 48) + return 0; + + link = rtl839x_get_port_reg_le(RTL839X_MAC_LINK_STS); + if (!(link & BIT_ULL(port))) + return 0; + + if (sw_r32(rtl839x_mac_force_mode_ctrl(port)) & BIT(8)) + e->advertised |= ADVERTISED_100baseT_Full; + + if (sw_r32(rtl839x_mac_force_mode_ctrl(port)) & BIT(10)) + e->advertised |= ADVERTISED_1000baseT_Full; + + a = rtl839x_get_port_reg_le(RTL839X_MAC_EEE_ABLTY); + pr_info("Link partner: %016llx\n", a); + if (rtl839x_get_port_reg_le(RTL839X_MAC_EEE_ABLTY) & BIT_ULL(port)) { + e->lp_advertised = ADVERTISED_100baseT_Full; + e->lp_advertised |= ADVERTISED_1000baseT_Full; + return 1; + } + + return 0; +} + +static void rtl839x_init_eee(struct rtl838x_switch_priv *priv, bool enable) +{ + int i; + + pr_info("Setting up EEE, state: %d\n", enable); + + // Set wake timer for TX and pause timer both to 0x21 + sw_w32_mask(0xff << 20| 0xff, 0x21 << 20| 0x21, RTL839X_EEE_TX_TIMER_GELITE_CTRL); + // Set pause wake timer for GIGA-EEE to 0x11 + sw_w32_mask(0xff << 20, 0x11 << 20, RTL839X_EEE_TX_TIMER_GIGA_CTRL); + // Set pause wake timer for 10GBit ports to 0x11 + sw_w32_mask(0xff << 20, 0x11 << 20, RTL839X_EEE_TX_TIMER_10G_CTRL); + + // Setup EEE on all ports + for (i = 0; i < priv->cpu_port; i++) { + if (priv->ports[i].phy) + rtl839x_port_eee_set(priv, i, enable); + } + priv->eee_enabled = enable; +} + +static void rtl839x_pie_lookup_enable(struct rtl838x_switch_priv *priv, int index) +{ + int block = index / PIE_BLOCK_SIZE; + + sw_w32_mask(0, BIT(block), RTL839X_ACL_BLK_LOOKUP_CTRL); +} + +/* + * Delete a range of Packet Inspection Engine rules + */ +static int rtl839x_pie_rule_del(struct rtl838x_switch_priv *priv, int index_from, int index_to) +{ + u32 v = (index_from << 1)| (index_to << 13 ) | BIT(0); + + pr_debug("%s: from %d to %d\n", __func__, index_from, index_to); + mutex_lock(&priv->reg_mutex); + + // Write from-to and execute bit into control register + sw_w32(v, RTL839X_ACL_CLR_CTRL); + + // Wait until command has completed + do { + } while (sw_r32(RTL839X_ACL_CLR_CTRL) & BIT(0)); + + mutex_unlock(&priv->reg_mutex); + return 0; +} + +/* + * Reads the intermediate representation of the templated match-fields of the + * PIE rule in the pie_rule structure and fills in the raw data fields in the + * raw register space r[]. + * The register space configuration size is identical for the RTL8380/90 and RTL9300, + * however the RTL9310 has 2 more registers / fields and the physical field-ids are different + * on all SoCs + * On the RTL8390 the template mask registers are not word-aligned! 
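+ *
+ * As an illustration of that packing (derived from the write loop below and
+ * only meant as a reading aid), the first two template fields end up at:
+ *
+ *	i = 0: data   -> r[5], bits 15:0;  data_m -> r[12], bits 23:8
+ *	i = 1: data   -> r[5], bits 31:16; data_m -> r[12], bits 31:24 (low byte)
+ *	                                   and r[11], bits 7:0 (high byte)
+ *
+ * i.e. a field's 16-bit mask can straddle two of the mask registers.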
+ */ +static void rtl839x_write_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[]) +{ + int i; + enum template_field_id field_type; + u16 data, data_m; + + for (i = 0; i < N_FIXED_FIELDS; i++) { + field_type = t[i]; + data = data_m = 0; + + switch (field_type) { + case TEMPLATE_FIELD_SPM0: + data = pr->spm; + data_m = pr->spm_m; + break; + case TEMPLATE_FIELD_SPM1: + data = pr->spm >> 16; + data_m = pr->spm_m >> 16; + break; + case TEMPLATE_FIELD_SPM2: + data = pr->spm >> 32; + data_m = pr->spm_m >> 32; + break; + case TEMPLATE_FIELD_SPM3: + data = pr->spm >> 48; + data_m = pr->spm_m >> 48; + break; + case TEMPLATE_FIELD_OTAG: + data = pr->otag; + data_m = pr->otag_m; + break; + case TEMPLATE_FIELD_SMAC0: + data = pr->smac[4]; + data = (data << 8) | pr->smac[5]; + data_m = pr->smac_m[4]; + data_m = (data_m << 8) | pr->smac_m[5]; + break; + case TEMPLATE_FIELD_SMAC1: + data = pr->smac[2]; + data = (data << 8) | pr->smac[3]; + data_m = pr->smac_m[2]; + data_m = (data_m << 8) | pr->smac_m[3]; + break; + case TEMPLATE_FIELD_SMAC2: + data = pr->smac[0]; + data = (data << 8) | pr->smac[1]; + data_m = pr->smac_m[0]; + data_m = (data_m << 8) | pr->smac_m[1]; + break; + case TEMPLATE_FIELD_DMAC0: + data = pr->dmac[4]; + data = (data << 8) | pr->dmac[5]; + data_m = pr->dmac_m[4]; + data_m = (data_m << 8) | pr->dmac_m[5]; + break; + case TEMPLATE_FIELD_DMAC1: + data = pr->dmac[2]; + data = (data << 8) | pr->dmac[3]; + data_m = pr->dmac_m[2]; + data_m = (data_m << 8) | pr->dmac_m[3]; + break; + case TEMPLATE_FIELD_DMAC2: + data = pr->dmac[0]; + data = (data << 8) | pr->dmac[1]; + data_m = pr->dmac_m[0]; + data_m = (data_m << 8) | pr->dmac_m[1]; + break; + case TEMPLATE_FIELD_ETHERTYPE: + data = pr->ethertype; + data_m = pr->ethertype_m; + break; + case TEMPLATE_FIELD_ITAG: + data = pr->itag; + data_m = pr->itag_m; + break; + case TEMPLATE_FIELD_SIP0: + if (pr->is_ipv6) { + data = pr->sip6.s6_addr16[7]; + data_m = pr->sip6_m.s6_addr16[7]; + } else { + data = pr->sip; + data_m = pr->sip_m; + } + break; + case TEMPLATE_FIELD_SIP1: + if (pr->is_ipv6) { + data = pr->sip6.s6_addr16[6]; + data_m = pr->sip6_m.s6_addr16[6]; + } else { + data = pr->sip >> 16; + data_m = pr->sip_m >> 16; + } + break; + + case TEMPLATE_FIELD_SIP2: + case TEMPLATE_FIELD_SIP3: + case TEMPLATE_FIELD_SIP4: + case TEMPLATE_FIELD_SIP5: + case TEMPLATE_FIELD_SIP6: + case TEMPLATE_FIELD_SIP7: + data = pr->sip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)]; + data_m = pr->sip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)]; + break; + + case TEMPLATE_FIELD_DIP0: + if (pr->is_ipv6) { + data = pr->dip6.s6_addr16[7]; + data_m = pr->dip6_m.s6_addr16[7]; + } else { + data = pr->dip; + data_m = pr->dip_m; + } + break; + + case TEMPLATE_FIELD_DIP1: + if (pr->is_ipv6) { + data = pr->dip6.s6_addr16[6]; + data_m = pr->dip6_m.s6_addr16[6]; + } else { + data = pr->dip >> 16; + data_m = pr->dip_m >> 16; + } + break; + + case TEMPLATE_FIELD_DIP2: + case TEMPLATE_FIELD_DIP3: + case TEMPLATE_FIELD_DIP4: + case TEMPLATE_FIELD_DIP5: + case TEMPLATE_FIELD_DIP6: + case TEMPLATE_FIELD_DIP7: + data = pr->dip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)]; + data_m = pr->dip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)]; + break; + + case TEMPLATE_FIELD_IP_TOS_PROTO: + data = pr->tos_proto; + data_m = pr->tos_proto_m; + break; + case TEMPLATE_FIELD_L4_SPORT: + data = pr->sport; + data_m = pr->sport_m; + break; + case TEMPLATE_FIELD_L4_DPORT: + data = pr->dport; + data_m = pr->dport_m; + break; + case 
TEMPLATE_FIELD_ICMP_IGMP: + data = pr->icmp_igmp; + data_m = pr->icmp_igmp_m; + break; + default: + pr_info("%s: unknown field %d\n", __func__, field_type); + } + + // On the RTL8390, the mask fields are not word aligned! + if (!(i % 2)) { + r[5 - i / 2] = data; + r[12 - i / 2] |= ((u32)data_m << 8); + } else { + r[5 - i / 2] |= ((u32)data) << 16; + r[12 - i / 2] |= ((u32)data_m) << 24; + r[11 - i / 2] |= ((u32)data_m) >> 8; + } + } +} + +/* + * Creates the intermediate representation of the templated match-fields of the + * PIE rule in the pie_rule structure by reading the raw data fields in the + * raw register space r[]. + * The register space configuration size is identical for the RTL8380/90 and RTL9300, + * however the RTL9310 has 2 more registers / fields and the physical field-ids + * On the RTL8390 the template mask registers are not word-aligned! + */ +void rtl839x_read_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[]) +{ + int i; + enum template_field_id field_type; + u16 data, data_m; + + for (i = 0; i < N_FIXED_FIELDS; i++) { + field_type = t[i]; + if (!(i % 2)) { + data = r[5 - i / 2]; + data_m = r[12 - i / 2]; + } else { + data = r[5 - i / 2] >> 16; + data_m = r[12 - i / 2] >> 16; + } + + switch (field_type) { + case TEMPLATE_FIELD_SPM0: + pr->spm = (pr->spn << 16) | data; + pr->spm_m = (pr->spn << 16) | data_m; + break; + case TEMPLATE_FIELD_SPM1: + pr->spm = data; + pr->spm_m = data_m; + break; + case TEMPLATE_FIELD_OTAG: + pr->otag = data; + pr->otag_m = data_m; + break; + case TEMPLATE_FIELD_SMAC0: + pr->smac[4] = data >> 8; + pr->smac[5] = data; + pr->smac_m[4] = data >> 8; + pr->smac_m[5] = data; + break; + case TEMPLATE_FIELD_SMAC1: + pr->smac[2] = data >> 8; + pr->smac[3] = data; + pr->smac_m[2] = data >> 8; + pr->smac_m[3] = data; + break; + case TEMPLATE_FIELD_SMAC2: + pr->smac[0] = data >> 8; + pr->smac[1] = data; + pr->smac_m[0] = data >> 8; + pr->smac_m[1] = data; + break; + case TEMPLATE_FIELD_DMAC0: + pr->dmac[4] = data >> 8; + pr->dmac[5] = data; + pr->dmac_m[4] = data >> 8; + pr->dmac_m[5] = data; + break; + case TEMPLATE_FIELD_DMAC1: + pr->dmac[2] = data >> 8; + pr->dmac[3] = data; + pr->dmac_m[2] = data >> 8; + pr->dmac_m[3] = data; + break; + case TEMPLATE_FIELD_DMAC2: + pr->dmac[0] = data >> 8; + pr->dmac[1] = data; + pr->dmac_m[0] = data >> 8; + pr->dmac_m[1] = data; + break; + case TEMPLATE_FIELD_ETHERTYPE: + pr->ethertype = data; + pr->ethertype_m = data_m; + break; + case TEMPLATE_FIELD_ITAG: + pr->itag = data; + pr->itag_m = data_m; + break; + case TEMPLATE_FIELD_SIP0: + pr->sip = data; + pr->sip_m = data_m; + break; + case TEMPLATE_FIELD_SIP1: + pr->sip = (pr->sip << 16) | data; + pr->sip_m = (pr->sip << 16) | data_m; + break; + case TEMPLATE_FIELD_SIP2: + pr->is_ipv6 = true; + // Make use of limitiations on the position of the match values + ipv6_addr_set(&pr->sip6, pr->sip, r[5 - i / 2], + r[4 - i / 2], r[3 - i / 2]); + ipv6_addr_set(&pr->sip6_m, pr->sip_m, r[5 - i / 2], + r[4 - i / 2], r[3 - i / 2]); + case TEMPLATE_FIELD_SIP3: + case TEMPLATE_FIELD_SIP4: + case TEMPLATE_FIELD_SIP5: + case TEMPLATE_FIELD_SIP6: + case TEMPLATE_FIELD_SIP7: + break; + + case TEMPLATE_FIELD_DIP0: + pr->dip = data; + pr->dip_m = data_m; + break; + + case TEMPLATE_FIELD_DIP1: + pr->dip = (pr->dip << 16) | data; + pr->dip_m = (pr->dip << 16) | data_m; + break; + + case TEMPLATE_FIELD_DIP2: + pr->is_ipv6 = true; + ipv6_addr_set(&pr->dip6, pr->dip, r[5 - i / 2], + r[4 - i / 2], r[3 - i / 2]); + ipv6_addr_set(&pr->dip6_m, pr->dip_m, r[5 - i / 2], + 
r[4 - i / 2], r[3 - i / 2]); + case TEMPLATE_FIELD_DIP3: + case TEMPLATE_FIELD_DIP4: + case TEMPLATE_FIELD_DIP5: + case TEMPLATE_FIELD_DIP6: + case TEMPLATE_FIELD_DIP7: + break; + case TEMPLATE_FIELD_IP_TOS_PROTO: + pr->tos_proto = data; + pr->tos_proto_m = data_m; + break; + case TEMPLATE_FIELD_L4_SPORT: + pr->sport = data; + pr->sport_m = data_m; + break; + case TEMPLATE_FIELD_L4_DPORT: + pr->dport = data; + pr->dport_m = data_m; + break; + case TEMPLATE_FIELD_ICMP_IGMP: + pr->icmp_igmp = data; + pr->icmp_igmp_m = data_m; + break; + default: + pr_info("%s: unknown field %d\n", __func__, field_type); + } + } +} + +static void rtl839x_read_pie_fixed_fields(u32 r[], struct pie_rule *pr) +{ + pr->spmmask_fix = (r[6] >> 30) & 0x3; + pr->spn = (r[6] >> 24) & 0x3f; + pr->mgnt_vlan = (r[6] >> 23) & 1; + pr->dmac_hit_sw = (r[6] >> 22) & 1; + pr->not_first_frag = (r[6] >> 21) & 1; + pr->frame_type_l4 = (r[6] >> 18) & 7; + pr->frame_type = (r[6] >> 16) & 3; + pr->otag_fmt = (r[6] >> 15) & 1; + pr->itag_fmt = (r[6] >> 14) & 1; + pr->otag_exist = (r[6] >> 13) & 1; + pr->itag_exist = (r[6] >> 12) & 1; + pr->frame_type_l2 = (r[6] >> 10) & 3; + pr->tid = (r[6] >> 8) & 3; + + pr->spmmask_fix_m = (r[12] >> 6) & 0x3; + pr->spn_m = r[12] & 0x3f; + pr->mgnt_vlan_m = (r[13] >> 31) & 1; + pr->dmac_hit_sw_m = (r[13] >> 30) & 1; + pr->not_first_frag_m = (r[13] >> 29) & 1; + pr->frame_type_l4_m = (r[13] >> 26) & 7; + pr->frame_type_m = (r[13] >> 24) & 3; + pr->otag_fmt_m = (r[13] >> 23) & 1; + pr->itag_fmt_m = (r[13] >> 22) & 1; + pr->otag_exist_m = (r[13] >> 21) & 1; + pr->itag_exist_m = (r[13] >> 20) & 1; + pr->frame_type_l2_m = (r[13] >> 18) & 3; + pr->tid_m = (r[13] >> 16) & 3; + + pr->valid = r[13] & BIT(15); + pr->cond_not = r[13] & BIT(14); + pr->cond_and1 = r[13] & BIT(13); + pr->cond_and2 = r[13] & BIT(12); +} + +static void rtl839x_write_pie_fixed_fields(u32 r[], struct pie_rule *pr) +{ + r[6] = ((u32) (pr->spmmask_fix & 0x3)) << 30; + r[6] |= ((u32) (pr->spn & 0x3f)) << 24; + r[6] |= pr->mgnt_vlan ? BIT(23) : 0; + r[6] |= pr->dmac_hit_sw ? BIT(22) : 0; + r[6] |= pr->not_first_frag ? BIT(21) : 0; + r[6] |= ((u32) (pr->frame_type_l4 & 0x7)) << 18; + r[6] |= ((u32) (pr->frame_type & 0x3)) << 16; + r[6] |= pr->otag_fmt ? BIT(15) : 0; + r[6] |= pr->itag_fmt ? BIT(14) : 0; + r[6] |= pr->otag_exist ? BIT(13) : 0; + r[6] |= pr->itag_exist ? BIT(12) : 0; + r[6] |= ((u32) (pr->frame_type_l2 & 0x3)) << 10; + r[6] |= ((u32) (pr->tid & 0x3)) << 8; + + r[12] |= ((u32) (pr->spmmask_fix_m & 0x3)) << 6; + r[12] |= (u32) (pr->spn_m & 0x3f); + r[13] |= pr->mgnt_vlan_m ? BIT(31) : 0; + r[13] |= pr->dmac_hit_sw_m ? BIT(30) : 0; + r[13] |= pr->not_first_frag_m ? BIT(29) : 0; + r[13] |= ((u32) (pr->frame_type_l4_m & 0x7)) << 26; + r[13] |= ((u32) (pr->frame_type_m & 0x3)) << 24; + r[13] |= pr->otag_fmt_m ? BIT(23) : 0; + r[13] |= pr->itag_fmt_m ? BIT(22) : 0; + r[13] |= pr->otag_exist_m ? BIT(21) : 0; + r[13] |= pr->itag_exist_m ? BIT(20) : 0; + r[13] |= ((u32) (pr->frame_type_l2_m & 0x3)) << 18; + r[13] |= ((u32) (pr->tid_m & 0x3)) << 16; + + r[13] |= pr->valid ? BIT(15) : 0; + r[13] |= pr->cond_not ? BIT(14) : 0; + r[13] |= pr->cond_and1 ? BIT(13) : 0; + r[13] |= pr->cond_and2 ? BIT(12) : 0; +} + +static void rtl839x_write_pie_action(u32 r[], struct pie_rule *pr) +{ + if (pr->drop) { + r[13] |= 0x9; // Set ACT_MASK_FWD & FWD_ACT = DROP + r[13] |= BIT(3); + } else { + r[13] |= pr->fwd_sel ? BIT(3) : 0; + r[13] |= pr->fwd_act; + } + r[13] |= pr->bypass_sel ? BIT(11) : 0; + r[13] |= pr->mpls_sel ? 
BIT(10) : 0; + r[13] |= pr->nopri_sel ? BIT(9) : 0; + r[13] |= pr->ovid_sel ? BIT(8) : 0; + r[13] |= pr->ivid_sel ? BIT(7) : 0; + r[13] |= pr->meter_sel ? BIT(6) : 0; + r[13] |= pr->mir_sel ? BIT(5) : 0; + r[13] |= pr->log_sel ? BIT(4) : 0; + + r[14] |= ((u32)(pr->fwd_data & 0x3fff)) << 18; + r[14] |= pr->log_octets ? BIT(17) : 0; + r[14] |= ((u32)(pr->log_data & 0x7ff)) << 4; + r[14] |= (pr->mir_data & 0x3) << 3; + r[14] |= ((u32)(pr->meter_data >> 7)) & 0x7; + r[15] |= (u32)(pr->meter_data) << 26; + r[15] |= ((u32)(pr->ivid_act) << 23) & 0x3; + r[15] |= ((u32)(pr->ivid_data) << 9) & 0xfff; + r[15] |= ((u32)(pr->ovid_act) << 6) & 0x3; + r[15] |= ((u32)(pr->ovid_data) >> 4) & 0xff; + r[16] |= ((u32)(pr->ovid_data) & 0xf) << 28; + r[16] |= ((u32)(pr->nopri_data) & 0x7) << 20; + r[16] |= ((u32)(pr->mpls_act) & 0x7) << 20; + r[16] |= ((u32)(pr->mpls_lib_idx) & 0x7) << 20; + r[16] |= pr->bypass_all ? BIT(9) : 0; + r[16] |= pr->bypass_igr_stp ? BIT(8) : 0; + r[16] |= pr->bypass_ibc_sc ? BIT(7) : 0; +} + +static void rtl839x_read_pie_action(u32 r[], struct pie_rule *pr) +{ + if (r[13] & BIT(3)) { // ACT_MASK_FWD set, is it a drop? + if ((r[14] & 0x7) == 1) { + pr->drop = true; + } else { + pr->fwd_sel = true; + pr->fwd_act = r[14] & 0x7; + } + } + + pr->bypass_sel = r[13] & BIT(11); + pr->mpls_sel = r[13] & BIT(10); + pr->nopri_sel = r[13] & BIT(9); + pr->ovid_sel = r[13] & BIT(8); + pr->ivid_sel = r[13] & BIT(7); + pr->meter_sel = r[13] & BIT(6); + pr->mir_sel = r[13] & BIT(5); + pr->log_sel = r[13] & BIT(4); + + // TODO: Read in data fields + + pr->bypass_all = r[16] & BIT(9); + pr->bypass_igr_stp = r[16] & BIT(8); + pr->bypass_ibc_sc = r[16] & BIT(7); +} + +void rtl839x_pie_rule_dump_raw(u32 r[]) +{ + pr_info("Raw IACL table entry:\n"); + pr_info("Match : %08x %08x %08x %08x %08x %08x\n", r[0], r[1], r[2], r[3], r[4], r[5]); + pr_info("Fixed : %06x\n", r[6] >> 8); + pr_info("Match M: %08x %08x %08x %08x %08x %08x\n", + (r[6] << 24) | (r[7] >> 8), (r[7] << 24) | (r[8] >> 8), (r[8] << 24) | (r[9] >> 8), + (r[9] << 24) | (r[10] >> 8), (r[10] << 24) | (r[11] >> 8), + (r[11] << 24) | (r[12] >> 8)); + pr_info("R[13]: %08x\n", r[13]); + pr_info("Fixed M: %06x\n", ((r[12] << 16) | (r[13] >> 16)) & 0xffffff); + pr_info("Valid / not / and1 / and2 : %1x\n", (r[13] >> 12) & 0xf); + pr_info("r 13-16: %08x %08x %08x %08x\n", r[13], r[14], r[15], r[16]); +} + +void rtl839x_pie_rule_dump(struct pie_rule *pr) +{ + pr_info("Drop: %d, fwd: %d, ovid: %d, ivid: %d, flt: %d, log: %d, rmk: %d, meter: %d tagst: %d, mir: %d, nopri: %d, cpupri: %d, otpid: %d, itpid: %d, shape: %d\n", + pr->drop, pr->fwd_sel, pr->ovid_sel, pr->ivid_sel, pr->flt_sel, pr->log_sel, pr->rmk_sel, pr->log_sel, pr->tagst_sel, pr->mir_sel, pr->nopri_sel, + pr->cpupri_sel, pr->otpid_sel, pr->itpid_sel, pr->shaper_sel); + if (pr->fwd_sel) + pr_info("FWD: %08x\n", pr->fwd_data); + pr_info("TID: %x, %x\n", pr->tid, pr->tid_m); +} + +static int rtl839x_pie_rule_read(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr) +{ + // Read IACL table (2) via register 0 + struct table_reg *q = rtl_table_get(RTL8380_TBL_0, 2); + u32 r[17]; + int i; + int block = idx / PIE_BLOCK_SIZE; + u32 t_select = sw_r32(RTL839X_ACL_BLK_TMPLTE_CTRL(block)); + + memset(pr, 0, sizeof(*pr)); + rtl_table_read(q, idx); + for (i = 0; i < 17; i++) + r[i] = sw_r32(rtl_table_data(q, i)); + + rtl_table_release(q); + + rtl839x_read_pie_fixed_fields(r, pr); + if (!pr->valid) + return 0; + + pr_debug("%s: template_selectors %08x, tid: %d\n", __func__, t_select, pr->tid); + 
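/*
 * Illustrative note on the template lookup used just below: each ACL block on
 * the RTL8390 has a template-selector register (RTL839X_ACL_BLK_TMPLTE_CTRL)
 * that packs 3-bit indices into fixed_templates[], and the rule's tid field
 * picks which of those indices applies, so the decoder resolves its template
 * as (t_select >> (pr->tid * 3)) & 0x7. A minimal stand-alone sketch of that
 * selection (the helper name is illustrative only, not part of the driver):
 *
 *	static inline int rtl839x_pie_template_index(u32 t_select, int tid)
 *	{
 *		return (t_select >> (tid * 3)) & 0x7;
 *	}
 */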
rtl839x_pie_rule_dump_raw(r); + + rtl839x_read_pie_templated(r, pr, fixed_templates[(t_select >> (pr->tid * 3)) & 0x7]); + + rtl839x_read_pie_action(r, pr); + + return 0; +} + +static int rtl839x_pie_rule_write(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr) +{ + // Access IACL table (2) via register 0 + struct table_reg *q = rtl_table_get(RTL8390_TBL_0, 2); + u32 r[17]; + int i; + int block = idx / PIE_BLOCK_SIZE; + u32 t_select = sw_r32(RTL839X_ACL_BLK_TMPLTE_CTRL(block)); + + pr_debug("%s: %d, t_select: %08x\n", __func__, idx, t_select); + + for (i = 0; i < 17; i++) + r[i] = 0; + + if (!pr->valid) { + rtl_table_write(q, idx); + rtl_table_release(q); + return 0; + } + rtl839x_write_pie_fixed_fields(r, pr); + + pr_debug("%s: template %d\n", __func__, (t_select >> (pr->tid * 3)) & 0x7); + rtl839x_write_pie_templated(r, pr, fixed_templates[(t_select >> (pr->tid * 3)) & 0x7]); + + rtl839x_write_pie_action(r, pr); + +// rtl839x_pie_rule_dump_raw(r); + + for (i = 0; i < 17; i++) + sw_w32(r[i], rtl_table_data(q, i)); + + rtl_table_write(q, idx); + rtl_table_release(q); + + return 0; +} + +static bool rtl839x_pie_templ_has(int t, enum template_field_id field_type) +{ + int i; + enum template_field_id ft; + + for (i = 0; i < N_FIXED_FIELDS; i++) { + ft = fixed_templates[t][i]; + if (field_type == ft) + return true; + } + + return false; +} + +static int rtl839x_pie_verify_template(struct rtl838x_switch_priv *priv, + struct pie_rule *pr, int t, int block) +{ + int i; + + if (!pr->is_ipv6 && pr->sip_m && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_SIP0)) + return -1; + + if (!pr->is_ipv6 && pr->dip_m && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_DIP0)) + return -1; + + if (pr->is_ipv6) { + if ((pr->sip6_m.s6_addr32[0] || pr->sip6_m.s6_addr32[1] + || pr->sip6_m.s6_addr32[2] || pr->sip6_m.s6_addr32[3]) + && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_SIP2)) + return -1; + if ((pr->dip6_m.s6_addr32[0] || pr->dip6_m.s6_addr32[1] + || pr->dip6_m.s6_addr32[2] || pr->dip6_m.s6_addr32[3]) + && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_DIP2)) + return -1; + } + + if (ether_addr_to_u64(pr->smac) && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_SMAC0)) + return -1; + + if (ether_addr_to_u64(pr->dmac) && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_DMAC0)) + return -1; + + // TODO: Check more + + i = find_first_zero_bit(&priv->pie_use_bm[block * 4], PIE_BLOCK_SIZE); + + if (i >= PIE_BLOCK_SIZE) + return -1; + + return i + PIE_BLOCK_SIZE * block; +} + +static int rtl839x_pie_rule_add(struct rtl838x_switch_priv *priv, struct pie_rule *pr) +{ + int idx, block, j, t; + int min_block = 0; + int max_block = priv->n_pie_blocks / 2; + + if (pr->is_egress) { + min_block = max_block; + max_block = priv->n_pie_blocks; + } + + mutex_lock(&priv->pie_mutex); + + for (block = min_block; block < max_block; block++) { + for (j = 0; j < 2; j++) { + t = (sw_r32(RTL839X_ACL_BLK_TMPLTE_CTRL(block)) >> (j * 3)) & 0x7; + idx = rtl839x_pie_verify_template(priv, pr, t, block); + if (idx >= 0) + break; + } + if (j < 2) + break; + } + + if (block >= priv->n_pie_blocks) { + mutex_unlock(&priv->pie_mutex); + return -EOPNOTSUPP; + } + + set_bit(idx, priv->pie_use_bm); + + pr->valid = true; + pr->tid = j; // Mapped to template number + pr->tid_m = 0x3; + pr->id = idx; + + rtl839x_pie_lookup_enable(priv, idx); + rtl839x_pie_rule_write(priv, idx, pr); + + mutex_unlock(&priv->pie_mutex); + return 0; +} + +static void rtl839x_pie_rule_rm(struct rtl838x_switch_priv *priv, struct pie_rule *pr) +{ + int idx = pr->id; + + rtl839x_pie_rule_del(priv, 
idx, idx); + clear_bit(idx, priv->pie_use_bm); +} + +static void rtl839x_pie_init(struct rtl838x_switch_priv *priv) +{ + int i; + u32 template_selectors; + + mutex_init(&priv->pie_mutex); + + // Power on all PIE blocks + for (i = 0; i < priv->n_pie_blocks; i++) + sw_w32_mask(0, BIT(i), RTL839X_PS_ACL_PWR_CTRL); + + // Set ingress and egress ACL blocks to 50/50: first Egress block is 9 + sw_w32_mask(0x1f, 9, RTL839X_ACL_CTRL); // Writes 9 to cutline field + + // Include IPG in metering + sw_w32(1, RTL839X_METER_GLB_CTRL); + + // Delete all present rules + rtl839x_pie_rule_del(priv, 0, priv->n_pie_blocks * PIE_BLOCK_SIZE - 1); + + // Enable predefined templates 0, 1 for blocks 0-2 + template_selectors = 0 | (1 << 3); + for (i = 0; i < 3; i++) + sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i)); + + // Enable predefined templates 2, 3 for blocks 3-5 + template_selectors = 2 | (3 << 3); + for (i = 3; i < 6; i++) + sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i)); + + // Enable predefined templates 1, 4 for blocks 6-8 + template_selectors = 2 | (3 << 3); + for (i = 6; i < 9; i++) + sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i)); + + // Enable predefined templates 0, 1 for blocks 9-11 + template_selectors = 0 | (1 << 3); + for (i = 9; i < 12; i++) + sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i)); + + // Enable predefined templates 2, 3 for blocks 12-14 + template_selectors = 2 | (3 << 3); + for (i = 12; i < 15; i++) + sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i)); + + // Enable predefined templates 1, 4 for blocks 15-17 + template_selectors = 2 | (3 << 3); + for (i = 15; i < 18; i++) + sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i)); +} + +static u32 rtl839x_packet_cntr_read(int counter) +{ + u32 v; + + // Read LOG table (4) via register RTL8390_TBL_0 + struct table_reg *r = rtl_table_get(RTL8390_TBL_0, 4); + + pr_debug("In %s, id %d\n", __func__, counter); + rtl_table_read(r, counter / 2); + + // The table has a size of 2 registers + if (counter % 2) + v = sw_r32(rtl_table_data(r, 0)); + else + v = sw_r32(rtl_table_data(r, 1)); + + rtl_table_release(r); + + return v; +} + +static void rtl839x_packet_cntr_clear(int counter) +{ + // Access LOG table (4) via register RTL8390_TBL_0 + struct table_reg *r = rtl_table_get(RTL8390_TBL_0, 4); + + pr_debug("In %s, id %d\n", __func__, counter); + // The table has a size of 2 registers + if (counter % 2) + sw_w32(0, rtl_table_data(r, 0)); + else + sw_w32(0, rtl_table_data(r, 1)); + + rtl_table_write(r, counter / 2); + + rtl_table_release(r); +} + +static void rtl839x_route_read(int idx, struct rtl83xx_route *rt) +{ + u64 v; + // Read ROUTING table (2) via register RTL8390_TBL_1 + struct table_reg *r = rtl_table_get(RTL8390_TBL_1, 2); + + pr_debug("In %s\n", __func__); + rtl_table_read(r, idx); + + // The table has a size of 2 registers + v = sw_r32(rtl_table_data(r, 0)); + v <<= 32; + v |= sw_r32(rtl_table_data(r, 1)); + rt->switch_mac_id = (v >> 12) & 0xf; + rt->nh.gw = v >> 16; + + rtl_table_release(r); +} + +static void rtl839x_route_write(int idx, struct rtl83xx_route *rt) +{ + u32 v; + + // Read ROUTING table (2) via register RTL8390_TBL_1 + struct table_reg *r = rtl_table_get(RTL8390_TBL_1, 2); + + pr_debug("In %s\n", __func__); + sw_w32(rt->nh.gw >> 16, rtl_table_data(r, 0)); + v = rt->nh.gw << 16; + v |= rt->switch_mac_id << 12; + sw_w32(v, rtl_table_data(r, 1)); + rtl_table_write(r, idx); + + rtl_table_release(r); +} + +/* + * Configure the switch's own MAC addresses used when 
routing packets + */ +static void rtl839x_setup_port_macs(struct rtl838x_switch_priv *priv) +{ + int i; + struct net_device *dev; + u64 mac; + + pr_debug("%s: got port %08x\n", __func__, (u32)priv->ports[priv->cpu_port].dp); + dev = priv->ports[priv->cpu_port].dp->slave; + mac = ether_addr_to_u64(dev->dev_addr); + + for (i = 0; i < 15; i++) { + mac++; // BUG: VRRP for testing + sw_w32(mac >> 32, RTL839X_ROUTING_SA_CTRL + i * 8); + sw_w32(mac, RTL839X_ROUTING_SA_CTRL + i * 8 + 4); + } +} + +int rtl839x_l3_setup(struct rtl838x_switch_priv *priv) +{ + rtl839x_setup_port_macs(priv); + + return 0; +} + +void rtl839x_vlan_port_keep_tag_set(int port, bool keep_outer, bool keep_inner) +{ + sw_w32(FIELD_PREP(RTL839X_VLAN_PORT_TAG_STS_CTRL_OTAG_STS_MASK, + keep_outer ? RTL839X_VLAN_PORT_TAG_STS_TAGGED : RTL839X_VLAN_PORT_TAG_STS_UNTAG) | + FIELD_PREP(RTL839X_VLAN_PORT_TAG_STS_CTRL_ITAG_STS_MASK, + keep_inner ? RTL839X_VLAN_PORT_TAG_STS_TAGGED : RTL839X_VLAN_PORT_TAG_STS_UNTAG), + RTL839X_VLAN_PORT_TAG_STS_CTRL(port)); +} + +void rtl839x_vlan_port_pvidmode_set(int port, enum pbvlan_type type, enum pbvlan_mode mode) +{ + if (type == PBVLAN_TYPE_INNER) + sw_w32_mask(0x3, mode, RTL839X_VLAN_PORT_PB_VLAN + (port << 2)); + else + sw_w32_mask(0x3 << 14, mode << 14, RTL839X_VLAN_PORT_PB_VLAN + (port << 2)); +} + +void rtl839x_vlan_port_pvid_set(int port, enum pbvlan_type type, int pvid) +{ + if (type == PBVLAN_TYPE_INNER) + sw_w32_mask(0xfff << 2, pvid << 2, RTL839X_VLAN_PORT_PB_VLAN + (port << 2)); + else + sw_w32_mask(0xfff << 16, pvid << 16, RTL839X_VLAN_PORT_PB_VLAN + (port << 2)); +} + +static int rtl839x_set_ageing_time(unsigned long msec) +{ + int t = sw_r32(RTL839X_L2_CTRL_1); + + t &= 0x1FFFFF; + t = t * 3 / 5; /* Aging time in seconds. 0: L2 aging disabled */ + pr_debug("L2 AGING time: %d sec\n", t); + + t = (msec * 5 + 2000) / 3000; + t = t > 0x1FFFFF ? 
0x1FFFFF : t; + sw_w32_mask(0x1FFFFF, t, RTL839X_L2_CTRL_1); + pr_debug("Dynamic aging for ports: %x\n", sw_r32(RTL839X_L2_PORT_AGING_OUT)); + + return 0; +} + +static void rtl839x_set_igr_filter(int port, enum igr_filter state) +{ + sw_w32_mask(0x3 << ((port & 0xf)<<1), state << ((port & 0xf)<<1), + RTL839X_VLAN_PORT_IGR_FLTR + (((port >> 4) << 2))); +} + +static void rtl839x_set_egr_filter(int port, enum egr_filter state) +{ + sw_w32_mask(0x1 << (port % 0x20), state << (port % 0x20), + RTL839X_VLAN_PORT_EGR_FLTR + (((port >> 5) << 2))); +} + +void rtl839x_set_distribution_algorithm(int group, int algoidx, u32 algomsk) +{ + sw_w32_mask(3 << ((group & 0xf) << 1), algoidx << ((group & 0xf) << 1), + RTL839X_TRK_HASH_IDX_CTRL + ((group >> 4) << 2)); + sw_w32(algomsk, RTL839X_TRK_HASH_CTRL + (algoidx << 2)); +} + +void rtl839x_set_receive_management_action(int port, rma_ctrl_t type, action_type_t action) +{ + switch(type) { + case BPDU: + sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1), + RTL839X_RMA_BPDU_CTRL + ((port >> 4) << 2)); + break; + case PTP: + sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1), + RTL839X_RMA_PTP_CTRL + ((port >> 4) << 2)); + break; + case LLTP: + sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1), + RTL839X_RMA_LLTP_CTRL + ((port >> 4) << 2)); + break; + default: + break; + } +} + +const struct rtl838x_reg rtl839x_reg = { + .mask_port_reg_be = rtl839x_mask_port_reg_be, + .set_port_reg_be = rtl839x_set_port_reg_be, + .get_port_reg_be = rtl839x_get_port_reg_be, + .mask_port_reg_le = rtl839x_mask_port_reg_le, + .set_port_reg_le = rtl839x_set_port_reg_le, + .get_port_reg_le = rtl839x_get_port_reg_le, + .stat_port_rst = RTL839X_STAT_PORT_RST, + .stat_rst = RTL839X_STAT_RST, + .stat_port_std_mib = RTL839X_STAT_PORT_STD_MIB, + .traffic_enable = rtl839x_traffic_enable, + .traffic_disable = rtl839x_traffic_disable, + .traffic_get = rtl839x_traffic_get, + .traffic_set = rtl839x_traffic_set, + .port_iso_ctrl = rtl839x_port_iso_ctrl, + .l2_ctrl_0 = RTL839X_L2_CTRL_0, + .l2_ctrl_1 = RTL839X_L2_CTRL_1, + .l2_port_aging_out = RTL839X_L2_PORT_AGING_OUT, + .set_ageing_time = rtl839x_set_ageing_time, + .smi_poll_ctrl = RTL839X_SMI_PORT_POLLING_CTRL, + .l2_tbl_flush_ctrl = RTL839X_L2_TBL_FLUSH_CTRL, + .exec_tbl0_cmd = rtl839x_exec_tbl0_cmd, + .exec_tbl1_cmd = rtl839x_exec_tbl1_cmd, + .tbl_access_data_0 = rtl839x_tbl_access_data_0, + .isr_glb_src = RTL839X_ISR_GLB_SRC, + .isr_port_link_sts_chg = RTL839X_ISR_PORT_LINK_STS_CHG, + .imr_port_link_sts_chg = RTL839X_IMR_PORT_LINK_STS_CHG, + .imr_glb = RTL839X_IMR_GLB, + .vlan_tables_read = rtl839x_vlan_tables_read, + .vlan_set_tagged = rtl839x_vlan_set_tagged, + .vlan_set_untagged = rtl839x_vlan_set_untagged, + .vlan_profile_dump = rtl839x_vlan_profile_dump, + .vlan_profile_setup = rtl839x_vlan_profile_setup, + .vlan_fwd_on_inner = rtl839x_vlan_fwd_on_inner, + .vlan_port_keep_tag_set = rtl839x_vlan_port_keep_tag_set, + .vlan_port_pvidmode_set = rtl839x_vlan_port_pvidmode_set, + .vlan_port_pvid_set = rtl839x_vlan_port_pvid_set, + .set_vlan_igr_filter = rtl839x_set_igr_filter, + .set_vlan_egr_filter = rtl839x_set_egr_filter, + .enable_learning = rtl839x_enable_learning, + .enable_flood = rtl839x_enable_flood, + .enable_mcast_flood = rtl839x_enable_mcast_flood, + .enable_bcast_flood = rtl839x_enable_bcast_flood, + .stp_get = rtl839x_stp_get, + .stp_set = rtl839x_stp_set, + .mac_force_mode_ctrl = rtl839x_mac_force_mode_ctrl, + .mac_port_ctrl = rtl839x_mac_port_ctrl, + 
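/* The remaining members collect the RTL839x helpers for L2 table access,
 * EEE, packet classification (PIE), counters and L3 routing, so that the
 * family-independent code can dispatch through this single rtl838x_reg
 * instance. */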
.l2_port_new_salrn = rtl839x_l2_port_new_salrn, + .l2_port_new_sa_fwd = rtl839x_l2_port_new_sa_fwd, + .mir_ctrl = RTL839X_MIR_CTRL, + .mir_dpm = RTL839X_MIR_DPM_CTRL, + .mir_spm = RTL839X_MIR_SPM_CTRL, + .mac_link_sts = RTL839X_MAC_LINK_STS, + .mac_link_dup_sts = RTL839X_MAC_LINK_DUP_STS, + .mac_link_spd_sts = rtl839x_mac_link_spd_sts, + .mac_rx_pause_sts = RTL839X_MAC_RX_PAUSE_STS, + .mac_tx_pause_sts = RTL839X_MAC_TX_PAUSE_STS, + .read_l2_entry_using_hash = rtl839x_read_l2_entry_using_hash, + .write_l2_entry_using_hash = rtl839x_write_l2_entry_using_hash, + .read_cam = rtl839x_read_cam, + .write_cam = rtl839x_write_cam, + .trk_mbr_ctr = rtl839x_trk_mbr_ctr, + .rma_bpdu_fld_pmask = RTL839X_RMA_BPDU_FLD_PMSK, + .spcl_trap_eapol_ctrl = RTL839X_SPCL_TRAP_EAPOL_CTRL, + .init_eee = rtl839x_init_eee, + .port_eee_set = rtl839x_port_eee_set, + .eee_port_ability = rtl839x_eee_port_ability, + .l2_hash_seed = rtl839x_l2_hash_seed, + .l2_hash_key = rtl839x_l2_hash_key, + .read_mcast_pmask = rtl839x_read_mcast_pmask, + .write_mcast_pmask = rtl839x_write_mcast_pmask, + .pie_init = rtl839x_pie_init, + .pie_rule_read = rtl839x_pie_rule_read, + .pie_rule_write = rtl839x_pie_rule_write, + .pie_rule_add = rtl839x_pie_rule_add, + .pie_rule_rm = rtl839x_pie_rule_rm, + .l2_learning_setup = rtl839x_l2_learning_setup, + .packet_cntr_read = rtl839x_packet_cntr_read, + .packet_cntr_clear = rtl839x_packet_cntr_clear, + .route_read = rtl839x_route_read, + .route_write = rtl839x_route_write, + .l3_setup = rtl839x_l3_setup, + .set_distribution_algorithm = rtl839x_set_distribution_algorithm, + .set_receive_management_action = rtl839x_set_receive_management_action, +}; diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl83xx.h b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl83xx.h new file mode 100644 index 0000000000..107016469c --- /dev/null +++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl83xx.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _NET_DSA_RTL83XX_H +#define _NET_DSA_RTL83XX_H + +#include +#include "rtl838x.h" + + +#define RTL8380_VERSION_A 'A' +#define RTL8390_VERSION_A 'A' +#define RTL8380_VERSION_B 'B' + +struct fdb_update_work { + struct work_struct work; + struct net_device *ndev; + u64 macs[]; +}; + +#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name} +struct rtl83xx_mib_desc { + unsigned int size; + unsigned int offset; + const char *name; +}; + +/* API for switch table access */ +struct table_reg { + u16 addr; + u16 data; + u8 max_data; + u8 c_bit; + u8 t_bit; + u8 rmode; + u8 tbl; + struct mutex lock; +}; + +#define TBL_DESC(_addr, _data, _max_data, _c_bit, _t_bit, _rmode) \ + { .addr = _addr, .data = _data, .max_data = _max_data, .c_bit = _c_bit, \ + .t_bit = _t_bit, .rmode = _rmode \ + } + +typedef enum { + RTL8380_TBL_L2 = 0, + RTL8380_TBL_0, + RTL8380_TBL_1, + RTL8390_TBL_L2, + RTL8390_TBL_0, + RTL8390_TBL_1, + RTL8390_TBL_2, + RTL9300_TBL_L2, + RTL9300_TBL_0, + RTL9300_TBL_1, + RTL9300_TBL_2, + RTL9300_TBL_HSB, + RTL9300_TBL_HSA, + RTL9310_TBL_0, + RTL9310_TBL_1, + RTL9310_TBL_2, + RTL9310_TBL_3, + RTL9310_TBL_4, + RTL9310_TBL_5, + RTL_TBL_END +} rtl838x_tbl_reg_t; + +void rtl_table_init(void); +struct table_reg *rtl_table_get(rtl838x_tbl_reg_t r, int t); +void rtl_table_release(struct table_reg *r); +void rtl_table_read(struct table_reg *r, int idx); +void rtl_table_write(struct table_reg *r, int idx); +inline u16 rtl_table_data(struct table_reg *r, int i); +inline u32 
rtl_table_data_r(struct table_reg *r, int i); +inline void rtl_table_data_w(struct table_reg *r, u32 v, int i); + +void __init rtl83xx_setup_qos(struct rtl838x_switch_priv *priv); + +int rtl83xx_packet_cntr_alloc(struct rtl838x_switch_priv *priv); + +int rtl83xx_port_is_under(const struct net_device * dev, struct rtl838x_switch_priv *priv); + +int read_phy(u32 port, u32 page, u32 reg, u32 *val); +int write_phy(u32 port, u32 page, u32 reg, u32 val); + +/* Port register accessor functions for the RTL839x and RTL931X SoCs */ +void rtl839x_mask_port_reg_be(u64 clear, u64 set, int reg); +u64 rtl839x_get_port_reg_be(int reg); +void rtl839x_set_port_reg_be(u64 set, int reg); +void rtl839x_mask_port_reg_le(u64 clear, u64 set, int reg); +void rtl839x_set_port_reg_le(u64 set, int reg); +u64 rtl839x_get_port_reg_le(int reg); + +/* Port register accessor functions for the RTL838x and RTL930X SoCs */ +void rtl838x_mask_port_reg(u64 clear, u64 set, int reg); +void rtl838x_set_port_reg(u64 set, int reg); +u64 rtl838x_get_port_reg(int reg); + +/* RTL838x-specific */ +u32 rtl838x_hash(struct rtl838x_switch_priv *priv, u64 seed); +irqreturn_t rtl838x_switch_irq(int irq, void *dev_id); +void rtl8380_get_version(struct rtl838x_switch_priv *priv); +void rtl838x_vlan_profile_dump(int index); +int rtl83xx_dsa_phy_read(struct dsa_switch *ds, int phy_addr, int phy_reg); +void rtl8380_sds_rst(int mac); +int rtl8380_sds_power(int mac, int val); +void rtl838x_print_matrix(void); + +/* RTL839x-specific */ +u32 rtl839x_hash(struct rtl838x_switch_priv *priv, u64 seed); +irqreturn_t rtl839x_switch_irq(int irq, void *dev_id); +void rtl8390_get_version(struct rtl838x_switch_priv *priv); +void rtl839x_vlan_profile_dump(int index); +int rtl83xx_dsa_phy_write(struct dsa_switch *ds, int phy_addr, int phy_reg, u16 val); +void rtl839x_exec_tbl2_cmd(u32 cmd); +void rtl839x_print_matrix(void); + +/* RTL930x-specific */ +u32 rtl930x_hash(struct rtl838x_switch_priv *priv, u64 seed); +irqreturn_t rtl930x_switch_irq(int irq, void *dev_id); +irqreturn_t rtl839x_switch_irq(int irq, void *dev_id); +void rtl930x_vlan_profile_dump(int index); +int rtl9300_sds_power(int mac, int val); +void rtl9300_sds_rst(int sds_num, u32 mode); +int rtl9300_serdes_setup(int sds_num, phy_interface_t phy_mode); +void rtl930x_print_matrix(void); + +/* RTL931x-specific */ +irqreturn_t rtl931x_switch_irq(int irq, void *dev_id); +int rtl931x_sds_cmu_band_get(int sds, phy_interface_t mode); +int rtl931x_sds_cmu_band_set(int sds, bool enable, u32 band, phy_interface_t mode); +void rtl931x_sds_init(u32 sds, phy_interface_t mode); + +int rtl83xx_lag_add(struct dsa_switch *ds, int group, int port, struct netdev_lag_upper_info *info); +int rtl83xx_lag_del(struct dsa_switch *ds, int group, int port); + +#endif /* _NET_DSA_RTL83XX_H */ + diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl930x.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl930x.c new file mode 100644 index 0000000000..5dde8353e2 --- /dev/null +++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl930x.c @@ -0,0 +1,2560 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +#include "rtl83xx.h" + +#define RTL930X_VLAN_PORT_TAG_STS_INTERNAL 0x0 +#define RTL930X_VLAN_PORT_TAG_STS_UNTAG 0x1 +#define RTL930X_VLAN_PORT_TAG_STS_TAGGED 0x2 +#define RTL930X_VLAN_PORT_TAG_STS_PRIORITY_TAGGED 0x3 + +#define RTL930X_VLAN_PORT_TAG_STS_CTRL_BASE 0xCE24 +/* port 0-28 */ +#define RTL930X_VLAN_PORT_TAG_STS_CTRL(port) \ + RTL930X_VLAN_PORT_TAG_STS_CTRL_BASE + 
(port << 2) +#define RTL930X_VLAN_PORT_TAG_STS_CTRL_EGR_OTAG_STS_MASK GENMASK(7,6) +#define RTL930X_VLAN_PORT_TAG_STS_CTRL_EGR_ITAG_STS_MASK GENMASK(5,4) +#define RTL930X_VLAN_PORT_TAG_STS_CTRL_EGR_P_OTAG_KEEP_MASK GENMASK(3,3) +#define RTL930X_VLAN_PORT_TAG_STS_CTRL_EGR_P_ITAG_KEEP_MASK GENMASK(2,2) +#define RTL930X_VLAN_PORT_TAG_STS_CTRL_IGR_P_OTAG_KEEP_MASK GENMASK(1,1) +#define RTL930X_VLAN_PORT_TAG_STS_CTRL_IGR_P_ITAG_KEEP_MASK GENMASK(0,0) + +extern struct mutex smi_lock; +extern struct rtl83xx_soc_info soc_info; + +/* Definition of the RTL930X-specific template field IDs as used in the PIE */ +enum template_field_id { + TEMPLATE_FIELD_SPM0 = 0, // Source portmask ports 0-15 + TEMPLATE_FIELD_SPM1 = 1, // Source portmask ports 16-31 + TEMPLATE_FIELD_DMAC0 = 2, // Destination MAC [15:0] + TEMPLATE_FIELD_DMAC1 = 3, // Destination MAC [31:16] + TEMPLATE_FIELD_DMAC2 = 4, // Destination MAC [47:32] + TEMPLATE_FIELD_SMAC0 = 5, // Source MAC [15:0] + TEMPLATE_FIELD_SMAC1 = 6, // Source MAC [31:16] + TEMPLATE_FIELD_SMAC2 = 7, // Source MAC [47:32] + TEMPLATE_FIELD_ETHERTYPE = 8, // Ethernet frame type field + TEMPLATE_FIELD_OTAG = 9, + TEMPLATE_FIELD_ITAG = 10, + TEMPLATE_FIELD_SIP0 = 11, + TEMPLATE_FIELD_SIP1 = 12, + TEMPLATE_FIELD_DIP0 = 13, + TEMPLATE_FIELD_DIP1 = 14, + TEMPLATE_FIELD_IP_TOS_PROTO = 15, + TEMPLATE_FIELD_L4_SPORT = 16, + TEMPLATE_FIELD_L4_DPORT = 17, + TEMPLATE_FIELD_L34_HEADER = 18, + TEMPLATE_FIELD_TCP_INFO = 19, + TEMPLATE_FIELD_FIELD_SELECTOR_VALID = 20, + TEMPLATE_FIELD_FIELD_SELECTOR_0 = 21, + TEMPLATE_FIELD_FIELD_SELECTOR_1 = 22, + TEMPLATE_FIELD_FIELD_SELECTOR_2 = 23, + TEMPLATE_FIELD_FIELD_SELECTOR_3 = 24, + TEMPLATE_FIELD_FIELD_SELECTOR_4 = 25, + TEMPLATE_FIELD_FIELD_SELECTOR_5 = 26, + TEMPLATE_FIELD_SIP2 = 27, + TEMPLATE_FIELD_SIP3 = 28, + TEMPLATE_FIELD_SIP4 = 29, + TEMPLATE_FIELD_SIP5 = 30, + TEMPLATE_FIELD_SIP6 = 31, + TEMPLATE_FIELD_SIP7 = 32, + TEMPLATE_FIELD_DIP2 = 33, + TEMPLATE_FIELD_DIP3 = 34, + TEMPLATE_FIELD_DIP4 = 35, + TEMPLATE_FIELD_DIP5 = 36, + TEMPLATE_FIELD_DIP6 = 37, + TEMPLATE_FIELD_DIP7 = 38, + TEMPLATE_FIELD_PKT_INFO = 39, + TEMPLATE_FIELD_FLOW_LABEL = 40, + TEMPLATE_FIELD_DSAP_SSAP = 41, + TEMPLATE_FIELD_SNAP_OUI = 42, + TEMPLATE_FIELD_FWD_VID = 43, + TEMPLATE_FIELD_RANGE_CHK = 44, + TEMPLATE_FIELD_VLAN_GMSK = 45, // VLAN Group Mask/IP range check + TEMPLATE_FIELD_DLP = 46, + TEMPLATE_FIELD_META_DATA = 47, + TEMPLATE_FIELD_SRC_FWD_VID = 48, + TEMPLATE_FIELD_SLP = 49, +}; + +/* The meaning of TEMPLATE_FIELD_VLAN depends on phase and the configuration in + * RTL930X_PIE_CTRL. 
We use always the same definition and map to the inner VLAN tag: + */ +#define TEMPLATE_FIELD_VLAN TEMPLATE_FIELD_ITAG + +// Number of fixed templates predefined in the RTL9300 SoC +#define N_FIXED_TEMPLATES 5 +// RTL9300 specific predefined templates +static enum template_field_id fixed_templates[N_FIXED_TEMPLATES][N_FIXED_FIELDS] = +{ + { + TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2, + TEMPLATE_FIELD_SMAC0, TEMPLATE_FIELD_SMAC1, TEMPLATE_FIELD_SMAC2, + TEMPLATE_FIELD_VLAN, TEMPLATE_FIELD_IP_TOS_PROTO, TEMPLATE_FIELD_DSAP_SSAP, + TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1 + }, { + TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0, + TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_IP_TOS_PROTO, TEMPLATE_FIELD_TCP_INFO, + TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_L4_DPORT, TEMPLATE_FIELD_VLAN, + TEMPLATE_FIELD_RANGE_CHK, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1 + }, { + TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2, + TEMPLATE_FIELD_VLAN, TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_IP_TOS_PROTO, + TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0, + TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_L4_DPORT + }, { + TEMPLATE_FIELD_DIP0, TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_DIP2, + TEMPLATE_FIELD_DIP3, TEMPLATE_FIELD_DIP4, TEMPLATE_FIELD_DIP5, + TEMPLATE_FIELD_DIP6, TEMPLATE_FIELD_DIP7, TEMPLATE_FIELD_IP_TOS_PROTO, + TEMPLATE_FIELD_TCP_INFO, TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_L4_DPORT + }, { + TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_SIP2, + TEMPLATE_FIELD_SIP3, TEMPLATE_FIELD_SIP4, TEMPLATE_FIELD_SIP5, + TEMPLATE_FIELD_SIP6, TEMPLATE_FIELD_SIP7, TEMPLATE_FIELD_VLAN, + TEMPLATE_FIELD_RANGE_CHK, TEMPLATE_FIELD_SPM1, TEMPLATE_FIELD_SPM1 + }, +}; + +void rtl930x_print_matrix(void) +{ + int i; + struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 6); + + for (i = 0; i < 29; i++) { + rtl_table_read(r, i); + pr_debug("> %08x\n", sw_r32(rtl_table_data(r, 0))); + } + rtl_table_release(r); +} + +inline void rtl930x_exec_tbl0_cmd(u32 cmd) +{ + sw_w32(cmd, RTL930X_TBL_ACCESS_CTRL_0); + do { } while (sw_r32(RTL930X_TBL_ACCESS_CTRL_0) & (1 << 17)); +} + +inline void rtl930x_exec_tbl1_cmd(u32 cmd) +{ + sw_w32(cmd, RTL930X_TBL_ACCESS_CTRL_1); + do { } while (sw_r32(RTL930X_TBL_ACCESS_CTRL_1) & (1 << 17)); +} + +inline int rtl930x_tbl_access_data_0(int i) +{ + return RTL930X_TBL_ACCESS_DATA_0(i); +} + +static inline int rtl930x_l2_port_new_salrn(int p) +{ + return RTL930X_L2_PORT_SALRN(p); +} + +static inline int rtl930x_l2_port_new_sa_fwd(int p) +{ + // TODO: The definition of the fields changed, because of the master-cpu in a stack + return RTL930X_L2_PORT_NEW_SA_FWD(p); +} + +inline static int rtl930x_trk_mbr_ctr(int group) +{ + return RTL930X_TRK_MBR_CTRL + (group << 2); +} + +static void rtl930x_vlan_tables_read(u32 vlan, struct rtl838x_vlan_info *info) +{ + u32 v, w; + // Read VLAN table (1) via register 0 + struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 1); + + rtl_table_read(r, vlan); + v = sw_r32(rtl_table_data(r, 0)); + w = sw_r32(rtl_table_data(r, 1)); + pr_debug("VLAN_READ %d: %08x %08x\n", vlan, v, w); + rtl_table_release(r); + + info->tagged_ports = v >> 3; + info->profile_id = (w >> 24) & 7; + info->hash_mc_fid = !!(w & BIT(27)); + info->hash_uc_fid = !!(w & BIT(28)); + info->fid = ((v & 0x7) << 3) | ((w >> 29) & 0x7); + + // Read UNTAG table via table register 2 + r = rtl_table_get(RTL9300_TBL_2, 0); + rtl_table_read(r, vlan); + v = sw_r32(rtl_table_data(r, 0)); + rtl_table_release(r); + 
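/* As with the tagged-port bitmap read from the VLAN table above, the port
 * members of the UNTAG table entry start at bit 3, hence the same shift. */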
+ info->untagged_ports = v >> 3; +} + +static void rtl930x_vlan_set_tagged(u32 vlan, struct rtl838x_vlan_info *info) +{ + u32 v, w; + // Access VLAN table (1) via register 0 + struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 1); + + v = info->tagged_ports << 3; + v |= ((u32)info->fid) >> 3; + + w = ((u32)info->fid) << 29; + w |= info->hash_mc_fid ? BIT(27) : 0; + w |= info->hash_uc_fid ? BIT(28) : 0; + w |= info->profile_id << 24; + + sw_w32(v, rtl_table_data(r, 0)); + sw_w32(w, rtl_table_data(r, 1)); + + rtl_table_write(r, vlan); + rtl_table_release(r); +} + +void rtl930x_vlan_profile_dump(int profile) +{ + u32 p[5]; + + if (profile < 0 || profile > 7) + return; + + p[0] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile)); + p[1] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile) + 4); + p[2] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile) + 8) & 0x1FFFFFFF; + p[3] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile) + 12) & 0x1FFFFFFF; + p[4] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile) + 16) & 0x1FFFFFFF; + + pr_info("VLAN %d: L2 learn: %d; Unknown MC PMasks: L2 %0x, IPv4 %0x, IPv6: %0x", + profile, p[0] & (3 << 21), p[2], p[3], p[4]); + pr_info(" Routing enabled: IPv4 UC %c, IPv6 UC %c, IPv4 MC %c, IPv6 MC %c\n", + p[0] & BIT(17) ? 'y' : 'n', p[0] & BIT(16) ? 'y' : 'n', + p[0] & BIT(13) ? 'y' : 'n', p[0] & BIT(12) ? 'y' : 'n'); + pr_info(" Bridge enabled: IPv4 MC %c, IPv6 MC %c,\n", + p[0] & BIT(15) ? 'y' : 'n', p[0] & BIT(14) ? 'y' : 'n'); + pr_info("VLAN profile %d: raw %08x %08x %08x %08x %08x\n", + profile, p[0], p[1], p[2], p[3], p[4]); +} + +static void rtl930x_vlan_set_untagged(u32 vlan, u64 portmask) +{ + struct table_reg *r = rtl_table_get(RTL9300_TBL_2, 0); + + sw_w32(portmask << 3, rtl_table_data(r, 0)); + rtl_table_write(r, vlan); + rtl_table_release(r); +} + +/* Sets the L2 forwarding to be based on either the inner VLAN tag or the outer + */ +static void rtl930x_vlan_fwd_on_inner(int port, bool is_set) +{ + // Always set all tag modes to fwd based on either inner or outer tag + if (is_set) + sw_w32_mask(0, 0xf, RTL930X_VLAN_PORT_FWD + (port << 2)); + else + sw_w32_mask(0xf, 0, RTL930X_VLAN_PORT_FWD + (port << 2)); +} + +static void rtl930x_vlan_profile_setup(int profile) +{ + u32 p[5]; + + pr_info("In %s\n", __func__); + p[0] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile)); + p[1] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile) + 4); + + // Enable routing of Ipv4/6 Unicast and IPv4/6 Multicast traffic + p[0] |= BIT(17) | BIT(16) | BIT(13) | BIT(12); + p[2] = 0x1fffffff; // L2 unknown MC flooding portmask all ports, including the CPU-port + p[3] = 0x1fffffff; // IPv4 unknown MC flooding portmask + p[4] = 0x1fffffff; // IPv6 unknown MC flooding portmask + + sw_w32(p[0], RTL930X_VLAN_PROFILE_SET(profile)); + sw_w32(p[1], RTL930X_VLAN_PROFILE_SET(profile) + 4); + sw_w32(p[2], RTL930X_VLAN_PROFILE_SET(profile) + 8); + sw_w32(p[3], RTL930X_VLAN_PROFILE_SET(profile) + 12); + sw_w32(p[4], RTL930X_VLAN_PROFILE_SET(profile) + 16); +} + +static void rtl930x_l2_learning_setup(void) +{ + // Portmask for flooding broadcast traffic + sw_w32(0x1fffffff, RTL930X_L2_BC_FLD_PMSK); + + // Portmask for flooding unicast traffic with unknown destination + sw_w32(0x1fffffff, RTL930X_L2_UNKN_UC_FLD_PMSK); + + // Limit learning to maximum: 32k entries, after that just flood (bits 0-1) + sw_w32((0x7fff << 2) | 0, RTL930X_L2_LRN_CONSTRT_CTRL); +} + +static void rtl930x_stp_get(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]) +{ + int i; + u32 cmd = 1 << 17 /* Execute cmd */ + | 0 << 16 /* Read */ + | 4 << 12 /* Table type 
0b10 */ + | (msti & 0xfff); + priv->r->exec_tbl0_cmd(cmd); + + for (i = 0; i < 2; i++) + port_state[i] = sw_r32(RTL930X_TBL_ACCESS_DATA_0(i)); + pr_debug("MSTI: %d STATE: %08x, %08x\n", msti, port_state[0], port_state[1]); +} + +static void rtl930x_stp_set(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]) +{ + int i; + u32 cmd = 1 << 17 /* Execute cmd */ + | 1 << 16 /* Write */ + | 4 << 12 /* Table type 4 */ + | (msti & 0xfff); + + for (i = 0; i < 2; i++) + sw_w32(port_state[i], RTL930X_TBL_ACCESS_DATA_0(i)); + priv->r->exec_tbl0_cmd(cmd); +} + +static inline int rtl930x_mac_force_mode_ctrl(int p) +{ + return RTL930X_MAC_FORCE_MODE_CTRL + (p << 2); +} + +static inline int rtl930x_mac_port_ctrl(int p) +{ + return RTL930X_MAC_L2_PORT_CTRL(p); +} + +static inline int rtl930x_mac_link_spd_sts(int p) +{ + return RTL930X_MAC_LINK_SPD_STS(p); +} + +static u64 rtl930x_l2_hash_seed(u64 mac, u32 vid) +{ + u64 v = vid; + + v <<= 48; + v |= mac; + + return v; +} + +/* + * Calculate both the block 0 and the block 1 hash by applyingthe same hash + * algorithm as the one used currently by the ASIC to the seed, and return + * both hashes in the lower and higher word of the return value since only 12 bit of + * the hash are significant + */ +static u32 rtl930x_l2_hash_key(struct rtl838x_switch_priv *priv, u64 seed) +{ + u32 k0, k1, h1, h2, h; + + k0 = (u32) (((seed >> 55) & 0x1f) ^ ((seed >> 44) & 0x7ff) + ^ ((seed >> 33) & 0x7ff) ^ ((seed >> 22) & 0x7ff) + ^ ((seed >> 11) & 0x7ff) ^ (seed & 0x7ff)); + + h1 = (seed >> 11) & 0x7ff; + h1 = ((h1 & 0x1f) << 6) | ((h1 >> 5) & 0x3f); + + h2 = (seed >> 33) & 0x7ff; + h2 = ((h2 & 0x3f) << 5)| ((h2 >> 6) & 0x3f); + + k1 = (u32) (((seed << 55) & 0x1f) ^ ((seed >> 44) & 0x7ff) ^ h2 + ^ ((seed >> 22) & 0x7ff) ^ h1 + ^ (seed & 0x7ff)); + + // Algorithm choice for block 0 + if (sw_r32(RTL930X_L2_CTRL) & BIT(0)) + h = k1; + else + h = k0; + + /* Algorithm choice for block 1 + * Since k0 and k1 are < 2048, adding 2048 will offset the hash into the second + * half of hash-space + * 2048 is in fact the hash-table size 16384 divided by 4 hashes per bucket + * divided by 2 to divide the hash space in 2 + */ + if (sw_r32(RTL930X_L2_CTRL) & BIT(1)) + h |= (k1 + 2048) << 16; + else + h |= (k0 + 2048) << 16; + + return h; +} + +/* + * Fills an L2 entry structure from the SoC registers + */ +static void rtl930x_fill_l2_entry(u32 r[], struct rtl838x_l2_entry *e) +{ + pr_debug("In %s valid?\n", __func__); + e->valid = !!(r[2] & BIT(31)); + if (!e->valid) + return; + + pr_debug("In %s is valid\n", __func__); + e->is_ip_mc = false; + e->is_ipv6_mc = false; + + // TODO: Is there not a function to copy directly MAC memory? + e->mac[0] = (r[0] >> 24); + e->mac[1] = (r[0] >> 16); + e->mac[2] = (r[0] >> 8); + e->mac[3] = r[0]; + e->mac[4] = (r[1] >> 24); + e->mac[5] = (r[1] >> 16); + + e->next_hop = !!(r[2] & BIT(12)); + e->rvid = r[1] & 0xfff; + + /* Is it a unicast entry? 
check multicast bit */ + if (!(e->mac[0] & 1)) { + e->type = L2_UNICAST; + e->is_static = !!(r[2] & BIT(14)); + e->port = (r[2] >> 20) & 0x3ff; + // Check for trunk port + if (r[2] & BIT(30)) { + e->is_trunk = true; + e->stack_dev = (e->port >> 9) & 1; + e->trunk = e->port & 0x3f; + } else { + e->is_trunk = false; + e->stack_dev = (e->port >> 6) & 0xf; + e->port = e->port & 0x3f; + } + + e->block_da = !!(r[2] & BIT(15)); + e->block_sa = !!(r[2] & BIT(16)); + e->suspended = !!(r[2] & BIT(13)); + e->age = (r[2] >> 17) & 3; + e->valid = true; + // the UC_VID field in hardware is used for the VID or for the route id + if (e->next_hop) { + e->nh_route_id = r[2] & 0x7ff; + e->vid = 0; + } else { + e->vid = r[2] & 0xfff; + e->nh_route_id = 0; + } + } else { + e->valid = true; + e->type = L2_MULTICAST; + e->mc_portmask_index = (r[2] >> 16) & 0x3ff; + } +} + +/* + * Fills the 3 SoC table registers r[] with the information of in the rtl838x_l2_entry + */ +static void rtl930x_fill_l2_row(u32 r[], struct rtl838x_l2_entry *e) +{ + u32 port; + + if (!e->valid) { + r[0] = r[1] = r[2] = 0; + return; + } + + r[2] = BIT(31); // Set valid bit + + r[0] = ((u32)e->mac[0]) << 24 | ((u32)e->mac[1]) << 16 + | ((u32)e->mac[2]) << 8 | ((u32)e->mac[3]); + r[1] = ((u32)e->mac[4]) << 24 | ((u32)e->mac[5]) << 16; + + r[2] |= e->next_hop ? BIT(12) : 0; + + if (e->type == L2_UNICAST) { + r[2] |= e->is_static ? BIT(14) : 0; + r[1] |= e->rvid & 0xfff; + r[2] |= (e->port & 0x3ff) << 20; + if (e->is_trunk) { + r[2] |= BIT(30); + port = e->stack_dev << 9 | (e->port & 0x3f); + } else { + port = (e->stack_dev & 0xf) << 6; + port |= e->port & 0x3f; + } + r[2] |= port << 20; + r[2] |= e->block_da ? BIT(15) : 0; + r[2] |= e->block_sa ? BIT(17) : 0; + r[2] |= e->suspended ? BIT(13) : 0; + r[2] |= (e->age & 0x3) << 17; + // the UC_VID field in hardware is used for the VID or for the route id + if (e->next_hop) + r[2] |= e->nh_route_id & 0x7ff; + else + r[2] |= e->vid & 0xfff; + } else { // L2_MULTICAST + r[2] |= (e->mc_portmask_index & 0x3ff) << 16; + r[2] |= e->mc_mac_index & 0x7ff; + } +} + +/* + * Read an L2 UC or MC entry out of a hash bucket of the L2 forwarding table + * hash is the id of the bucket and pos is the position of the entry in that bucket + * The data read from the SoC is filled into rtl838x_l2_entry + */ +static u64 rtl930x_read_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e) +{ + u32 r[3]; + struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 0); + u32 idx; + int i; + u64 mac; + u64 seed; + + pr_debug("%s: hash %08x, pos: %d\n", __func__, hash, pos); + + /* On the RTL93xx, 2 different hash algorithms are used making it a total of + * 8 buckets that need to be searched, 4 for each hash-half + * Use second hash space when bucket is between 4 and 8 */ + if (pos >= 4) { + pos -= 4; + hash >>= 16; + } else { + hash &= 0xffff; + } + + idx = (0 << 14) | (hash << 2) | pos; // Search SRAM, with hash and at pos in bucket + pr_debug("%s: NOW hash %08x, pos: %d\n", __func__, hash, pos); + + rtl_table_read(q, idx); + for (i = 0; i < 3; i++) + r[i] = sw_r32(rtl_table_data(q, i)); + + rtl_table_release(q); + + rtl930x_fill_l2_entry(r, e); + + pr_debug("%s: valid: %d, nh: %d\n", __func__, e->valid, e->next_hop); + if (!e->valid) + return 0; + + mac = ((u64)e->mac[0]) << 40 | ((u64)e->mac[1]) << 32 | ((u64)e->mac[2]) << 24 + | ((u64)e->mac[3]) << 16 | ((u64)e->mac[4]) << 8 | ((u64)e->mac[5]); + + seed = rtl930x_l2_hash_seed(mac, e->rvid); + pr_debug("%s: mac %016llx, seed %016llx\n", __func__, mac, seed); + // 
return vid with concatenated mac as unique id + return seed; +} + +static void rtl930x_write_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e) +{ + u32 r[3]; + struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 0); + u32 idx = (0 << 14) | (hash << 2) | pos; // Access SRAM, with hash and at pos in bucket + int i; + + pr_debug("%s: hash %d, pos %d\n", __func__, hash, pos); + pr_debug("%s: index %d -> mac %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, idx, + e->mac[0], e->mac[1], e->mac[2], e->mac[3],e->mac[4],e->mac[5]); + + rtl930x_fill_l2_row(r, e); + + for (i= 0; i < 3; i++) + sw_w32(r[i], rtl_table_data(q, i)); + + rtl_table_write(q, idx); + rtl_table_release(q); +} + +static u64 rtl930x_read_cam(int idx, struct rtl838x_l2_entry *e) +{ + u32 r[3]; + struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 1); + int i; + + rtl_table_read(q, idx); + for (i= 0; i < 3; i++) + r[i] = sw_r32(rtl_table_data(q, i)); + + rtl_table_release(q); + + rtl930x_fill_l2_entry(r, e); + if (!e->valid) + return 0; + + // return mac with concatenated vid as unique id + return ((u64)r[0] << 28) | ((r[1] & 0xffff0000) >> 4) | e->vid; +} + +static void rtl930x_write_cam(int idx, struct rtl838x_l2_entry *e) +{ + u32 r[3]; + struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 1); // Access L2 Table 1 + int i; + + rtl930x_fill_l2_row(r, e); + + for (i= 0; i < 3; i++) + sw_w32(r[i], rtl_table_data(q, i)); + + rtl_table_write(q, idx); + rtl_table_release(q); +} + +static u64 rtl930x_read_mcast_pmask(int idx) +{ + u32 portmask; + // Read MC_PORTMASK (2) via register RTL9300_TBL_L2 + struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 2); + + rtl_table_read(q, idx); + portmask = sw_r32(rtl_table_data(q, 0)); + portmask >>= 3; + rtl_table_release(q); + + pr_debug("%s: Index idx %d has portmask %08x\n", __func__, idx, portmask); + return portmask; +} + +static void rtl930x_write_mcast_pmask(int idx, u64 portmask) +{ + u32 pm = portmask; + + // Access MC_PORTMASK (2) via register RTL9300_TBL_L2 + struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 2); + + pr_debug("%s: Index idx %d has portmask %08x\n", __func__, idx, pm); + pm <<= 3; + sw_w32(pm, rtl_table_data(q, 0)); + rtl_table_write(q, idx); + rtl_table_release(q); +} + +u64 rtl930x_traffic_get(int source) +{ + u32 v; + struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 6); + + rtl_table_read(r, source); + v = sw_r32(rtl_table_data(r, 0)); + rtl_table_release(r); + return v >> 3; +} + +/* + * Enable traffic between a source port and a destination port matrix + */ +void rtl930x_traffic_set(int source, u64 dest_matrix) +{ + struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 6); + + sw_w32((dest_matrix << 3), rtl_table_data(r, 0)); + rtl_table_write(r, source); + rtl_table_release(r); +} + +void rtl930x_traffic_enable(int source, int dest) +{ + struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 6); + rtl_table_read(r, source); + sw_w32_mask(0, BIT(dest + 3), rtl_table_data(r, 0)); + rtl_table_write(r, source); + rtl_table_release(r); +} + +void rtl930x_traffic_disable(int source, int dest) +{ + struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 6); + rtl_table_read(r, source); + sw_w32_mask(BIT(dest + 3), 0, rtl_table_data(r, 0)); + rtl_table_write(r, source); + rtl_table_release(r); +} + +void rtl9300_dump_debug(void) +{ + int i; + u16 r = RTL930X_STAT_PRVTE_DROP_COUNTER0; + + for (i = 0; i < 10; i ++) { + pr_info("# %d %08x %08x %08x %08x %08x %08x %08x %08x\n", i * 8, + sw_r32(r), sw_r32(r + 4), sw_r32(r + 8), sw_r32(r + 12), + sw_r32(r + 16), sw_r32(r + 
20), sw_r32(r + 24), sw_r32(r + 28)); + r += 32; + } + pr_info("# %08x %08x %08x %08x %08x\n", + sw_r32(r), sw_r32(r + 4), sw_r32(r + 8), sw_r32(r + 12), sw_r32(r + 16)); + rtl930x_print_matrix(); + pr_info("RTL930X_L2_PORT_SABLK_CTRL: %08x, RTL930X_L2_PORT_DABLK_CTRL %08x\n", + sw_r32(RTL930X_L2_PORT_SABLK_CTRL), sw_r32(RTL930X_L2_PORT_DABLK_CTRL) + + ); +} + +irqreturn_t rtl930x_switch_irq(int irq, void *dev_id) +{ + struct dsa_switch *ds = dev_id; + u32 ports = sw_r32(RTL930X_ISR_PORT_LINK_STS_CHG); + u32 link; + int i; + + /* Clear status */ + sw_w32(ports, RTL930X_ISR_PORT_LINK_STS_CHG); + + for (i = 0; i < 28; i++) { + if (ports & BIT(i)) { + /* Read the register twice because of issues with latency at least + * with the external RTL8226 PHY on the XGS1210 */ + link = sw_r32(RTL930X_MAC_LINK_STS); + link = sw_r32(RTL930X_MAC_LINK_STS); + if (link & BIT(i)) + dsa_port_phylink_mac_change(ds, i, true); + else + dsa_port_phylink_mac_change(ds, i, false); + } + } + + return IRQ_HANDLED; +} + +int rtl930x_write_phy(u32 port, u32 page, u32 reg, u32 val) +{ + u32 v; + int err = 0; + + pr_debug("%s: port %d, page: %d, reg: %x, val: %x\n", __func__, port, page, reg, val); + + if (port > 63 || page > 4095 || reg > 31) + return -ENOTSUPP; + + val &= 0xffff; + mutex_lock(&smi_lock); + + sw_w32(BIT(port), RTL930X_SMI_ACCESS_PHY_CTRL_0); + sw_w32_mask(0xffff << 16, val << 16, RTL930X_SMI_ACCESS_PHY_CTRL_2); + v = reg << 20 | page << 3 | 0x1f << 15 | BIT(2) | BIT(0); + sw_w32(v, RTL930X_SMI_ACCESS_PHY_CTRL_1); + + do { + v = sw_r32(RTL930X_SMI_ACCESS_PHY_CTRL_1); + } while (v & 0x1); + + if (v & 0x2) + err = -EIO; + + mutex_unlock(&smi_lock); + + return err; +} + +int rtl930x_read_phy(u32 port, u32 page, u32 reg, u32 *val) +{ + u32 v; + int err = 0; + + if (port > 63 || page > 4095 || reg > 31) + return -ENOTSUPP; + + mutex_lock(&smi_lock); + + sw_w32_mask(0xffff << 16, port << 16, RTL930X_SMI_ACCESS_PHY_CTRL_2); + v = reg << 20 | page << 3 | 0x1f << 15 | 1; + sw_w32(v, RTL930X_SMI_ACCESS_PHY_CTRL_1); + + do { + v = sw_r32(RTL930X_SMI_ACCESS_PHY_CTRL_1); + } while ( v & 0x1); + + if (v & BIT(25)) { + pr_debug("Error reading phy %d, register %d\n", port, reg); + err = -EIO; + } + *val = (sw_r32(RTL930X_SMI_ACCESS_PHY_CTRL_2) & 0xffff); + + pr_debug("%s: port %d, page: %d, reg: %x, val: %x\n", __func__, port, page, reg, *val); + + mutex_unlock(&smi_lock); + + return err; +} + +/* + * Write to an mmd register of the PHY + */ +int rtl930x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val) +{ + int err = 0; + u32 v; + + mutex_lock(&smi_lock); + + // Set PHY to access + sw_w32(BIT(port), RTL930X_SMI_ACCESS_PHY_CTRL_0); + + // Set data to write + sw_w32_mask(0xffff << 16, val << 16, RTL930X_SMI_ACCESS_PHY_CTRL_2); + + // Set MMD device number and register to write to + sw_w32(devnum << 16 | (regnum & 0xffff), RTL930X_SMI_ACCESS_PHY_CTRL_3); + + v = BIT(2) | BIT(1) | BIT(0); // WRITE | MMD-access | EXEC + sw_w32(v, RTL930X_SMI_ACCESS_PHY_CTRL_1); + + do { + v = sw_r32(RTL930X_SMI_ACCESS_PHY_CTRL_1); + } while (v & BIT(0)); + + pr_debug("%s: port %d, regnum: %x, val: %x (err %d)\n", __func__, port, regnum, val, err); + mutex_unlock(&smi_lock); + return err; +} + +/* + * Read an mmd register of the PHY + */ +int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val) +{ + int err = 0; + u32 v; + + mutex_lock(&smi_lock); + + // Set PHY to access + sw_w32_mask(0xffff << 16, port << 16, RTL930X_SMI_ACCESS_PHY_CTRL_2); + + // Set MMD device number and register to write to + sw_w32(devnum << 16 | 
(regnum & 0xffff), RTL930X_SMI_ACCESS_PHY_CTRL_3); + + v = BIT(1) | BIT(0); // MMD-access | EXEC + sw_w32(v, RTL930X_SMI_ACCESS_PHY_CTRL_1); + + do { + v = sw_r32(RTL930X_SMI_ACCESS_PHY_CTRL_1); + } while (v & BIT(0)); + // There is no error-checking via BIT 25 of v, as it does not seem to be set correctly + *val = (sw_r32(RTL930X_SMI_ACCESS_PHY_CTRL_2) & 0xffff); + pr_debug("%s: port %d, regnum: %x, val: %x (err %d)\n", __func__, port, regnum, *val, err); + + mutex_unlock(&smi_lock); + + return err; +} + +/* + * Calculate both the block 0 and the block 1 hash, and return in + * lower and higher word of the return value since only 12 bit of + * the hash are significant + */ +u32 rtl930x_hash(struct rtl838x_switch_priv *priv, u64 seed) +{ + u32 k0, k1, h1, h2, h; + + k0 = (u32) (((seed >> 55) & 0x1f) ^ ((seed >> 44) & 0x7ff) + ^ ((seed >> 33) & 0x7ff) ^ ((seed >> 22) & 0x7ff) + ^ ((seed >> 11) & 0x7ff) ^ (seed & 0x7ff)); + + h1 = (seed >> 11) & 0x7ff; + h1 = ((h1 & 0x1f) << 6) | ((h1 >> 5) & 0x3f); + + h2 = (seed >> 33) & 0x7ff; + h2 = ((h2 & 0x3f) << 5)| ((h2 >> 6) & 0x3f); + + k1 = (u32) (((seed << 55) & 0x1f) ^ ((seed >> 44) & 0x7ff) ^ h2 + ^ ((seed >> 22) & 0x7ff) ^ h1 + ^ (seed & 0x7ff)); + + // Algorithm choice for block 0 + if (sw_r32(RTL930X_L2_CTRL) & BIT(0)) + h = k1; + else + h = k0; + + /* Algorithm choice for block 1 + * Since k0 and k1 are < 2048, adding 2048 will offset the hash into the second + * half of hash-space + * 2048 is in fact the hash-table size 16384 divided by 4 hashes per bucket + * divided by 2 to divide the hash space in 2 + */ + if (sw_r32(RTL930X_L2_CTRL) & BIT(1)) + h |= (k1 + 2048) << 16; + else + h |= (k0 + 2048) << 16; + + return h; +} + +/* + * Enables or disables the EEE/EEEP capability of a port + */ +void rtl930x_port_eee_set(struct rtl838x_switch_priv *priv, int port, bool enable) +{ + u32 v; + + // This works only for Ethernet ports, and on the RTL930X, ports from 26 are SFP + if (port >= 26) + return; + + pr_debug("In %s: setting port %d to %d\n", __func__, port, enable); + v = enable ? 0x3f : 0x0; + + // Set EEE/EEEP state for 100, 500, 1000MBit and 2.5, 5 and 10GBit + sw_w32_mask(0, v << 10, rtl930x_mac_force_mode_ctrl(port)); + + // Set TX/RX EEE state + v = enable ? 
0x3 : 0x0; + sw_w32(v, RTL930X_EEE_CTRL(port)); + + priv->ports[port].eee_enabled = enable; +} + +/* + * Get EEE own capabilities and negotiation result + */ +int rtl930x_eee_port_ability(struct rtl838x_switch_priv *priv, struct ethtool_eee *e, int port) +{ + u32 link, a; + + if (port >= 26) + return -ENOTSUPP; + + pr_info("In %s, port %d\n", __func__, port); + link = sw_r32(RTL930X_MAC_LINK_STS); + link = sw_r32(RTL930X_MAC_LINK_STS); + if (!(link & BIT(port))) + return 0; + + pr_info("Setting advertised\n"); + if (sw_r32(rtl930x_mac_force_mode_ctrl(port)) & BIT(10)) + e->advertised |= ADVERTISED_100baseT_Full; + + if (sw_r32(rtl930x_mac_force_mode_ctrl(port)) & BIT(12)) + e->advertised |= ADVERTISED_1000baseT_Full; + + if (priv->ports[port].is2G5 && sw_r32(rtl930x_mac_force_mode_ctrl(port)) & BIT(13)) { + pr_info("ADVERTISING 2.5G EEE\n"); + e->advertised |= ADVERTISED_2500baseX_Full; + } + + if (priv->ports[port].is10G && sw_r32(rtl930x_mac_force_mode_ctrl(port)) & BIT(15)) + e->advertised |= ADVERTISED_10000baseT_Full; + + a = sw_r32(RTL930X_MAC_EEE_ABLTY); + a = sw_r32(RTL930X_MAC_EEE_ABLTY); + pr_info("Link partner: %08x\n", a); + if (a & BIT(port)) { + e->lp_advertised = ADVERTISED_100baseT_Full; + e->lp_advertised |= ADVERTISED_1000baseT_Full; + if (priv->ports[port].is2G5) + e->lp_advertised |= ADVERTISED_2500baseX_Full; + if (priv->ports[port].is10G) + e->lp_advertised |= ADVERTISED_10000baseT_Full; + } + + // Read 2x to clear latched state + a = sw_r32(RTL930X_EEEP_PORT_CTRL(port)); + a = sw_r32(RTL930X_EEEP_PORT_CTRL(port)); + pr_info("%s RTL930X_EEEP_PORT_CTRL: %08x\n", __func__, a); + + return 0; +} + +static void rtl930x_init_eee(struct rtl838x_switch_priv *priv, bool enable) +{ + int i; + + pr_info("Setting up EEE, state: %d\n", enable); + + // Setup EEE on all ports + for (i = 0; i < priv->cpu_port; i++) { + if (priv->ports[i].phy) + rtl930x_port_eee_set(priv, i, enable); + } + + priv->eee_enabled = enable; +} +#define HASH_PICK(val, lsb, len) ((val & (((1 << len) - 1) << lsb)) >> lsb) + +static u32 rtl930x_l3_hash4(u32 ip, int algorithm, bool move_dip) +{ + u32 rows[4]; + u32 hash; + u32 s0, s1, pH; + + memset(rows, 0, sizeof(rows)); + + rows[0] = HASH_PICK(ip, 27, 5); + rows[1] = HASH_PICK(ip, 18, 9); + rows[2] = HASH_PICK(ip, 9, 9); + + if (!move_dip) + rows[3] = HASH_PICK(ip, 0, 9); + + if (!algorithm) { + hash = rows[0] ^ rows[1] ^ rows[2] ^ rows[3]; + } else { + s0 = rows[0] + rows[1] + rows[2]; + s1 = (s0 & 0x1ff) + ((s0 & (0x1ff << 9)) >> 9); + pH = (s1 & 0x1ff) + ((s1 & (0x1ff << 9)) >> 9); + hash = pH ^ rows[3]; + } + return hash; +} + +static u32 rtl930x_l3_hash6(struct in6_addr *ip6, int algorithm, bool move_dip) +{ + u32 rows[16]; + u32 hash; + u32 s0, s1, pH; + + rows[0] = (HASH_PICK(ip6->s6_addr[0], 6, 2) << 0); + rows[1] = (HASH_PICK(ip6->s6_addr[0], 0, 6) << 3) | HASH_PICK(ip6->s6_addr[1], 5, 3); + rows[2] = (HASH_PICK(ip6->s6_addr[1], 0, 5) << 4) | HASH_PICK(ip6->s6_addr[2], 4, 4); + rows[3] = (HASH_PICK(ip6->s6_addr[2], 0, 4) << 5) | HASH_PICK(ip6->s6_addr[3], 3, 5); + rows[4] = (HASH_PICK(ip6->s6_addr[3], 0, 3) << 6) | HASH_PICK(ip6->s6_addr[4], 2, 6); + rows[5] = (HASH_PICK(ip6->s6_addr[4], 0, 2) << 7) | HASH_PICK(ip6->s6_addr[5], 1, 7); + rows[6] = (HASH_PICK(ip6->s6_addr[5], 0, 1) << 8) | HASH_PICK(ip6->s6_addr[6], 0, 8); + rows[7] = (HASH_PICK(ip6->s6_addr[7], 0, 8) << 1) | HASH_PICK(ip6->s6_addr[8], 7, 1); + rows[8] = (HASH_PICK(ip6->s6_addr[8], 0, 7) << 2) | HASH_PICK(ip6->s6_addr[9], 6, 2); + rows[9] = (HASH_PICK(ip6->s6_addr[9], 0, 6) << 3) | 
HASH_PICK(ip6->s6_addr[10], 5, 3); + rows[10] = (HASH_PICK(ip6->s6_addr[10], 0, 5) << 4) | HASH_PICK(ip6->s6_addr[11], 4, 4); + if (!algorithm) { + rows[11] = (HASH_PICK(ip6->s6_addr[11], 0, 4) << 5) + | (HASH_PICK(ip6->s6_addr[12], 3, 5) << 0); + rows[12] = (HASH_PICK(ip6->s6_addr[12], 0, 3) << 6) + | (HASH_PICK(ip6->s6_addr[13], 2, 6) << 0); + rows[13] = (HASH_PICK(ip6->s6_addr[13], 0, 2) << 7) + | (HASH_PICK(ip6->s6_addr[14], 1, 7) << 0); + if (!move_dip) { + rows[14] = (HASH_PICK(ip6->s6_addr[14], 0, 1) << 8) + | (HASH_PICK(ip6->s6_addr[15], 0, 8) << 0); + } + hash = rows[0] ^ rows[1] ^ rows[2] ^ rows[3] ^ rows[4] ^ rows[5] ^ rows[6] + ^ rows[7] ^ rows[8] ^ rows[9] ^ rows[10] ^ rows[11] ^ rows[12] + ^ rows[13] ^ rows[14]; + } else { + rows[11] = (HASH_PICK(ip6->s6_addr[11], 0, 4) << 5); + rows[12] = (HASH_PICK(ip6->s6_addr[12], 3, 5) << 0); + rows[13] = (HASH_PICK(ip6->s6_addr[12], 0, 3) << 6) + | HASH_PICK(ip6->s6_addr[13], 2, 6); + rows[14] = (HASH_PICK(ip6->s6_addr[13], 0, 2) << 7) + | HASH_PICK(ip6->s6_addr[14], 1, 7); + if (!move_dip) { + rows[15] = (HASH_PICK(ip6->s6_addr[14], 0, 1) << 8) + | (HASH_PICK(ip6->s6_addr[15], 0, 8) << 0); + } + s0 = rows[12] + rows[13] + rows[14]; + s1 = (s0 & 0x1ff) + ((s0 & (0x1ff << 9)) >> 9); + pH = (s1 & 0x1ff) + ((s1 & (0x1ff << 9)) >> 9); + hash = rows[0] ^ rows[1] ^ rows[2] ^ rows[3] ^ rows[4] ^ rows[5] ^ rows[6] + ^ rows[7] ^ rows[8] ^ rows[9] ^ rows[10] ^ rows[11] ^ pH ^ rows[15]; + } + return hash; +} + +/* + * Read a prefix route entry from the L3_PREFIX_ROUTE_IPUC table + * We currently only support IPv4 and IPv6 unicast route + */ +static void rtl930x_route_read(int idx, struct rtl83xx_route *rt) +{ + u32 v, ip4_m; + bool host_route, default_route; + struct in6_addr ip6_m; + + // Read L3_PREFIX_ROUTE_IPUC table (2) via register RTL9300_TBL_1 + struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 2); + + rtl_table_read(r, idx); + // The table has a size of 11 registers + rt->attr.valid = !!(sw_r32(rtl_table_data(r, 0)) & BIT(31)); + if (!rt->attr.valid) + goto out; + + rt->attr.type = (sw_r32(rtl_table_data(r, 0)) >> 29) & 0x3; + + v = sw_r32(rtl_table_data(r, 10)); + host_route = !!(v & BIT(21)); + default_route = !!(v & BIT(20)); + rt->prefix_len = -1; + pr_info("%s: host route %d, default_route %d\n", __func__, host_route, default_route); + + switch (rt->attr.type) { + case 0: // IPv4 Unicast route + rt->dst_ip = sw_r32(rtl_table_data(r, 4)); + ip4_m = sw_r32(rtl_table_data(r, 9)); + pr_info("%s: Read ip4 mask: %08x\n", __func__, ip4_m); + rt->prefix_len = host_route ? 32 : -1; + rt->prefix_len = (rt->prefix_len < 0 && default_route) ? 0 : -1; + if (rt->prefix_len < 0) + rt->prefix_len = inet_mask_len(ip4_m); + break; + case 2: // IPv6 Unicast route + ipv6_addr_set(&rt->dst_ip6, + sw_r32(rtl_table_data(r, 1)), sw_r32(rtl_table_data(r, 2)), + sw_r32(rtl_table_data(r, 3)), sw_r32(rtl_table_data(r, 4))); + ipv6_addr_set(&ip6_m, + sw_r32(rtl_table_data(r, 6)), sw_r32(rtl_table_data(r, 7)), + sw_r32(rtl_table_data(r, 8)), sw_r32(rtl_table_data(r, 9))); + rt->prefix_len = host_route ? 128 : 0; + rt->prefix_len = (rt->prefix_len < 0 && default_route) ? 
0 : -1; + if (rt->prefix_len < 0) + rt->prefix_len = find_last_bit((unsigned long int *)&ip6_m.s6_addr32, + 128); + break; + case 1: // IPv4 Multicast route + case 3: // IPv6 Multicast route + pr_warn("%s: route type not supported\n", __func__); + goto out; + } + + rt->attr.hit = !!(v & BIT(22)); + rt->attr.action = (v >> 18) & 3; + rt->nh.id = (v >> 7) & 0x7ff; + rt->attr.ttl_dec = !!(v & BIT(6)); + rt->attr.ttl_check = !!(v & BIT(5)); + rt->attr.dst_null = !!(v & BIT(4)); + rt->attr.qos_as = !!(v & BIT(3)); + rt->attr.qos_prio = v & 0x7; + pr_info("%s: index %d is valid: %d\n", __func__, idx, rt->attr.valid); + pr_info("%s: next_hop: %d, hit: %d, action :%d, ttl_dec %d, ttl_check %d, dst_null %d\n", + __func__, rt->nh.id, rt->attr.hit, rt->attr.action, + rt->attr.ttl_dec, rt->attr.ttl_check, rt->attr.dst_null); + pr_info("%s: GW: %pI4, prefix_len: %d\n", __func__, &rt->dst_ip, rt->prefix_len); +out: + rtl_table_release(r); +} + +static void rtl930x_net6_mask(int prefix_len, struct in6_addr *ip6_m) +{ + int o, b; + // Define network mask + o = prefix_len >> 3; + b = prefix_len & 0x7; + memset(ip6_m->s6_addr, 0xff, o); + ip6_m->s6_addr[o] |= b ? 0xff00 >> b : 0x00; +} + +/* + * Read a host route entry from the table using its index + * We currently only support IPv4 and IPv6 unicast route + */ +static void rtl930x_host_route_read(int idx, struct rtl83xx_route *rt) +{ + u32 v; + // Read L3_HOST_ROUTE_IPUC table (1) via register RTL9300_TBL_1 + struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 1); + + idx = ((idx / 6) * 8) + (idx % 6); + + pr_debug("In %s, physical index %d\n", __func__, idx); + rtl_table_read(r, idx); + // The table has a size of 5 (for UC, 11 for MC) registers + v = sw_r32(rtl_table_data(r, 0)); + rt->attr.valid = !!(v & BIT(31)); + if (!rt->attr.valid) + goto out; + rt->attr.type = (v >> 29) & 0x3; + switch (rt->attr.type) { + case 0: // IPv4 Unicast route + rt->dst_ip = sw_r32(rtl_table_data(r, 4)); + break; + case 2: // IPv6 Unicast route + ipv6_addr_set(&rt->dst_ip6, + sw_r32(rtl_table_data(r, 3)), sw_r32(rtl_table_data(r, 2)), + sw_r32(rtl_table_data(r, 1)), sw_r32(rtl_table_data(r, 0))); + break; + case 1: // IPv4 Multicast route + case 3: // IPv6 Multicast route + pr_warn("%s: route type not supported\n", __func__); + goto out; + } + + rt->attr.hit = !!(v & BIT(20)); + rt->attr.dst_null = !!(v & BIT(19)); + rt->attr.action = (v >> 17) & 3; + rt->nh.id = (v >> 6) & 0x7ff; + rt->attr.ttl_dec = !!(v & BIT(5)); + rt->attr.ttl_check = !!(v & BIT(4)); + rt->attr.qos_as = !!(v & BIT(3)); + rt->attr.qos_prio = v & 0x7; + pr_debug("%s: index %d is valid: %d\n", __func__, idx, rt->attr.valid); + pr_debug("%s: next_hop: %d, hit: %d, action :%d, ttl_dec %d, ttl_check %d, dst_null %d\n", + __func__, rt->nh.id, rt->attr.hit, rt->attr.action, rt->attr.ttl_dec, rt->attr.ttl_check, + rt->attr.dst_null); + pr_debug("%s: Destination: %pI4\n", __func__, &rt->dst_ip); + +out: + rtl_table_release(r); +} + +/* + * Write a host route entry from the table using its index + * We currently only support IPv4 and IPv6 unicast route + */ +static void rtl930x_host_route_write(int idx, struct rtl83xx_route *rt) +{ + u32 v; + // Access L3_HOST_ROUTE_IPUC table (1) via register RTL9300_TBL_1 + struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 1); + // The table has a size of 5 (for UC, 11 for MC) registers + + idx = ((idx / 6) * 8) + (idx % 6); + + pr_debug("%s: index %d is valid: %d\n", __func__, idx, rt->attr.valid); + pr_debug("%s: next_hop: %d, hit: %d, action :%d, ttl_dec %d, ttl_check %d, 
dst_null %d\n", + __func__, rt->nh.id, rt->attr.hit, rt->attr.action, rt->attr.ttl_dec, rt->attr.ttl_check, + rt->attr.dst_null); + pr_debug("%s: GW: %pI4, prefix_len: %d\n", __func__, &rt->dst_ip, rt->prefix_len); + + v = BIT(31); // Entry is valid + v |= (rt->attr.type & 0x3) << 29; + v |= rt->attr.hit ? BIT(20) : 0; + v |= rt->attr.dst_null ? BIT(19) : 0; + v |= (rt->attr.action & 0x3) << 17; + v |= (rt->nh.id & 0x7ff) << 6; + v |= rt->attr.ttl_dec ? BIT(5) : 0; + v |= rt->attr.ttl_check ? BIT(4) : 0; + v |= rt->attr.qos_as ? BIT(3) : 0; + v |= rt->attr.qos_prio & 0x7; + + sw_w32(v, rtl_table_data(r, 0)); + switch (rt->attr.type) { + case 0: // IPv4 Unicast route + sw_w32(0, rtl_table_data(r, 1)); + sw_w32(0, rtl_table_data(r, 2)); + sw_w32(0, rtl_table_data(r, 3)); + sw_w32(rt->dst_ip, rtl_table_data(r, 4)); + break; + case 2: // IPv6 Unicast route + sw_w32(rt->dst_ip6.s6_addr32[0], rtl_table_data(r, 1)); + sw_w32(rt->dst_ip6.s6_addr32[1], rtl_table_data(r, 2)); + sw_w32(rt->dst_ip6.s6_addr32[2], rtl_table_data(r, 3)); + sw_w32(rt->dst_ip6.s6_addr32[3], rtl_table_data(r, 4)); + break; + case 1: // IPv4 Multicast route + case 3: // IPv6 Multicast route + pr_warn("%s: route type not supported\n", __func__); + goto out; + } + + rtl_table_write(r, idx); + +out: + rtl_table_release(r); +} + +/* + * Look up the index of a prefix route in the routing table CAM for unicast IPv4/6 routes + * using hardware offload. + */ +static int rtl930x_route_lookup_hw(struct rtl83xx_route *rt) +{ + u32 ip4_m, v; + struct in6_addr ip6_m; + int i; + + if (rt->attr.type == 1 || rt->attr.type == 3) // Hardware only supports UC routes + return -1; + + sw_w32_mask(0x3 << 19, rt->attr.type, RTL930X_L3_HW_LU_KEY_CTRL); + if (rt->attr.type) { // IPv6 + rtl930x_net6_mask(rt->prefix_len, &ip6_m); + for (i = 0; i < 4; i++) + sw_w32(rt->dst_ip6.s6_addr32[0] & ip6_m.s6_addr32[0], + RTL930X_L3_HW_LU_KEY_IP_CTRL + (i << 2)); + } else { // IPv4 + ip4_m = inet_make_mask(rt->prefix_len); + sw_w32(0, RTL930X_L3_HW_LU_KEY_IP_CTRL); + sw_w32(0, RTL930X_L3_HW_LU_KEY_IP_CTRL + 4); + sw_w32(0, RTL930X_L3_HW_LU_KEY_IP_CTRL + 8); + v = rt->dst_ip & ip4_m; + pr_info("%s: searching for %pI4\n", __func__, &v); + sw_w32(v, RTL930X_L3_HW_LU_KEY_IP_CTRL + 12); + } + + // Execute CAM lookup in SoC + sw_w32(BIT(15), RTL930X_L3_HW_LU_CTRL); + + // Wait until execute bit clears and result is ready + do { + v = sw_r32(RTL930X_L3_HW_LU_CTRL); + } while (v & BIT(15)); + + pr_info("%s: found: %d, index: %d\n", __func__, !!(v & BIT(14)), v & 0x1ff); + + // Test if search successful (BIT 14 set) + if (v & BIT(14)) + return v & 0x1ff; + + return -1; +} + +static int rtl930x_find_l3_slot(struct rtl83xx_route *rt, bool must_exist) +{ + int t, s, slot_width, algorithm, addr, idx; + u32 hash; + struct rtl83xx_route route_entry; + + // IPv6 entries take up 3 slots + slot_width = (rt->attr.type == 0) || (rt->attr.type == 2) ? 
1 : 3; + + for (t = 0; t < 2; t++) { + algorithm = (sw_r32(RTL930X_L3_HOST_TBL_CTRL) >> (2 + t)) & 0x1; + hash = rtl930x_l3_hash4(rt->dst_ip, algorithm, false); + + pr_debug("%s: table %d, algorithm %d, hash %04x\n", __func__, t, algorithm, hash); + + for (s = 0; s < 6; s += slot_width) { + addr = (t << 12) | ((hash & 0x1ff) << 3) | s; + pr_debug("%s physical address %d\n", __func__, addr); + idx = ((addr / 8) * 6) + (addr % 8); + pr_debug("%s logical address %d\n", __func__, idx); + + rtl930x_host_route_read(idx, &route_entry); + pr_debug("%s route valid %d, route dest: %pI4, hit %d\n", __func__, + rt->attr.valid, &rt->dst_ip, rt->attr.hit); + if (!must_exist && rt->attr.valid) + return idx; + if (must_exist && route_entry.dst_ip == rt->dst_ip) + return idx; + } + } + + return -1; +} + +/* + * Write a prefix route into the routing table CAM at position idx + * Currently only IPv4 and IPv6 unicast routes are supported + */ +static void rtl930x_route_write(int idx, struct rtl83xx_route *rt) +{ + u32 v, ip4_m; + struct in6_addr ip6_m; + // Access L3_PREFIX_ROUTE_IPUC table (2) via register RTL9300_TBL_1 + // The table has a size of 11 registers (20 for MC) + struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 2); + + pr_debug("%s: index %d is valid: %d\n", __func__, idx, rt->attr.valid); + pr_debug("%s: nexthop: %d, hit: %d, action :%d, ttl_dec %d, ttl_check %d, dst_null %d\n", + __func__, rt->nh.id, rt->attr.hit, rt->attr.action, + rt->attr.ttl_dec, rt->attr.ttl_check, rt->attr.dst_null); + pr_debug("%s: GW: %pI4, prefix_len: %d\n", __func__, &rt->dst_ip, rt->prefix_len); + + v = rt->attr.valid ? BIT(31) : 0; + v |= (rt->attr.type & 0x3) << 29; + sw_w32(v, rtl_table_data(r, 0)); + + v = rt->attr.hit ? BIT(22) : 0; + v |= (rt->attr.action & 0x3) << 18; + v |= (rt->nh.id & 0x7ff) << 7; + v |= rt->attr.ttl_dec ? BIT(6) : 0; + v |= rt->attr.ttl_check ? BIT(5) : 0; + v |= rt->attr.dst_null ? BIT(6) : 0; + v |= rt->attr.qos_as ? BIT(6) : 0; + v |= rt->attr.qos_prio & 0x7; + v |= rt->prefix_len == 0 ? BIT(20) : 0; // set default route bit + + // set bit mask for entry type always to 0x3 + sw_w32(0x3 << 29, rtl_table_data(r, 5)); + + switch (rt->attr.type) { + case 0: // IPv4 Unicast route + sw_w32(0, rtl_table_data(r, 1)); + sw_w32(0, rtl_table_data(r, 2)); + sw_w32(0, rtl_table_data(r, 3)); + sw_w32(rt->dst_ip, rtl_table_data(r, 4)); + + v |= rt->prefix_len == 32 ? BIT(21) : 0; // set host-route bit + ip4_m = inet_make_mask(rt->prefix_len); + sw_w32(0, rtl_table_data(r, 6)); + sw_w32(0, rtl_table_data(r, 7)); + sw_w32(0, rtl_table_data(r, 8)); + sw_w32(ip4_m, rtl_table_data(r, 9)); + break; + case 2: // IPv6 Unicast route + sw_w32(rt->dst_ip6.s6_addr32[0], rtl_table_data(r, 1)); + sw_w32(rt->dst_ip6.s6_addr32[1], rtl_table_data(r, 2)); + sw_w32(rt->dst_ip6.s6_addr32[2], rtl_table_data(r, 3)); + sw_w32(rt->dst_ip6.s6_addr32[3], rtl_table_data(r, 4)); + + v |= rt->prefix_len == 128 ? 
BIT(21) : 0; // set host-route bit + + rtl930x_net6_mask(rt->prefix_len, &ip6_m); + + sw_w32(ip6_m.s6_addr32[0], rtl_table_data(r, 6)); + sw_w32(ip6_m.s6_addr32[1], rtl_table_data(r, 7)); + sw_w32(ip6_m.s6_addr32[2], rtl_table_data(r, 8)); + sw_w32(ip6_m.s6_addr32[3], rtl_table_data(r, 9)); + break; + case 1: // IPv4 Multicast route + case 3: // IPv6 Multicast route + pr_warn("%s: route type not supported\n", __func__); + rtl_table_release(r); + return; + } + sw_w32(v, rtl_table_data(r, 10)); + + pr_debug("%s: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", __func__, + sw_r32(rtl_table_data(r, 0)), sw_r32(rtl_table_data(r, 1)), sw_r32(rtl_table_data(r, 2)), + sw_r32(rtl_table_data(r, 3)), sw_r32(rtl_table_data(r, 4)), sw_r32(rtl_table_data(r, 5)), + sw_r32(rtl_table_data(r, 6)), sw_r32(rtl_table_data(r, 7)), sw_r32(rtl_table_data(r, 8)), + sw_r32(rtl_table_data(r, 9)), sw_r32(rtl_table_data(r, 10))); + + rtl_table_write(r, idx); + rtl_table_release(r); +} + + +/* + * Get the destination MAC and L3 egress interface ID of a nexthop entry from + * the SoC's L3_NEXTHOP table + */ +static void rtl930x_get_l3_nexthop(int idx, u16 *dmac_id, u16 *interface) +{ + u32 v; + // Read L3_NEXTHOP table (3) via register RTL9300_TBL_1 + struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 3); + + rtl_table_read(r, idx); + // The table has a size of 1 register + v = sw_r32(rtl_table_data(r, 0)); + rtl_table_release(r); + + *dmac_id = (v >> 7) & 0x7fff; + *interface = v & 0x7f; +} + +static int rtl930x_l3_mtu_del(struct rtl838x_switch_priv *priv, int mtu) +{ + int i; + + for (i = 0; i < MAX_INTF_MTUS; i++) { + if (mtu == priv->intf_mtus[i]) + break; + } + if (i >= MAX_INTF_MTUS || !priv->intf_mtu_count[i]) { + pr_err("%s: No MTU slot found for MTU: %d\n", __func__, mtu); + return -EINVAL; + } + + priv->intf_mtu_count[i]--; +} + +static int rtl930x_l3_mtu_add(struct rtl838x_switch_priv *priv, int mtu) +{ + int i, free_mtu; + int mtu_id; + + // Try to find an existing mtu-value or a free slot + free_mtu = MAX_INTF_MTUS; + for (i = 0; i < MAX_INTF_MTUS && priv->intf_mtus[i] != mtu; i++) { + if ((!priv->intf_mtu_count[i]) && (free_mtu == MAX_INTF_MTUS)) + free_mtu = i; + } + i = (i < MAX_INTF_MTUS) ? i : free_mtu; + if (i < MAX_INTF_MTUS) { + mtu_id = i; + } else { + pr_err("%s: No free MTU slot available!\n", __func__); + return -EINVAL; + } + + priv->intf_mtus[i] = mtu; + pr_info("Writing MTU %d to slot %d\n", priv->intf_mtus[i], i); + // Set MTU-value of the slot TODO: distinguish between IPv4/IPv6 routes / slots + sw_w32_mask(0xffff << ((i % 2) * 16), priv->intf_mtus[i] << ((i % 2) * 16), + RTL930X_L3_IP_MTU_CTRL(i)); + sw_w32_mask(0xffff << ((i % 2) * 16), priv->intf_mtus[i] << ((i % 2) * 16), + RTL930X_L3_IP6_MTU_CTRL(i)); + + priv->intf_mtu_count[i]++; + + return mtu_id; +} + +/* + * Creates an interface for a route by setting up the HW tables in the SoC + */ +static int rtl930x_l3_intf_add(struct rtl838x_switch_priv *priv, struct rtl838x_l3_intf *intf) +{ + int i, intf_id, mtu_id; + // number of MTU-values < 16384 + + // Use the same IPv6 mtu as the ip4 mtu for this route if unset + intf->ip6_mtu = intf->ip6_mtu ? 
intf->ip6_mtu : intf->ip4_mtu; + + mtu_id = rtl930x_l3_mtu_add(priv, intf->ip4_mtu); + pr_info("%s: added mtu %d with mtu-id %d\n", __func__, intf->ip4_mtu, mtu_id); + if (mtu_id < 0) + return -ENOSPC; + intf->ip4_mtu_id = mtu_id; + intf->ip6_mtu_id = mtu_id; + + for (i = 0; i < MAX_INTERFACES; i++) { + if (!priv->interfaces[i]) + break; + } + if (i >= MAX_INTERFACES) { + pr_err("%s: cannot find free interface entry\n", __func__); + return -EINVAL; + } + intf_id = i; + priv->interfaces[i] = kzalloc(sizeof(struct rtl838x_l3_intf), GFP_KERNEL); + if (!priv->interfaces[i]) { + pr_err("%s: no memory to allocate new interface\n", __func__); + return -ENOMEM; + } +} + +/* + * Set the destination MAC and L3 egress interface ID for a nexthop entry in the SoC's + * L3_NEXTHOP table. The nexthop entry is identified by idx. + * dmac_id is the reference to the L2 entry in the L2 forwarding table, special values are + * 0x7ffe: TRAP2CPU + * 0x7ffd: TRAP2MASTERCPU + * 0x7fff: DMAC_ID_DROP + */ +static void rtl930x_set_l3_nexthop(int idx, u16 dmac_id, u16 interface) +{ + // Access L3_NEXTHOP table (3) via register RTL9300_TBL_1 + struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 3); + + pr_info("%s: Writing to L3_NEXTHOP table, index %d, dmac_id %d, interface %d\n", + __func__, idx, dmac_id, interface); + sw_w32(((dmac_id & 0x7fff) << 7) | (interface & 0x7f), rtl_table_data(r, 0)); + + pr_info("%s: %08x\n", __func__, sw_r32(rtl_table_data(r,0))); + rtl_table_write(r, idx); + rtl_table_release(r); +} + +static void rtl930x_pie_lookup_enable(struct rtl838x_switch_priv *priv, int index) +{ + int block = index / PIE_BLOCK_SIZE; + + sw_w32_mask(0, BIT(block), RTL930X_PIE_BLK_LOOKUP_CTRL); +} + +/* + * Reads the intermediate representation of the templated match-fields of the + * PIE rule in the pie_rule structure and fills in the raw data fields in the + * raw register space r[]. + * The register space configuration size is identical for the RTL8380/90 and RTL9300, + * however the RTL9310 has 2 more registers / fields and the physical field-ids are different + * on all SoCs. + * On the RTL9300 the mask fields are not word-aligned!
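+ * As a rough illustration (descriptive only, derived from the packing code below):
+ * field i of the template is stored as two 16-bit halves counting down from r[5],
+ * while its mask is stored with an 8-bit offset:
+ *   even i: data -> bits 0-15 of r[5 - i / 2], mask -> bits 8-23 of r[12 - i / 2]
+ *   odd i:  data -> bits 16-31 of r[5 - i / 2], mask -> bits 24-31 of r[12 - i / 2],
+ *           the remaining mask bits spilling into bits 0-7 of r[11 - i / 2]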
+ */ +static void rtl930x_write_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[]) +{ + int i; + enum template_field_id field_type; + u16 data, data_m; + + for (i = 0; i < N_FIXED_FIELDS; i++) { + field_type = t[i]; + data = data_m = 0; + + switch (field_type) { + case TEMPLATE_FIELD_SPM0: + data = pr->spm; + data_m = pr->spm_m; + break; + case TEMPLATE_FIELD_SPM1: + data = pr->spm >> 16; + data_m = pr->spm_m >> 16; + break; + case TEMPLATE_FIELD_OTAG: + data = pr->otag; + data_m = pr->otag_m; + break; + case TEMPLATE_FIELD_SMAC0: + data = pr->smac[4]; + data = (data << 8) | pr->smac[5]; + data_m = pr->smac_m[4]; + data_m = (data_m << 8) | pr->smac_m[5]; + break; + case TEMPLATE_FIELD_SMAC1: + data = pr->smac[2]; + data = (data << 8) | pr->smac[3]; + data_m = pr->smac_m[2]; + data_m = (data_m << 8) | pr->smac_m[3]; + break; + case TEMPLATE_FIELD_SMAC2: + data = pr->smac[0]; + data = (data << 8) | pr->smac[1]; + data_m = pr->smac_m[0]; + data_m = (data_m << 8) | pr->smac_m[1]; + break; + case TEMPLATE_FIELD_DMAC0: + data = pr->dmac[4]; + data = (data << 8) | pr->dmac[5]; + data_m = pr->dmac_m[4]; + data_m = (data_m << 8) | pr->dmac_m[5]; + break; + case TEMPLATE_FIELD_DMAC1: + data = pr->dmac[2]; + data = (data << 8) | pr->dmac[3]; + data_m = pr->dmac_m[2]; + data_m = (data_m << 8) | pr->dmac_m[3]; + break; + case TEMPLATE_FIELD_DMAC2: + data = pr->dmac[0]; + data = (data << 8) | pr->dmac[1]; + data_m = pr->dmac_m[0]; + data_m = (data_m << 8) | pr->dmac_m[1]; + break; + case TEMPLATE_FIELD_ETHERTYPE: + data = pr->ethertype; + data_m = pr->ethertype_m; + break; + case TEMPLATE_FIELD_ITAG: + data = pr->itag; + data_m = pr->itag_m; + break; + case TEMPLATE_FIELD_SIP0: + if (pr->is_ipv6) { + data = pr->sip6.s6_addr16[7]; + data_m = pr->sip6_m.s6_addr16[7]; + } else { + data = pr->sip; + data_m = pr->sip_m; + } + break; + case TEMPLATE_FIELD_SIP1: + if (pr->is_ipv6) { + data = pr->sip6.s6_addr16[6]; + data_m = pr->sip6_m.s6_addr16[6]; + } else { + data = pr->sip >> 16; + data_m = pr->sip_m >> 16; + } + break; + + case TEMPLATE_FIELD_SIP2: + case TEMPLATE_FIELD_SIP3: + case TEMPLATE_FIELD_SIP4: + case TEMPLATE_FIELD_SIP5: + case TEMPLATE_FIELD_SIP6: + case TEMPLATE_FIELD_SIP7: + data = pr->sip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)]; + data_m = pr->sip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)]; + break; + + case TEMPLATE_FIELD_DIP0: + if (pr->is_ipv6) { + data = pr->dip6.s6_addr16[7]; + data_m = pr->dip6_m.s6_addr16[7]; + } else { + data = pr->dip; + data_m = pr->dip_m; + } + break; + + case TEMPLATE_FIELD_DIP1: + if (pr->is_ipv6) { + data = pr->dip6.s6_addr16[6]; + data_m = pr->dip6_m.s6_addr16[6]; + } else { + data = pr->dip >> 16; + data_m = pr->dip_m >> 16; + } + break; + + case TEMPLATE_FIELD_DIP2: + case TEMPLATE_FIELD_DIP3: + case TEMPLATE_FIELD_DIP4: + case TEMPLATE_FIELD_DIP5: + case TEMPLATE_FIELD_DIP6: + case TEMPLATE_FIELD_DIP7: + data = pr->dip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)]; + data_m = pr->dip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)]; + break; + + case TEMPLATE_FIELD_IP_TOS_PROTO: + data = pr->tos_proto; + data_m = pr->tos_proto_m; + break; + case TEMPLATE_FIELD_L4_SPORT: + data = pr->sport; + data_m = pr->sport_m; + break; + case TEMPLATE_FIELD_L4_DPORT: + data = pr->dport; + data_m = pr->dport_m; + break; + case TEMPLATE_FIELD_DSAP_SSAP: + data = pr->dsap_ssap; + data_m = pr->dsap_ssap_m; + break; + case TEMPLATE_FIELD_TCP_INFO: + data = pr->tcp_info; + data_m = pr->tcp_info_m; + break; + case 
TEMPLATE_FIELD_RANGE_CHK: + pr_warn("Warning: TEMPLATE_FIELD_RANGE_CHK: not configured\n"); + break; + default: + pr_info("%s: unknown field %d\n", __func__, field_type); + } + + // On the RTL9300, the mask fields are not word aligned! + if (!(i % 2)) { + r[5 - i / 2] = data; + r[12 - i / 2] |= ((u32)data_m << 8); + } else { + r[5 - i / 2] |= ((u32)data) << 16; + r[12 - i / 2] |= ((u32)data_m) << 24; + r[11 - i / 2] |= ((u32)data_m) >> 8; + } + } +} + +static void rtl930x_read_pie_fixed_fields(u32 r[], struct pie_rule *pr) +{ + pr->stacking_port = r[6] & BIT(31); + pr->spn = (r[6] >> 24) & 0x7f; + pr->mgnt_vlan = r[6] & BIT(23); + if (pr->phase == PHASE_IACL) + pr->dmac_hit_sw = r[6] & BIT(22); + else + pr->content_too_deep = r[6] & BIT(22); + pr->not_first_frag = r[6] & BIT(21); + pr->frame_type_l4 = (r[6] >> 18) & 7; + pr->frame_type = (r[6] >> 16) & 3; + pr->otag_fmt = (r[6] >> 15) & 1; + pr->itag_fmt = (r[6] >> 14) & 1; + pr->otag_exist = (r[6] >> 13) & 1; + pr->itag_exist = (r[6] >> 12) & 1; + pr->frame_type_l2 = (r[6] >> 10) & 3; + pr->igr_normal_port = (r[6] >> 9) & 1; + pr->tid = (r[6] >> 8) & 1; + + pr->stacking_port_m = r[12] & BIT(7); + pr->spn_m = r[12] & 0x7f; + pr->mgnt_vlan_m = r[13] & BIT(31); + if (pr->phase == PHASE_IACL) + pr->dmac_hit_sw_m = r[13] & BIT(30); + else + pr->content_too_deep_m = r[13] & BIT(30); + pr->not_first_frag_m = r[13] & BIT(29); + pr->frame_type_l4_m = (r[13] >> 26) & 7; + pr->frame_type_m = (r[13] >> 24) & 3; + pr->otag_fmt_m = r[13] & BIT(23); + pr->itag_fmt_m = r[13] & BIT(22); + pr->otag_exist_m = r[13] & BIT(21); + pr->itag_exist_m = r[13] & BIT (20); + pr->frame_type_l2_m = (r[13] >> 18) & 3; + pr->igr_normal_port_m = r[13] & BIT(17); + pr->tid_m = (r[13] >> 16) & 1; + + pr->valid = r[13] & BIT(15); + pr->cond_not = r[13] & BIT(14); + pr->cond_and1 = r[13] & BIT(13); + pr->cond_and2 = r[13] & BIT(12); +} + +static void rtl930x_write_pie_fixed_fields(u32 r[], struct pie_rule *pr) +{ + r[6] = pr->stacking_port ? BIT(31) : 0; + r[6] |= ((u32) (pr->spn & 0x7f)) << 24; + r[6] |= pr->mgnt_vlan ? BIT(23) : 0; + if (pr->phase == PHASE_IACL) + r[6] |= pr->dmac_hit_sw ? BIT(22) : 0; + else + r[6] |= pr->content_too_deep ? BIT(22) : 0; + r[6] |= pr->not_first_frag ? BIT(21) : 0; + r[6] |= ((u32) (pr->frame_type_l4 & 0x7)) << 18; + r[6] |= ((u32) (pr->frame_type & 0x3)) << 16; + r[6] |= pr->otag_fmt ? BIT(15) : 0; + r[6] |= pr->itag_fmt ? BIT(14) : 0; + r[6] |= pr->otag_exist ? BIT(13) : 0; + r[6] |= pr->itag_exist ? BIT(12) : 0; + r[6] |= ((u32) (pr->frame_type_l2 & 0x3)) << 10; + r[6] |= pr->igr_normal_port ? BIT(9) : 0; + r[6] |= ((u32) (pr->tid & 0x1)) << 8; + + r[12] |= pr->stacking_port_m ? BIT(7) : 0; + r[12] |= (u32) (pr->spn_m & 0x7f); + r[13] |= pr->mgnt_vlan_m ? BIT(31) : 0; + if (pr->phase == PHASE_IACL) + r[13] |= pr->dmac_hit_sw_m ? BIT(30) : 0; + else + r[13] |= pr->content_too_deep_m ? BIT(30) : 0; + r[13] |= pr->not_first_frag_m ? BIT(29) : 0; + r[13] |= ((u32) (pr->frame_type_l4_m & 0x7)) << 26; + r[13] |= ((u32) (pr->frame_type_m & 0x3)) << 24; + r[13] |= pr->otag_fmt_m ? BIT(23) : 0; + r[13] |= pr->itag_fmt_m ? BIT(22) : 0; + r[13] |= pr->otag_exist_m ? BIT(21) : 0; + r[13] |= pr->itag_exist_m ? BIT(20) : 0; + r[13] |= ((u32) (pr->frame_type_l2_m & 0x3)) << 18; + r[13] |= pr->igr_normal_port_m ? BIT(17) : 0; + r[13] |= ((u32) (pr->tid_m & 0x1)) << 16; + + r[13] |= pr->valid ? BIT(15) : 0; + r[13] |= pr->cond_not ? BIT(14) : 0; + r[13] |= pr->cond_and1 ? BIT(13) : 0; + r[13] |= pr->cond_and2 ? 
BIT(12) : 0; +} + +static void rtl930x_write_pie_action(u32 r[], struct pie_rule *pr) +{ + // Either drop or forward + if (pr->drop) { + r[14] |= BIT(24) | BIT(25) | BIT(26); // Do Green, Yellow and Red drops + // Actually DROP, not PERMIT in Green / Yellow / Red + r[14] |= BIT(23) | BIT(22) | BIT(20); + } else { + r[14] |= pr->fwd_sel ? BIT(27) : 0; + r[14] |= pr->fwd_act << 18; + r[14] |= BIT(14); // We overwrite any drop + } + if (pr->phase == PHASE_VACL) + r[14] |= pr->fwd_sa_lrn ? BIT(15) : 0; + r[13] |= pr->bypass_sel ? BIT(5) : 0; + r[13] |= pr->nopri_sel ? BIT(4) : 0; + r[13] |= pr->tagst_sel ? BIT(3) : 0; + r[13] |= pr->ovid_sel ? BIT(1) : 0; + r[14] |= pr->ivid_sel ? BIT(31) : 0; + r[14] |= pr->meter_sel ? BIT(30) : 0; + r[14] |= pr->mir_sel ? BIT(29) : 0; + r[14] |= pr->log_sel ? BIT(28) : 0; + + r[14] |= ((u32)(pr->fwd_data & 0x3fff)) << 3; + r[15] |= pr->log_octets ? BIT(31) : 0; + r[15] |= (u32)(pr->meter_data) << 23; + + r[15] |= ((u32)(pr->ivid_act) << 21) & 0x3; + r[15] |= ((u32)(pr->ivid_data) << 9) & 0xfff; + r[16] |= ((u32)(pr->ovid_act) << 30) & 0x3; + r[16] |= ((u32)(pr->ovid_data) & 0xfff) << 16; + r[16] |= (pr->mir_data & 0x3) << 6; + r[17] |= ((u32)(pr->tagst_data) & 0xf) << 28; + r[17] |= ((u32)(pr->nopri_data) & 0x7) << 25; + r[17] |= pr->bypass_ibc_sc ? BIT(16) : 0; +} + +void rtl930x_pie_rule_dump_raw(u32 r[]) +{ + pr_info("Raw IACL table entry:\n"); + pr_info("r 0 - 7: %08x %08x %08x %08x %08x %08x %08x %08x\n", + r[0], r[1], r[2], r[3], r[4], r[5], r[6], r[7]); + pr_info("r 8 - 15: %08x %08x %08x %08x %08x %08x %08x %08x\n", + r[8], r[9], r[10], r[11], r[12], r[13], r[14], r[15]); + pr_info("r 16 - 18: %08x %08x %08x\n", r[16], r[17], r[18]); + pr_info("Match : %08x %08x %08x %08x %08x %08x\n", r[0], r[1], r[2], r[3], r[4], r[5]); + pr_info("Fixed : %06x\n", r[6] >> 8); + pr_info("Match M: %08x %08x %08x %08x %08x %08x\n", + (r[6] << 24) | (r[7] >> 8), (r[7] << 24) | (r[8] >> 8), (r[8] << 24) | (r[9] >> 8), + (r[9] << 24) | (r[10] >> 8), (r[10] << 24) | (r[11] >> 8), + (r[11] << 24) | (r[12] >> 8)); + pr_info("R[13]: %08x\n", r[13]); + pr_info("Fixed M: %06x\n", ((r[12] << 16) | (r[13] >> 16)) & 0xffffff); + pr_info("Valid / not / and1 / and2 : %1x\n", (r[13] >> 12) & 0xf); + pr_info("r 13-16: %08x %08x %08x %08x\n", r[13], r[14], r[15], r[16]); +} + +static int rtl930x_pie_rule_write(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr) +{ + // Access IACL table (2) via register 0 + struct table_reg *q = rtl_table_get(RTL9300_TBL_0, 2); + u32 r[19]; + int i; + int block = idx / PIE_BLOCK_SIZE; + u32 t_select = sw_r32(RTL930X_PIE_BLK_TMPLTE_CTRL(block)); + + pr_debug("%s: %d, t_select: %08x\n", __func__, idx, t_select); + + for (i = 0; i < 19; i++) + r[i] = 0; + + if (!pr->valid) { + rtl_table_write(q, idx); + rtl_table_release(q); + return 0; + } + rtl930x_write_pie_fixed_fields(r, pr); + + pr_debug("%s: template %d\n", __func__, (t_select >> (pr->tid * 4)) & 0xf); + rtl930x_write_pie_templated(r, pr, fixed_templates[(t_select >> (pr->tid * 4)) & 0xf]); + + rtl930x_write_pie_action(r, pr); + +// rtl930x_pie_rule_dump_raw(r); + + for (i = 0; i < 19; i++) + sw_w32(r[i], rtl_table_data(q, i)); + + rtl_table_write(q, idx); + rtl_table_release(q); + + return 0; +} + +static bool rtl930x_pie_templ_has(int t, enum template_field_id field_type) +{ + int i; + enum template_field_id ft; + + for (i = 0; i < N_FIXED_FIELDS; i++) { + ft = fixed_templates[t][i]; + if (field_type == ft) + return true; + } + + return false; +} + +/* + * Verify that the rule pr is 
compatible with a given template t in block block + * Note that this function is SoC specific since the values of e.g. TEMPLATE_FIELD_SIP0 + * depend on the SoC + */ +static int rtl930x_pie_verify_template(struct rtl838x_switch_priv *priv, + struct pie_rule *pr, int t, int block) +{ + int i; + + if (!pr->is_ipv6 && pr->sip_m && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_SIP0)) + return -1; + + if (!pr->is_ipv6 && pr->dip_m && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_DIP0)) + return -1; + + if (pr->is_ipv6) { + if ((pr->sip6_m.s6_addr32[0] || pr->sip6_m.s6_addr32[1] + || pr->sip6_m.s6_addr32[2] || pr->sip6_m.s6_addr32[3]) + && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_SIP2)) + return -1; + if ((pr->dip6_m.s6_addr32[0] || pr->dip6_m.s6_addr32[1] + || pr->dip6_m.s6_addr32[2] || pr->dip6_m.s6_addr32[3]) + && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_DIP2)) + return -1; + } + + if (ether_addr_to_u64(pr->smac) && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_SMAC0)) + return -1; + + if (ether_addr_to_u64(pr->dmac) && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_DMAC0)) + return -1; + + // TODO: Check more + + i = find_first_zero_bit(&priv->pie_use_bm[block * 4], PIE_BLOCK_SIZE); + + if (i >= PIE_BLOCK_SIZE) + return -1; + + return i + PIE_BLOCK_SIZE * block; +} + +static int rtl930x_pie_rule_add(struct rtl838x_switch_priv *priv, struct pie_rule *pr) +{ + int idx, block, j, t; + int min_block = 0; + int max_block = priv->n_pie_blocks / 2; + + if (pr->is_egress) { + min_block = max_block; + max_block = priv->n_pie_blocks; + } + pr_debug("In %s\n", __func__); + + mutex_lock(&priv->pie_mutex); + + for (block = min_block; block < max_block; block++) { + for (j = 0; j < 2; j++) { + t = (sw_r32(RTL930X_PIE_BLK_TMPLTE_CTRL(block)) >> (j * 4)) & 0xf; + pr_debug("Testing block %d, template %d, template id %d\n", block, j, t); + pr_debug("%s: %08x\n", + __func__, sw_r32(RTL930X_PIE_BLK_TMPLTE_CTRL(block))); + idx = rtl930x_pie_verify_template(priv, pr, t, block); + if (idx >= 0) + break; + } + if (j < 2) + break; + } + + if (block >= priv->n_pie_blocks) { + mutex_unlock(&priv->pie_mutex); + return -EOPNOTSUPP; + } + + pr_debug("Using block: %d, index %d, template-id %d\n", block, idx, j); + set_bit(idx, priv->pie_use_bm); + + pr->valid = true; + pr->tid = j; // Mapped to template number + pr->tid_m = 0x1; + pr->id = idx; + + rtl930x_pie_lookup_enable(priv, idx); + rtl930x_pie_rule_write(priv, idx, pr); + + mutex_unlock(&priv->pie_mutex); + return 0; +} + +/* + * Delete a range of Packet Inspection Engine rules + */ +static int rtl930x_pie_rule_del(struct rtl838x_switch_priv *priv, int index_from, int index_to) +{ + u32 v = (index_from << 1)| (index_to << 12 ) | BIT(0); + + pr_debug("%s: from %d to %d\n", __func__, index_from, index_to); + mutex_lock(&priv->reg_mutex); + + // Write from-to and execute bit into control register + sw_w32(v, RTL930X_PIE_CLR_CTRL); + + // Wait until command has completed + do { + } while (sw_r32(RTL930X_PIE_CLR_CTRL) & BIT(0)); + + mutex_unlock(&priv->reg_mutex); + return 0; +} + +static void rtl930x_pie_rule_rm(struct rtl838x_switch_priv *priv, struct pie_rule *pr) +{ + int idx = pr->id; + + rtl930x_pie_rule_del(priv, idx, idx); + clear_bit(idx, priv->pie_use_bm); +} + +static void rtl930x_pie_init(struct rtl838x_switch_priv *priv) +{ + int i; + u32 template_selectors; + + mutex_init(&priv->pie_mutex); + + pr_info("%s\n", __func__); + // Enable ACL lookup on all ports, including CPU_PORT + for (i = 0; i <= priv->cpu_port; i++) + sw_w32(1, RTL930X_ACL_PORT_LOOKUP_CTRL(i)); + + // Include 
IPG in metering + sw_w32_mask(0, 1, RTL930X_METER_GLB_CTRL); + + // Delete all present rules, block size is 128 on all SoC families + rtl930x_pie_rule_del(priv, 0, priv->n_pie_blocks * 128 - 1); + + // Assign blocks 0-7 to VACL phase (bit = 0), blocks 8-15 to IACL (bit = 1) + sw_w32(0xff00, RTL930X_PIE_BLK_PHASE_CTRL); + + // Enable predefined templates 0, 1 for first quarter of all blocks + template_selectors = 0 | (1 << 4); + for (i = 0; i < priv->n_pie_blocks / 4; i++) + sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i)); + + // Enable predefined templates 2, 3 for second quarter of all blocks + template_selectors = 2 | (3 << 4); + for (i = priv->n_pie_blocks / 4; i < priv->n_pie_blocks / 2; i++) + sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i)); + + // Enable predefined templates 0, 1 for third half of all blocks + template_selectors = 0 | (1 << 4); + for (i = priv->n_pie_blocks / 2; i < priv->n_pie_blocks * 3 / 4; i++) + sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i)); + + // Enable predefined templates 2, 3 for fourth quater of all blocks + template_selectors = 2 | (3 << 4); + for (i = priv->n_pie_blocks * 3 / 4; i < priv->n_pie_blocks; i++) + sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i)); + +} + +/* + * Sets up an egress interface for L3 actions + * Actions for ip4/6_icmp_redirect, ip4/6_pbr_icmp_redirect are: + * 0: FORWARD, 1: DROP, 2: TRAP2CPU, 3: COPY2CPU, 4: TRAP2MASTERCPU 5: COPY2MASTERCPU + * 6: HARDDROP + * idx is the index in the HW interface table: idx < 0x80 + */ +static void rtl930x_set_l3_egress_intf(int idx, struct rtl838x_l3_intf *intf) +{ + u32 u, v; + // Read L3_EGR_INTF table (4) via register RTL9300_TBL_1 + struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 4); + + // The table has 2 registers + u = (intf->vid & 0xfff) << 9; + u |= (intf->smac_idx & 0x3f) << 3; + u |= (intf->ip4_mtu_id & 0x7); + + v = (intf->ip6_mtu_id & 0x7) << 28; + v |= (intf->ttl_scope & 0xff) << 20; + v |= (intf->hl_scope & 0xff) << 12; + v |= (intf->ip4_icmp_redirect & 0x7) << 9; + v |= (intf->ip6_icmp_redirect & 0x7)<< 6; + v |= (intf->ip4_pbr_icmp_redirect & 0x7) << 3; + v |= (intf->ip6_pbr_icmp_redirect & 0x7); + + sw_w32(u, rtl_table_data(r, 0)); + sw_w32(v, rtl_table_data(r, 1)); + + pr_info("%s writing to index %d: %08x %08x\n", __func__, idx, u, v); + rtl_table_write(r, idx & 0x7f); + rtl_table_release(r); +} + +/* + * Reads a MAC entry for L3 termination as entry point for routing + * from the hardware table + * idx is the index into the L3_ROUTER_MAC table + */ +static void rtl930x_get_l3_router_mac(u32 idx, struct rtl93xx_rt_mac *m) +{ + u32 v, w; + // Read L3_ROUTER_MAC table (0) via register RTL9300_TBL_1 + struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 0); + + rtl_table_read(r, idx); + // The table has a size of 7 registers, 64 entries + v = sw_r32(rtl_table_data(r, 0)); + w = sw_r32(rtl_table_data(r, 3)); + m->valid = !!(v & BIT(20)); + if (!m->valid) + goto out; + + m->p_type = !!(v & BIT(19)); + m->p_id = (v >> 13) & 0x3f; // trunk id of port + m->vid = v & 0xfff; + m->vid_mask = w & 0xfff; + m->action = sw_r32(rtl_table_data(r, 6)) & 0x7; + m->mac_mask = ((((u64)sw_r32(rtl_table_data(r, 5))) << 32) & 0xffffffffffffULL) + | (sw_r32(rtl_table_data(r, 4))); + m->mac = ((((u64)sw_r32(rtl_table_data(r, 1))) << 32) & 0xffffffffffffULL) + | (sw_r32(rtl_table_data(r, 2))); + // Bits L3_INTF and BMSK_L3_INTF are 0 + +out: + rtl_table_release(r); +} + +/* + * Writes a MAC entry for L3 termination as entry point for routing + * into the 
hardware table + * idx is the index into the L3_ROUTER_MAC table + */ +static void rtl930x_set_l3_router_mac(u32 idx, struct rtl93xx_rt_mac *m) +{ + u32 v, w; + // Read L3_ROUTER_MAC table (0) via register RTL9300_TBL_1 + struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 0); + + // The table has a size of 7 registers, 64 entries + v = BIT(20); // mac entry valid, port type is 0: individual + v |= (m->p_id & 0x3f) << 13; + v |= (m->vid & 0xfff); // Set the interface_id to the vlan id + + w = m->vid_mask; + w |= (m->p_id_mask & 0x3f) << 13; + + sw_w32(v, rtl_table_data(r, 0)); + sw_w32(w, rtl_table_data(r, 3)); + + // Set MAC address, L3_INTF (bit 12 in register 1) needs to be 0 + sw_w32((u32)(m->mac), rtl_table_data(r, 2)); + sw_w32(m->mac >> 32, rtl_table_data(r, 1)); + + // Set MAC address mask, BMSK_L3_INTF (bit 12 in register 5) needs to be 0 + sw_w32((u32)(m->mac_mask >> 32), rtl_table_data(r, 4)); + sw_w32((u32)m->mac_mask, rtl_table_data(r, 5)); + + sw_w32(m->action & 0x7, rtl_table_data(r, 6)); + + pr_debug("%s writing index %d: %08x %08x %08x %08x %08x %08x %08x\n", __func__, idx, + sw_r32(rtl_table_data(r, 0)), sw_r32(rtl_table_data(r, 1)), sw_r32(rtl_table_data(r, 2)), + sw_r32(rtl_table_data(r, 3)), sw_r32(rtl_table_data(r, 4)), sw_r32(rtl_table_data(r, 5)), + sw_r32(rtl_table_data(r, 6)) + ); + rtl_table_write(r, idx); + rtl_table_release(r); +} + +/* + * Get the Destination-MAC of an L3 egress interface or the Source MAC for routed packets + * from the SoC's L3_EGR_INTF_MAC table + * Indexes 0-2047 are DMACs, 2048+ are SMACs + */ +static u64 rtl930x_get_l3_egress_mac(u32 idx) +{ + u64 mac; + // Read L3_EGR_INTF_MAC table (2) via register RTL9300_TBL_2 + struct table_reg *r = rtl_table_get(RTL9300_TBL_2, 2); + + rtl_table_read(r, idx); + // The table has a size of 2 registers + mac = sw_r32(rtl_table_data(r, 0)); + mac <<= 32; + mac |= sw_r32(rtl_table_data(r, 1)); + rtl_table_release(r); + + return mac; +} +/* + * Set the Destination-MAC of a route or the Source MAC of an L3 egress interface + * in the SoC's L3_EGR_INTF_MAC table + * Indexes 0-2047 are DMACs, 2048+ are SMACs + */ +static void rtl930x_set_l3_egress_mac(u32 idx, u64 mac) +{ + // Access L3_EGR_INTF_MAC table (2) via register RTL9300_TBL_2 + struct table_reg *r = rtl_table_get(RTL9300_TBL_2, 2); + + // The table has a size of 2 registers + sw_w32(mac >> 32, rtl_table_data(r, 0)); + sw_w32(mac, rtl_table_data(r, 1)); + + pr_debug("%s: setting index %d to %016llx\n", __func__, idx, mac); + rtl_table_write(r, idx); + rtl_table_release(r); +} + +/* + * Configure L3 routing settings of the device: + * - MTUs + * - Egress interface + * - The router's MAC address on which routed packets are expected + * - MAC addresses used as source macs of routed packets + */ +int rtl930x_l3_setup(struct rtl838x_switch_priv *priv) +{ + int i; + + // Setup MTU with id 0 for default interface + for (i = 0; i < MAX_INTF_MTUS; i++) + priv->intf_mtu_count[i] = priv->intf_mtus[i] = 0; + + priv->intf_mtu_count[0] = 0; // Needs to stay forever + priv->intf_mtus[0] = DEFAULT_MTU; + sw_w32_mask(0xffff, DEFAULT_MTU, RTL930X_L3_IP_MTU_CTRL(0)); + sw_w32_mask(0xffff, DEFAULT_MTU, RTL930X_L3_IP6_MTU_CTRL(0)); + priv->intf_mtus[1] = DEFAULT_MTU; + sw_w32_mask(0xffff0000, DEFAULT_MTU << 16, RTL930X_L3_IP_MTU_CTRL(0)); + sw_w32_mask(0xffff0000, DEFAULT_MTU << 16, RTL930X_L3_IP6_MTU_CTRL(0)); + + sw_w32_mask(0xffff, DEFAULT_MTU, RTL930X_L3_IP_MTU_CTRL(1)); + sw_w32_mask(0xffff, DEFAULT_MTU, RTL930X_L3_IP6_MTU_CTRL(1)); + sw_w32_mask(0xffff0000, 
DEFAULT_MTU << 16, RTL930X_L3_IP_MTU_CTRL(1)); + sw_w32_mask(0xffff0000, DEFAULT_MTU << 16, RTL930X_L3_IP6_MTU_CTRL(1)); + + // Clear all source port MACs + for (i = 0; i < MAX_SMACS; i++) + rtl930x_set_l3_egress_mac(L3_EGRESS_DMACS + i, 0ULL); + + // Configure the default L3 hash algorithm + sw_w32_mask(BIT(2), 0, RTL930X_L3_HOST_TBL_CTRL); // Algorithm selection 0 = 0 + sw_w32_mask(0, BIT(3), RTL930X_L3_HOST_TBL_CTRL); // Algorithm selection 1 = 1 + + pr_info("L3_IPUC_ROUTE_CTRL %08x, IPMC_ROUTE %08x, IP6UC_ROUTE %08x, IP6MC_ROUTE %08x\n", + sw_r32(RTL930X_L3_IPUC_ROUTE_CTRL), sw_r32(RTL930X_L3_IPMC_ROUTE_CTRL), + sw_r32(RTL930X_L3_IP6UC_ROUTE_CTRL), sw_r32(RTL930X_L3_IP6MC_ROUTE_CTRL)); + sw_w32_mask(0, 1, RTL930X_L3_IPUC_ROUTE_CTRL); + sw_w32_mask(0, 1, RTL930X_L3_IP6UC_ROUTE_CTRL); + sw_w32_mask(0, 1, RTL930X_L3_IPMC_ROUTE_CTRL); + sw_w32_mask(0, 1, RTL930X_L3_IP6MC_ROUTE_CTRL); + + sw_w32(0x00002001, RTL930X_L3_IPUC_ROUTE_CTRL); + sw_w32(0x00014581, RTL930X_L3_IP6UC_ROUTE_CTRL); + sw_w32(0x00000501, RTL930X_L3_IPMC_ROUTE_CTRL); + sw_w32(0x00012881, RTL930X_L3_IP6MC_ROUTE_CTRL); + + pr_info("L3_IPUC_ROUTE_CTRL %08x, IPMC_ROUTE %08x, IP6UC_ROUTE %08x, IP6MC_ROUTE %08x\n", + sw_r32(RTL930X_L3_IPUC_ROUTE_CTRL), sw_r32(RTL930X_L3_IPMC_ROUTE_CTRL), + sw_r32(RTL930X_L3_IP6UC_ROUTE_CTRL), sw_r32(RTL930X_L3_IP6MC_ROUTE_CTRL)); + + // Trap non-ip traffic to the CPU-port (e.g. ARP so we stay reachable) + sw_w32_mask(0x3 << 8, 0x1 << 8, RTL930X_L3_IP_ROUTE_CTRL); + pr_info("L3_IP_ROUTE_CTRL %08x\n", sw_r32(RTL930X_L3_IP_ROUTE_CTRL)); + + // PORT_ISO_RESTRICT_ROUTE_CTRL ? + + // Do not use prefix route 0 because of HW limitations + set_bit(0, priv->route_use_bm); + + return 0; +} + +static u32 rtl930x_packet_cntr_read(int counter) +{ + u32 v; + + // Read LOG table (3) via register RTL9300_TBL_0 + struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 3); + + pr_debug("In %s, id %d\n", __func__, counter); + rtl_table_read(r, counter / 2); + + pr_debug("Registers: %08x %08x\n", + sw_r32(rtl_table_data(r, 0)), sw_r32(rtl_table_data(r, 1))); + // The table has a size of 2 registers + if (counter % 2) + v = sw_r32(rtl_table_data(r, 0)); + else + v = sw_r32(rtl_table_data(r, 1)); + + rtl_table_release(r); + + return v; +} + +static void rtl930x_packet_cntr_clear(int counter) +{ + // Access LOG table (3) via register RTL9300_TBL_0 + struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 3); + + pr_info("In %s, id %d\n", __func__, counter); + // The table has a size of 2 registers + if (counter % 2) + sw_w32(0, rtl_table_data(r, 0)); + else + sw_w32(0, rtl_table_data(r, 1)); + + rtl_table_write(r, counter / 2); + + rtl_table_release(r); +} + +void rtl930x_vlan_port_keep_tag_set(int port, bool keep_outer, bool keep_inner) +{ + sw_w32(FIELD_PREP(RTL930X_VLAN_PORT_TAG_STS_CTRL_EGR_OTAG_STS_MASK, + keep_outer ? RTL930X_VLAN_PORT_TAG_STS_TAGGED : RTL930X_VLAN_PORT_TAG_STS_UNTAG) | + FIELD_PREP(RTL930X_VLAN_PORT_TAG_STS_CTRL_EGR_ITAG_STS_MASK, + keep_inner ? 
RTL930X_VLAN_PORT_TAG_STS_TAGGED : RTL930X_VLAN_PORT_TAG_STS_UNTAG), + RTL930X_VLAN_PORT_TAG_STS_CTRL(port)); +} + +void rtl930x_vlan_port_pvidmode_set(int port, enum pbvlan_type type, enum pbvlan_mode mode) +{ + if (type == PBVLAN_TYPE_INNER) + sw_w32_mask(0x3, mode, RTL930X_VLAN_PORT_PB_VLAN + (port << 2)); + else + sw_w32_mask(0x3 << 14, mode << 14 ,RTL930X_VLAN_PORT_PB_VLAN + (port << 2)); +} + +void rtl930x_vlan_port_pvid_set(int port, enum pbvlan_type type, int pvid) +{ + if (type == PBVLAN_TYPE_INNER) + sw_w32_mask(0xfff << 2, pvid << 2, RTL930X_VLAN_PORT_PB_VLAN + (port << 2)); + else + sw_w32_mask(0xfff << 16, pvid << 16, RTL930X_VLAN_PORT_PB_VLAN + (port << 2)); +} + +static int rtl930x_set_ageing_time(unsigned long msec) +{ + int t = sw_r32(RTL930X_L2_AGE_CTRL); + + t &= 0x1FFFFF; + t = (t * 7) / 10; + pr_debug("L2 AGING time: %d sec\n", t); + + t = (msec / 100 + 6) / 7; + t = t > 0x1FFFFF ? 0x1FFFFF : t; + sw_w32_mask(0x1FFFFF, t, RTL930X_L2_AGE_CTRL); + pr_debug("Dynamic aging for ports: %x\n", sw_r32(RTL930X_L2_PORT_AGE_CTRL)); + + return 0; +} + +static void rtl930x_set_igr_filter(int port, enum igr_filter state) +{ + sw_w32_mask(0x3 << ((port & 0xf)<<1), state << ((port & 0xf)<<1), + RTL930X_VLAN_PORT_IGR_FLTR + (((port >> 4) << 2))); +} + +static void rtl930x_set_egr_filter(int port, enum egr_filter state) +{ + sw_w32_mask(0x1 << (port % 0x1D), state << (port % 0x1D), + RTL930X_VLAN_PORT_EGR_FLTR + (((port / 29) << 2))); +} + +void rtl930x_set_distribution_algorithm(int group, int algoidx, u32 algomsk) +{ + u32 l3shift = 0; + u32 newmask = 0; + + /* TODO: for now we set algoidx to 0 */ + algoidx = 0; + if (algomsk & TRUNK_DISTRIBUTION_ALGO_SIP_BIT) { + l3shift = 4; + newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SIP_BIT; + } + if (algomsk & TRUNK_DISTRIBUTION_ALGO_DIP_BIT) { + l3shift = 4; + newmask |= TRUNK_DISTRIBUTION_ALGO_L3_DIP_BIT; + } + if (algomsk & TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT) { + l3shift = 4; + newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SRC_L4PORT_BIT; + } + if (algomsk & TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT) { + l3shift = 4; + newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SRC_L4PORT_BIT; + } + + if (l3shift == 4) { + if (algomsk & TRUNK_DISTRIBUTION_ALGO_SMAC_BIT) + newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SMAC_BIT; + + if (algomsk & TRUNK_DISTRIBUTION_ALGO_DMAC_BIT) + newmask |= TRUNK_DISTRIBUTION_ALGO_L3_DMAC_BIT; + } else { + if (algomsk & TRUNK_DISTRIBUTION_ALGO_SMAC_BIT) + newmask |= TRUNK_DISTRIBUTION_ALGO_L2_SMAC_BIT; + if (algomsk & TRUNK_DISTRIBUTION_ALGO_DMAC_BIT) + newmask |= TRUNK_DISTRIBUTION_ALGO_L2_DMAC_BIT; + } + + sw_w32(newmask << l3shift, RTL930X_TRK_HASH_CTRL + (algoidx << 2)); +} + +static void rtl930x_led_init(struct rtl838x_switch_priv *priv) +{ + int i, pos; + u32 v, pm = 0, set; + u32 setlen; + const __be32 *led_set; + char set_name[9]; + struct device_node *node; + + pr_info("%s called\n", __func__); + node = of_find_compatible_node(NULL, NULL, "realtek,rtl9300-leds"); + if (!node) { + pr_info("%s No compatible LED node found\n", __func__); + return; + } + + for (i= 0; i < priv->cpu_port; i++) { + pos = (i << 1) % 32; + sw_w32_mask(0x3 << pos, 0, RTL930X_LED_PORT_FIB_SET_SEL_CTRL(i)); + sw_w32_mask(0x3 << pos, 0, RTL930X_LED_PORT_COPR_SET_SEL_CTRL(i)); + + if (!priv->ports[i].phy) + continue; + + v = 0x1; + if (priv->ports[i].is10G) + v = 0x3; + if (priv->ports[i].phy_is_integrated) + v = 0x1; + sw_w32_mask(0x3 << pos, v << pos, RTL930X_LED_PORT_NUM_CTRL(i)); + + pm |= BIT(i); + + set = priv->ports[i].led_set; + sw_w32_mask(0, set << pos, 
RTL930X_LED_PORT_COPR_SET_SEL_CTRL(i)); + sw_w32_mask(0, set << pos, RTL930X_LED_PORT_FIB_SET_SEL_CTRL(i)); + } + + for (i = 0; i < 4; i++) { + sprintf(set_name, "led_set%d", i); + led_set = of_get_property(node, set_name, &setlen); + if (!led_set || setlen != 16) + break; + v = be32_to_cpup(led_set) << 16 | be32_to_cpup(led_set + 1); + sw_w32(v, RTL930X_LED_SET0_0_CTRL - 4 - i * 8); + v = be32_to_cpup(led_set + 2) << 16 | be32_to_cpup(led_set + 3); + sw_w32(v, RTL930X_LED_SET0_0_CTRL - i * 8); + } + + // Set LED mode to serial (0x1) + sw_w32_mask(0x3, 0x1, RTL930X_LED_GLB_CTRL); + + // Set port type masks + sw_w32(pm, RTL930X_LED_PORT_COPR_MASK_CTRL); + sw_w32(pm, RTL930X_LED_PORT_FIB_MASK_CTRL); + sw_w32(pm, RTL930X_LED_PORT_COMBO_MASK_CTRL); + + for (i = 0; i < 24; i++) + pr_info("%s %08x: %08x\n",__func__, 0xbb00cc00 + i * 4, sw_r32(0xcc00 + i * 4)); +} + +const struct rtl838x_reg rtl930x_reg = { + .mask_port_reg_be = rtl838x_mask_port_reg, + .set_port_reg_be = rtl838x_set_port_reg, + .get_port_reg_be = rtl838x_get_port_reg, + .mask_port_reg_le = rtl838x_mask_port_reg, + .set_port_reg_le = rtl838x_set_port_reg, + .get_port_reg_le = rtl838x_get_port_reg, + .stat_port_rst = RTL930X_STAT_PORT_RST, + .stat_rst = RTL930X_STAT_RST, + .stat_port_std_mib = RTL930X_STAT_PORT_MIB_CNTR, + .traffic_enable = rtl930x_traffic_enable, + .traffic_disable = rtl930x_traffic_disable, + .traffic_get = rtl930x_traffic_get, + .traffic_set = rtl930x_traffic_set, + .l2_ctrl_0 = RTL930X_L2_CTRL, + .l2_ctrl_1 = RTL930X_L2_AGE_CTRL, + .l2_port_aging_out = RTL930X_L2_PORT_AGE_CTRL, + .set_ageing_time = rtl930x_set_ageing_time, + .smi_poll_ctrl = RTL930X_SMI_POLL_CTRL, // TODO: Difference to RTL9300_SMI_PRVTE_POLLING_CTRL + .l2_tbl_flush_ctrl = RTL930X_L2_TBL_FLUSH_CTRL, + .exec_tbl0_cmd = rtl930x_exec_tbl0_cmd, + .exec_tbl1_cmd = rtl930x_exec_tbl1_cmd, + .tbl_access_data_0 = rtl930x_tbl_access_data_0, + .isr_glb_src = RTL930X_ISR_GLB, + .isr_port_link_sts_chg = RTL930X_ISR_PORT_LINK_STS_CHG, + .imr_port_link_sts_chg = RTL930X_IMR_PORT_LINK_STS_CHG, + .imr_glb = RTL930X_IMR_GLB, + .vlan_tables_read = rtl930x_vlan_tables_read, + .vlan_set_tagged = rtl930x_vlan_set_tagged, + .vlan_set_untagged = rtl930x_vlan_set_untagged, + .vlan_profile_dump = rtl930x_vlan_profile_dump, + .vlan_profile_setup = rtl930x_vlan_profile_setup, + .vlan_fwd_on_inner = rtl930x_vlan_fwd_on_inner, + .set_vlan_igr_filter = rtl930x_set_igr_filter, + .set_vlan_egr_filter = rtl930x_set_egr_filter, + .stp_get = rtl930x_stp_get, + .stp_set = rtl930x_stp_set, + .mac_force_mode_ctrl = rtl930x_mac_force_mode_ctrl, + .mac_port_ctrl = rtl930x_mac_port_ctrl, + .l2_port_new_salrn = rtl930x_l2_port_new_salrn, + .l2_port_new_sa_fwd = rtl930x_l2_port_new_sa_fwd, + .mir_ctrl = RTL930X_MIR_CTRL, + .mir_dpm = RTL930X_MIR_DPM_CTRL, + .mir_spm = RTL930X_MIR_SPM_CTRL, + .mac_link_sts = RTL930X_MAC_LINK_STS, + .mac_link_dup_sts = RTL930X_MAC_LINK_DUP_STS, + .mac_link_spd_sts = rtl930x_mac_link_spd_sts, + .mac_rx_pause_sts = RTL930X_MAC_RX_PAUSE_STS, + .mac_tx_pause_sts = RTL930X_MAC_TX_PAUSE_STS, + .read_l2_entry_using_hash = rtl930x_read_l2_entry_using_hash, + .write_l2_entry_using_hash = rtl930x_write_l2_entry_using_hash, + .read_cam = rtl930x_read_cam, + .write_cam = rtl930x_write_cam, + .vlan_port_keep_tag_set = rtl930x_vlan_port_keep_tag_set, + .vlan_port_pvidmode_set = rtl930x_vlan_port_pvidmode_set, + .vlan_port_pvid_set = rtl930x_vlan_port_pvid_set, + .trk_mbr_ctr = rtl930x_trk_mbr_ctr, + .rma_bpdu_fld_pmask = RTL930X_RMA_BPDU_FLD_PMSK, + .init_eee = 
rtl930x_init_eee, + .port_eee_set = rtl930x_port_eee_set, + .eee_port_ability = rtl930x_eee_port_ability, + .l2_hash_seed = rtl930x_l2_hash_seed, + .l2_hash_key = rtl930x_l2_hash_key, + .read_mcast_pmask = rtl930x_read_mcast_pmask, + .write_mcast_pmask = rtl930x_write_mcast_pmask, + .pie_init = rtl930x_pie_init, + .pie_rule_write = rtl930x_pie_rule_write, + .pie_rule_add = rtl930x_pie_rule_add, + .pie_rule_rm = rtl930x_pie_rule_rm, + .l2_learning_setup = rtl930x_l2_learning_setup, + .packet_cntr_read = rtl930x_packet_cntr_read, + .packet_cntr_clear = rtl930x_packet_cntr_clear, + .route_read = rtl930x_route_read, + .route_write = rtl930x_route_write, + .host_route_write = rtl930x_host_route_write, + .l3_setup = rtl930x_l3_setup, + .set_l3_nexthop = rtl930x_set_l3_nexthop, + .get_l3_nexthop = rtl930x_get_l3_nexthop, + .get_l3_egress_mac = rtl930x_get_l3_egress_mac, + .set_l3_egress_mac = rtl930x_set_l3_egress_mac, + .find_l3_slot = rtl930x_find_l3_slot, + .route_lookup_hw = rtl930x_route_lookup_hw, + .get_l3_router_mac = rtl930x_get_l3_router_mac, + .set_l3_router_mac = rtl930x_set_l3_router_mac, + .set_l3_egress_intf = rtl930x_set_l3_egress_intf, + .set_distribution_algorithm = rtl930x_set_distribution_algorithm, + .led_init = rtl930x_led_init, +}; diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl931x.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl931x.c new file mode 100644 index 0000000000..ee8d6c2c73 --- /dev/null +++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/rtl931x.c @@ -0,0 +1,1701 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include "rtl83xx.h" + +#define RTL931X_VLAN_PORT_TAG_STS_INTERNAL 0x0 +#define RTL931X_VLAN_PORT_TAG_STS_UNTAG 0x1 +#define RTL931X_VLAN_PORT_TAG_STS_TAGGED 0x2 +#define RTL931X_VLAN_PORT_TAG_STS_PRIORITY_TAGGED 0x3 + +#define RTL931X_VLAN_PORT_TAG_CTRL_BASE 0x4860 +/* port 0-56 */ +#define RTL931X_VLAN_PORT_TAG_CTRL(port) \ + RTL931X_VLAN_PORT_TAG_CTRL_BASE + (port << 2) +#define RTL931X_VLAN_PORT_TAG_EGR_OTAG_STS_MASK GENMASK(13,12) +#define RTL931X_VLAN_PORT_TAG_EGR_ITAG_STS_MASK GENMASK(11,10) +#define RTL931X_VLAN_PORT_TAG_EGR_OTAG_KEEP_MASK GENMASK(9,9) +#define RTL931X_VLAN_PORT_TAG_EGR_ITAG_KEEP_MASK GENMASK(8,8) +#define RTL931X_VLAN_PORT_TAG_IGR_OTAG_KEEP_MASK GENMASK(7,7) +#define RTL931X_VLAN_PORT_TAG_IGR_ITAG_KEEP_MASK GENMASK(6,6) +#define RTL931X_VLAN_PORT_TAG_OTPID_IDX_MASK GENMASK(5,4) +#define RTL931X_VLAN_PORT_TAG_OTPID_KEEP_MASK GENMASK(3,3) +#define RTL931X_VLAN_PORT_TAG_ITPID_IDX_MASK GENMASK(2,1) +#define RTL931X_VLAN_PORT_TAG_ITPID_KEEP_MASK GENMASK(0,0) + +extern struct mutex smi_lock; +extern struct rtl83xx_soc_info soc_info; + +/* Definition of the RTL931X-specific template field IDs as used in the PIE */ +enum template_field_id { + TEMPLATE_FIELD_SPM0 = 1, + TEMPLATE_FIELD_SPM1 = 2, + TEMPLATE_FIELD_SPM2 = 3, + TEMPLATE_FIELD_SPM3 = 4, + TEMPLATE_FIELD_DMAC0 = 9, + TEMPLATE_FIELD_DMAC1 = 10, + TEMPLATE_FIELD_DMAC2 = 11, + TEMPLATE_FIELD_SMAC0 = 12, + TEMPLATE_FIELD_SMAC1 = 13, + TEMPLATE_FIELD_SMAC2 = 14, + TEMPLATE_FIELD_ETHERTYPE = 15, + TEMPLATE_FIELD_OTAG = 16, + TEMPLATE_FIELD_ITAG = 17, + TEMPLATE_FIELD_SIP0 = 18, + TEMPLATE_FIELD_SIP1 = 19, + TEMPLATE_FIELD_DIP0 = 20, + TEMPLATE_FIELD_DIP1 = 21, + TEMPLATE_FIELD_IP_TOS_PROTO = 22, + TEMPLATE_FIELD_L4_SPORT = 23, + TEMPLATE_FIELD_L4_DPORT = 24, + TEMPLATE_FIELD_L34_HEADER = 25, + TEMPLATE_FIELD_TCP_INFO = 26, + TEMPLATE_FIELD_SIP2 = 34, + TEMPLATE_FIELD_SIP3 = 35, + TEMPLATE_FIELD_SIP4 = 36, + 
TEMPLATE_FIELD_SIP5 = 37, + TEMPLATE_FIELD_SIP6 = 38, + TEMPLATE_FIELD_SIP7 = 39, + TEMPLATE_FIELD_DIP2 = 42, + TEMPLATE_FIELD_DIP3 = 43, + TEMPLATE_FIELD_DIP4 = 44, + TEMPLATE_FIELD_DIP5 = 45, + TEMPLATE_FIELD_DIP6 = 46, + TEMPLATE_FIELD_DIP7 = 47, + TEMPLATE_FIELD_FLOW_LABEL = 49, + TEMPLATE_FIELD_DSAP_SSAP = 50, + TEMPLATE_FIELD_FWD_VID = 52, + TEMPLATE_FIELD_RANGE_CHK = 53, + TEMPLATE_FIELD_SLP = 55, + TEMPLATE_FIELD_DLP = 56, + TEMPLATE_FIELD_META_DATA = 57, + TEMPLATE_FIELD_FIRST_MPLS1 = 60, + TEMPLATE_FIELD_FIRST_MPLS2 = 61, + TEMPLATE_FIELD_DPM3 = 8, +}; + +/* The meaning of TEMPLATE_FIELD_VLAN depends on phase and the configuration in + * RTL931X_PIE_CTRL. We use always the same definition and map to the inner VLAN tag: + */ +#define TEMPLATE_FIELD_VLAN TEMPLATE_FIELD_ITAG + +// Number of fixed templates predefined in the RTL9300 SoC +#define N_FIXED_TEMPLATES 5 +// RTL931x specific predefined templates +static enum template_field_id fixed_templates[N_FIXED_TEMPLATES][N_FIXED_FIELDS_RTL931X] = +{ + { + TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2, + TEMPLATE_FIELD_SMAC0, TEMPLATE_FIELD_SMAC1, TEMPLATE_FIELD_SMAC2, + TEMPLATE_FIELD_VLAN, TEMPLATE_FIELD_IP_TOS_PROTO, TEMPLATE_FIELD_DSAP_SSAP, + TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1, + TEMPLATE_FIELD_SPM2, TEMPLATE_FIELD_SPM3 + }, { + TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0, + TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_IP_TOS_PROTO, TEMPLATE_FIELD_TCP_INFO, + TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_L4_DPORT, TEMPLATE_FIELD_VLAN, + TEMPLATE_FIELD_RANGE_CHK, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1, + TEMPLATE_FIELD_SPM2, TEMPLATE_FIELD_SPM3 + }, { + TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2, + TEMPLATE_FIELD_VLAN, TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_IP_TOS_PROTO, + TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0, + TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_L4_DPORT, + TEMPLATE_FIELD_META_DATA, TEMPLATE_FIELD_SLP + }, { + TEMPLATE_FIELD_DIP0, TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_DIP2, + TEMPLATE_FIELD_DIP3, TEMPLATE_FIELD_DIP4, TEMPLATE_FIELD_DIP5, + TEMPLATE_FIELD_DIP6, TEMPLATE_FIELD_DIP7, TEMPLATE_FIELD_IP_TOS_PROTO, + TEMPLATE_FIELD_TCP_INFO, TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_L4_DPORT, + TEMPLATE_FIELD_RANGE_CHK, TEMPLATE_FIELD_SLP + }, { + TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_SIP2, + TEMPLATE_FIELD_SIP3, TEMPLATE_FIELD_SIP4, TEMPLATE_FIELD_SIP5, + TEMPLATE_FIELD_SIP6, TEMPLATE_FIELD_SIP7, TEMPLATE_FIELD_META_DATA, + TEMPLATE_FIELD_VLAN, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1, + TEMPLATE_FIELD_SPM2, TEMPLATE_FIELD_SPM3 + }, +}; + +inline void rtl931x_exec_tbl0_cmd(u32 cmd) +{ + sw_w32(cmd, RTL931X_TBL_ACCESS_CTRL_0); + do { } while (sw_r32(RTL931X_TBL_ACCESS_CTRL_0) & (1 << 20)); +} + +inline void rtl931x_exec_tbl1_cmd(u32 cmd) +{ + sw_w32(cmd, RTL931X_TBL_ACCESS_CTRL_1); + do { } while (sw_r32(RTL931X_TBL_ACCESS_CTRL_1) & (1 << 17)); +} + +inline int rtl931x_tbl_access_data_0(int i) +{ + return RTL931X_TBL_ACCESS_DATA_0(i); +} + +void rtl931x_vlan_profile_dump(int index) +{ + u64 profile[4]; + + if (index < 0 || index > 15) + return; + + profile[0] = sw_r32(RTL931X_VLAN_PROFILE_SET(index)); + profile[1] = (sw_r32(RTL931X_VLAN_PROFILE_SET(index) + 4) & 0x1FFFFFFFULL) << 32 + | (sw_r32(RTL931X_VLAN_PROFILE_SET(index) + 8) & 0xFFFFFFFF); + profile[2] = (sw_r32(RTL931X_VLAN_PROFILE_SET(index) + 16) & 0x1FFFFFFFULL) << 32 + | (sw_r32(RTL931X_VLAN_PROFILE_SET(index) + 12) & 0xFFFFFFFF); 
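+ // Note (descriptive only): each unknown-multicast field is assembled from a pair of
+ // profile-set registers, the full 32 bits of one register forming the low word and
+ // only the low 29 bits (0x1FFFFFFF) of the other the high word; the order of the
+ // register pair differs between the fields read above and below.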
+ profile[3] = (sw_r32(RTL931X_VLAN_PROFILE_SET(index) + 20) & 0x1FFFFFFFULL) << 32 + | (sw_r32(RTL931X_VLAN_PROFILE_SET(index) + 24) & 0xFFFFFFFF); + + pr_info("VLAN %d: L2 learning: %d, L2 Unknown MultiCast Field %llx, \ + IPv4 Unknown MultiCast Field %llx, IPv6 Unknown MultiCast Field: %llx", + index, (u32) (profile[0] & (3 << 14)), profile[1], profile[2], profile[3]); +} + +static void rtl931x_stp_get(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]) +{ + int i; + u32 cmd = 1 << 20 /* Execute cmd */ + | 0 << 19 /* Read */ + | 5 << 15 /* Table type 0b101 */ + | (msti & 0x3fff); + priv->r->exec_tbl0_cmd(cmd); + + for (i = 0; i < 4; i++) + port_state[i] = sw_r32(priv->r->tbl_access_data_0(i)); +} + +static void rtl931x_stp_set(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]) +{ + int i; + u32 cmd = 1 << 20 /* Execute cmd */ + | 1 << 19 /* Write */ + | 5 << 15 /* Table type 0b101 */ + | (msti & 0x3fff); + for (i = 0; i < 4; i++) + sw_w32(port_state[i], priv->r->tbl_access_data_0(i)); + priv->r->exec_tbl0_cmd(cmd); +} + +inline static int rtl931x_trk_mbr_ctr(int group) +{ + return RTL931X_TRK_MBR_CTRL + (group << 2); +} + +static void rtl931x_vlan_tables_read(u32 vlan, struct rtl838x_vlan_info *info) +{ + u32 v, w, x, y; + // Read VLAN table (3) via register 0 + struct table_reg *r = rtl_table_get(RTL9310_TBL_0, 3); + + rtl_table_read(r, vlan); + v = sw_r32(rtl_table_data(r, 0)); + w = sw_r32(rtl_table_data(r, 1)); + x = sw_r32(rtl_table_data(r, 2)); + y = sw_r32(rtl_table_data(r, 3)); + rtl_table_release(r); + + pr_debug("VLAN_READ %d: %08x %08x %08x %08x\n", vlan, v, w, x, y); + info->tagged_ports = ((u64) v) << 25 | (w >> 7); + info->profile_id = (x >> 16) & 0xf; + info->fid = w & 0x7f; // AKA MSTI depending on context + info->hash_uc_fid = !!(x & BIT(31)); + info->hash_mc_fid = !!(x & BIT(30)); + info->if_id = (x >> 20) & 0x3ff; + info->profile_id = (x >> 16) & 0xf; + info->multicast_grp_mask = x & 0xffff; + if (x & BIT(31)) + info->l2_tunnel_list_id = y >> 18; + else + info->l2_tunnel_list_id = -1; + pr_debug("%s read tagged %016llx, profile-id %d, uc %d, mc %d, intf-id %d\n", __func__, + info->tagged_ports, info->profile_id, info->hash_uc_fid, info->hash_mc_fid, + info->if_id); + + // Read UNTAG table via table register 3 + r = rtl_table_get(RTL9310_TBL_3, 0); + rtl_table_read(r, vlan); + v = ((u64)sw_r32(rtl_table_data(r, 0))) << 25; + v |= sw_r32(rtl_table_data(r, 1)) >> 7; + rtl_table_release(r); + + info->untagged_ports = v; +} + +static void rtl931x_vlan_set_tagged(u32 vlan, struct rtl838x_vlan_info *info) +{ + u32 v, w, x, y; + // Access VLAN table (1) via register 0 + struct table_reg *r = rtl_table_get(RTL9310_TBL_0, 3); + + v = info->tagged_ports >> 25; + w = (info->tagged_ports & 0x1fffff) << 7; + w |= info->fid & 0x7f; + x = info->hash_uc_fid ? BIT(31) : 0; + x |= info->hash_mc_fid ? 
BIT(30) : 0; + x |= info->if_id & 0x3ff << 20; + x |= (info->profile_id & 0xf) << 16; + x |= info->multicast_grp_mask & 0xffff; + if (info->l2_tunnel_list_id >= 0) { + y = info->l2_tunnel_list_id << 18; + y |= BIT(31); + } else { + y = 0; + } + + sw_w32(v, rtl_table_data(r, 0)); + sw_w32(w, rtl_table_data(r, 1)); + sw_w32(x, rtl_table_data(r, 2)); + sw_w32(y, rtl_table_data(r, 3)); + + rtl_table_write(r, vlan); + rtl_table_release(r); +} + +static void rtl931x_vlan_set_untagged(u32 vlan, u64 portmask) +{ + struct table_reg *r = rtl_table_get(RTL9310_TBL_3, 0); + + rtl839x_set_port_reg_be(portmask << 7, rtl_table_data(r, 0)); + rtl_table_write(r, vlan); + rtl_table_release(r); +} + +static inline int rtl931x_mac_force_mode_ctrl(int p) +{ + return RTL931X_MAC_FORCE_MODE_CTRL + (p << 2); +} + +static inline int rtl931x_mac_link_spd_sts(int p) +{ + return RTL931X_MAC_LINK_SPD_STS + (((p >> 3) << 2)); +} + +static inline int rtl931x_mac_port_ctrl(int p) +{ + return RTL931X_MAC_L2_PORT_CTRL + (p << 7); +} + +static inline int rtl931x_l2_port_new_salrn(int p) +{ + return RTL931X_L2_PORT_NEW_SALRN(p); +} + +static inline int rtl931x_l2_port_new_sa_fwd(int p) +{ + return RTL931X_L2_PORT_NEW_SA_FWD(p); +} + +irqreturn_t rtl931x_switch_irq(int irq, void *dev_id) +{ + struct dsa_switch *ds = dev_id; + u32 status = sw_r32(RTL931X_ISR_GLB_SRC); + u64 ports = rtl839x_get_port_reg_le(RTL931X_ISR_PORT_LINK_STS_CHG); + u64 link; + int i; + + /* Clear status */ + rtl839x_set_port_reg_le(ports, RTL931X_ISR_PORT_LINK_STS_CHG); + pr_debug("RTL931X Link change: status: %x, ports %016llx\n", status, ports); + + link = rtl839x_get_port_reg_le(RTL931X_MAC_LINK_STS); + // Must re-read this to get correct status + link = rtl839x_get_port_reg_le(RTL931X_MAC_LINK_STS); + pr_debug("RTL931X Link change: status: %x, link status %016llx\n", status, link); + + for (i = 0; i < 56; i++) { + if (ports & BIT_ULL(i)) { + if (link & BIT_ULL(i)) { + pr_info("%s port %d up\n", __func__, i); + dsa_port_phylink_mac_change(ds, i, true); + } else { + pr_info("%s port %d down\n", __func__, i); + dsa_port_phylink_mac_change(ds, i, false); + } + } + } + return IRQ_HANDLED; +} + +int rtl931x_write_phy(u32 port, u32 page, u32 reg, u32 val) +{ + u32 v; + int err = 0; + + val &= 0xffff; + if (port > 63 || page > 4095 || reg > 31) + return -ENOTSUPP; + + mutex_lock(&smi_lock); + pr_debug("%s: writing to phy %d %d %d %d\n", __func__, port, page, reg, val); + /* Clear both port registers */ + sw_w32(0, RTL931X_SMI_INDRT_ACCESS_CTRL_2); + sw_w32(0, RTL931X_SMI_INDRT_ACCESS_CTRL_2 + 4); + sw_w32_mask(0, BIT(port % 32), RTL931X_SMI_INDRT_ACCESS_CTRL_2 + (port / 32) * 4); + + sw_w32_mask(0xffff, val, RTL931X_SMI_INDRT_ACCESS_CTRL_3); + + v = reg << 6 | page << 11 ; + sw_w32(v, RTL931X_SMI_INDRT_ACCESS_CTRL_0); + + sw_w32(0x1ff, RTL931X_SMI_INDRT_ACCESS_CTRL_1); + + v |= BIT(4) | 1; /* Write operation and execute */ + sw_w32(v, RTL931X_SMI_INDRT_ACCESS_CTRL_0); + + do { + } while (sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_0) & 0x1); + + if (sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_0) & 0x2) + err = -EIO; + + mutex_unlock(&smi_lock); + return err; +} + +int rtl931x_read_phy(u32 port, u32 page, u32 reg, u32 *val) +{ + u32 v; + + if (port > 63 || page > 4095 || reg > 31) + return -ENOTSUPP; + + mutex_lock(&smi_lock); + + sw_w32(port << 5, RTL931X_SMI_INDRT_ACCESS_BC_PHYID_CTRL); + + v = reg << 6 | page << 11 | 1; + sw_w32(v, RTL931X_SMI_INDRT_ACCESS_CTRL_0); + + do { + } while (sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_0) & 0x1); + + v = 
sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_0); + *val = sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_3); + *val = (*val & 0xffff0000) >> 16; + + pr_debug("%s: port %d, page: %d, reg: %x, val: %x, v: %08x\n", + __func__, port, page, reg, *val, v); + + mutex_unlock(&smi_lock); + return 0; +} + +/* + * Read an mmd register of the PHY + */ +int rtl931x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val) +{ + int err = 0; + u32 v; + /* Select PHY register type + * If select 1G/10G MMD register type, registers EXT_PAGE, MAIN_PAGE and REG settings are don’t care. + * 0x0 Normal register (Clause 22) + * 0x1: 1G MMD register (MMD via Clause 22 registers 13 and 14) + * 0x2: 10G MMD register (MMD via Clause 45) + */ + int type = (regnum & MII_ADDR_C45)?2:1; + + mutex_lock(&smi_lock); + + // Set PHY to access via port-number + sw_w32(port << 5, RTL931X_SMI_INDRT_ACCESS_BC_PHYID_CTRL); + + // Set MMD device number and register to write to + sw_w32(devnum << 16 | mdiobus_c45_regad(regnum), RTL931X_SMI_INDRT_ACCESS_MMD_CTRL); + + v = type << 2 | BIT(0); // MMD-access-type | EXEC + sw_w32(v, RTL931X_SMI_INDRT_ACCESS_CTRL_0); + + do { + v = sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_0); + } while (v & BIT(0)); + + // Check for error condition + if (v & BIT(1)) + err = -EIO; + + *val = sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_3) >> 16; + + pr_debug("%s: port %d, dev: %x, regnum: %x, val: %x (err %d)\n", __func__, + port, devnum, mdiobus_c45_regad(regnum), *val, err); + + mutex_unlock(&smi_lock); + + return err; +} + +/* + * Write to an mmd register of the PHY + */ +int rtl931x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val) +{ + int err = 0; + u32 v; + int type = (regnum & MII_ADDR_C45)?2:1; + u64 pm; + + mutex_lock(&smi_lock); + + // Set PHY to access via port-mask + pm = (u64)1 << port; + sw_w32((u32)pm, RTL931X_SMI_INDRT_ACCESS_CTRL_2); + sw_w32((u32)(pm >> 32), RTL931X_SMI_INDRT_ACCESS_CTRL_2 + 4); + + // Set data to write + sw_w32_mask(0xffff, val, RTL931X_SMI_INDRT_ACCESS_CTRL_3); + + // Set MMD device number and register to write to + sw_w32(devnum << 16 | mdiobus_c45_regad(regnum), RTL931X_SMI_INDRT_ACCESS_MMD_CTRL); + + v = BIT(4) | type << 2 | BIT(0); // WRITE | MMD-access-type | EXEC + sw_w32(v, RTL931X_SMI_INDRT_ACCESS_CTRL_0); + + do { + v = sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_0); + } while (v & BIT(0)); + + pr_debug("%s: port %d, dev: %x, regnum: %x, val: %x (err %d)\n", __func__, + port, devnum, mdiobus_c45_regad(regnum), val, err); + mutex_unlock(&smi_lock); + return err; +} + +void rtl931x_print_matrix(void) +{ + volatile u64 *ptr = RTL838X_SW_BASE + RTL839X_PORT_ISO_CTRL(0); + int i; + + for (i = 0; i < 52; i += 4) + pr_info("> %16llx %16llx %16llx %16llx\n", + ptr[i + 0], ptr[i + 1], ptr[i + 2], ptr[i + 3]); + pr_info("CPU_PORT> %16llx\n", ptr[52]); +} + +void rtl931x_set_receive_management_action(int port, rma_ctrl_t type, action_type_t action) +{ + u32 value = 0; + + /* hack for value mapping */ + if (type == GRATARP && action == COPY2CPU) + action = TRAP2MASTERCPU; + + switch(action) { + case FORWARD: + value = 0; + break; + case DROP: + value = 1; + break; + case TRAP2CPU: + value = 2; + break; + case TRAP2MASTERCPU: + value = 3; + break; + case FLOODALL: + value = 4; + break; + default: + break; + } + + switch(type) { + case BPDU: + sw_w32_mask(7 << ((port % 10) * 3), value << ((port % 10) * 3), RTL931X_RMA_BPDU_CTRL + ((port / 10) << 2)); + break; + case PTP: + //udp + sw_w32_mask(3 << 2, value << 2, RTL931X_RMA_PTP_CTRL + (port << 2)); + //eth2 + sw_w32_mask(3, value, RTL931X_RMA_PTP_CTRL + 
(port << 2)); + break; + case PTP_UDP: + sw_w32_mask(3 << 2, value << 2, RTL931X_RMA_PTP_CTRL + (port << 2)); + break; + case PTP_ETH2: + sw_w32_mask(3, value, RTL931X_RMA_PTP_CTRL + (port << 2)); + break; + case LLTP: + sw_w32_mask(7 << ((port % 10) * 3), value << ((port % 10) * 3), RTL931X_RMA_LLTP_CTRL + ((port / 10) << 2)); + break; + case EAPOL: + sw_w32_mask(7 << ((port % 10) * 3), value << ((port % 10) * 3), RTL931X_RMA_EAPOL_CTRL + ((port / 10) << 2)); + break; + case GRATARP: + sw_w32_mask(3 << ((port & 0xf) << 1), value << ((port & 0xf) << 1), RTL931X_TRAP_ARP_GRAT_PORT_ACT + ((port >> 4) << 2)); + break; + } +} + +u64 rtl931x_traffic_get(int source) +{ + u32 v; + struct table_reg *r = rtl_table_get(RTL9310_TBL_0, 6); + + rtl_table_read(r, source); + v = sw_r32(rtl_table_data(r, 0)); + rtl_table_release(r); + return v >> 3; +} + +/* + * Enable traffic between a source port and a destination port matrix + */ +void rtl931x_traffic_set(int source, u64 dest_matrix) +{ + struct table_reg *r = rtl_table_get(RTL9310_TBL_0, 6); + + sw_w32((dest_matrix << 3), rtl_table_data(r, 0)); + rtl_table_write(r, source); + rtl_table_release(r); +} + +void rtl931x_traffic_enable(int source, int dest) +{ + struct table_reg *r = rtl_table_get(RTL9310_TBL_0, 6); + rtl_table_read(r, source); + sw_w32_mask(0, BIT(dest + 3), rtl_table_data(r, 0)); + rtl_table_write(r, source); + rtl_table_release(r); +} + +void rtl931x_traffic_disable(int source, int dest) +{ + struct table_reg *r = rtl_table_get(RTL9310_TBL_0, 6); + rtl_table_read(r, source); + sw_w32_mask(BIT(dest + 3), 0, rtl_table_data(r, 0)); + rtl_table_write(r, source); + rtl_table_release(r); +} + +static u64 rtl931x_l2_hash_seed(u64 mac, u32 vid) +{ + u64 v = vid; + + v <<= 48; + v |= mac; + + return v; +} + +/* + * Calculate both the block 0 and the block 1 hash by applyingthe same hash + * algorithm as the one used currently by the ASIC to the seed, and return + * both hashes in the lower and higher word of the return value since only 12 bit of + * the hash are significant. 
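 * A caller can unpack the two bucket indices from the combined return value
 * roughly as sketched below (illustrative only; it simply mirrors the masking
 * done in rtl931x_read_l2_entry_using_hash() further down):
 *
 *   u32 key = rtl931x_l2_hash_key(priv, rtl931x_l2_hash_seed(mac, vid));
 *   u32 bucket0 = key & 0xffff;          /* block 0 index, < 4096 */
 *   u32 bucket1 = (key >> 16) & 0xffff;  /* block 1 index, already offset by 4096 */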
+ */ +static u32 rtl931x_l2_hash_key(struct rtl838x_switch_priv *priv, u64 seed) +{ + u32 h, h0, h1, h2, h3, h4, k0, k1; + + h0 = seed & 0xfff; + h1 = (seed >> 12) & 0xfff; + h2 = (seed >> 24) & 0xfff; + h3 = (seed >> 36) & 0xfff; + h4 = (seed >> 48) & 0xfff; + h4 = ((h4 & 0x7) << 9) | ((h4 >> 3) & 0x1ff); + k0 = h0 ^ h1 ^ h2 ^ h3 ^ h4; + + h0 = seed & 0xfff; + h0 = ((h0 & 0x1ff) << 3) | ((h0 >> 9) & 0x7); + h1 = (seed >> 12) & 0xfff; + h1 = ((h1 & 0x3f) << 6) | ((h1 >> 6) & 0x3f); + h2 = (seed >> 24) & 0xfff; + h3 = (seed >> 36) & 0xfff; + h3 = ((h3 & 0x3f) << 6) | ((h3 >> 6) & 0x3f); + h4 = (seed >> 48) & 0xfff; + k1 = h0 ^ h1 ^ h2 ^ h3 ^ h4; + + // Algorithm choice for block 0 + if (sw_r32(RTL931X_L2_CTRL) & BIT(0)) + h = k1; + else + h = k0; + + /* Algorithm choice for block 1 + * Since k0 and k1 are < 4096, adding 4096 will offset the hash into the second + * half of hash-space + * 4096 is in fact the hash-table size 32768 divided by 4 hashes per bucket + * divided by 2 to divide the hash space in 2 + */ + if (sw_r32(RTL931X_L2_CTRL) & BIT(1)) + h |= (k1 + 4096) << 16; + else + h |= (k0 + 4096) << 16; + + return h; +} + +/* + * Fills an L2 entry structure from the SoC registers + */ +static void rtl931x_fill_l2_entry(u32 r[], struct rtl838x_l2_entry *e) +{ + pr_debug("In %s valid?\n", __func__); + e->valid = !!(r[0] & BIT(31)); + if (!e->valid) + return; + + pr_debug("%s: entry valid, raw: %08x %08x %08x %08x\n", __func__, r[0], r[1], r[2], r[3]); + e->is_ip_mc = false; + e->is_ipv6_mc = false; + + e->mac[0] = r[0] >> 8; + e->mac[1] = r[0]; + e->mac[2] = r[1] >> 24; + e->mac[3] = r[1] >> 16; + e->mac[4] = r[1] >> 8; + e->mac[5] = r[1]; + + e->is_open_flow = !!(r[0] & BIT(30)); + e->is_pe_forward = !!(r[0] & BIT(29)); + e->next_hop = !!(r[2] & BIT(30)); + e->rvid = (r[0] >> 16) & 0xfff; + + /* Is it a unicast entry? check multicast bit */ + if (!(e->mac[0] & 1)) { + e->type = L2_UNICAST; + e->is_l2_tunnel = !!(r[2] & BIT(31)); + e->is_static = !!(r[2] & BIT(13)); + e->port = (r[2] >> 19) & 0x3ff; + // Check for trunk port + if (r[2] & BIT(29)) { + e->is_trunk = true; + e->stack_dev = (e->port >> 9) & 1; + e->trunk = e->port & 0x3f; + } else { + e->is_trunk = false; + e->stack_dev = (e->port >> 6) & 0xf; + e->port = e->port & 0x3f; + } + + e->block_da = !!(r[2] & BIT(14)); + e->block_sa = !!(r[2] & BIT(15)); + e->suspended = !!(r[2] & BIT(12)); + e->age = (r[2] >> 16) & 3; + + // the UC_VID field in hardware is used for the VID or for the route id + if (e->next_hop) { + e->nh_route_id = r[2] & 0x7ff; + e->vid = 0; + } else { + e->vid = r[2] & 0xfff; + e->nh_route_id = 0; + } + if (e->is_l2_tunnel) + e->l2_tunnel_id = ((r[2] & 0xff) << 4) | (r[3] >> 28); + // TODO: Implement VLAN conversion + } else { + e->type = L2_MULTICAST; + e->is_local_forward = !!(r[2] & BIT(31)); + e->is_remote_forward = !!(r[2] & BIT(17)); + e->mc_portmask_index = (r[2] >> 18) & 0xfff; + e->l2_tunnel_list_id = (r[2] >> 4) & 0x1fff; + } +} + +/* + * Fills the 3 SoC table registers r[] with the information of in the rtl838x_l2_entry + */ +static void rtl931x_fill_l2_row(u32 r[], struct rtl838x_l2_entry *e) +{ + u32 port; + + if (!e->valid) { + r[0] = r[1] = r[2] = 0; + return; + } + + r[2] = BIT(31); // Set valid bit + + r[0] = ((u32)e->mac[0]) << 24 | ((u32)e->mac[1]) << 16 + | ((u32)e->mac[2]) << 8 | ((u32)e->mac[3]); + r[1] = ((u32)e->mac[4]) << 24 | ((u32)e->mac[5]) << 16; + + r[2] |= e->next_hop ? BIT(12) : 0; + + if (e->type == L2_UNICAST) { + r[2] |= e->is_static ? 
BIT(14) : 0; + r[1] |= e->rvid & 0xfff; + r[2] |= (e->port & 0x3ff) << 20; + if (e->is_trunk) { + r[2] |= BIT(30); + port = e->stack_dev << 9 | (e->port & 0x3f); + } else { + port = (e->stack_dev & 0xf) << 6; + port |= e->port & 0x3f; + } + r[2] |= port << 20; + r[2] |= e->block_da ? BIT(15) : 0; + r[2] |= e->block_sa ? BIT(17) : 0; + r[2] |= e->suspended ? BIT(13) : 0; + r[2] |= (e->age & 0x3) << 17; + // the UC_VID field in hardware is used for the VID or for the route id + if (e->next_hop) + r[2] |= e->nh_route_id & 0x7ff; + else + r[2] |= e->vid & 0xfff; + } else { // L2_MULTICAST + r[2] |= (e->mc_portmask_index & 0x3ff) << 16; + r[2] |= e->mc_mac_index & 0x7ff; + } +} + +/* + * Read an L2 UC or MC entry out of a hash bucket of the L2 forwarding table + * hash is the id of the bucket and pos is the position of the entry in that bucket + * The data read from the SoC is filled into rtl838x_l2_entry + */ +static u64 rtl931x_read_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e) +{ + u32 r[4]; + struct table_reg *q = rtl_table_get(RTL9310_TBL_0, 0); + u32 idx; + int i; + u64 mac; + u64 seed; + + pr_debug("%s: hash %08x, pos: %d\n", __func__, hash, pos); + + /* On the RTL93xx, 2 different hash algorithms are used making it a total of + * 8 buckets that need to be searched, 4 for each hash-half + * Use second hash space when bucket is between 4 and 8 */ + if (pos >= 4) { + pos -= 4; + hash >>= 16; + } else { + hash &= 0xffff; + } + + idx = (0 << 14) | (hash << 2) | pos; // Search SRAM, with hash and at pos in bucket + pr_debug("%s: NOW hash %08x, pos: %d\n", __func__, hash, pos); + + rtl_table_read(q, idx); + for (i = 0; i < 4; i++) + r[i] = sw_r32(rtl_table_data(q, i)); + + rtl_table_release(q); + + rtl931x_fill_l2_entry(r, e); + + pr_debug("%s: valid: %d, nh: %d\n", __func__, e->valid, e->next_hop); + if (!e->valid) + return 0; + + mac = ((u64)e->mac[0]) << 40 | ((u64)e->mac[1]) << 32 | ((u64)e->mac[2]) << 24 + | ((u64)e->mac[3]) << 16 | ((u64)e->mac[4]) << 8 | ((u64)e->mac[5]); + + seed = rtl931x_l2_hash_seed(mac, e->rvid); + pr_debug("%s: mac %016llx, seed %016llx\n", __func__, mac, seed); + // return vid with concatenated mac as unique id + return seed; +} + +static u64 rtl931x_read_cam(int idx, struct rtl838x_l2_entry *e) +{ + return 0; +} + +static void rtl931x_write_cam(int idx, struct rtl838x_l2_entry *e) +{ +} + +static void rtl931x_write_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e) +{ + u32 r[4]; + struct table_reg *q = rtl_table_get(RTL9310_TBL_0, 0); + u32 idx = (0 << 14) | (hash << 2) | pos; // Access SRAM, with hash and at pos in bucket + int i; + + pr_info("%s: hash %d, pos %d\n", __func__, hash, pos); + pr_info("%s: index %d -> mac %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, idx, + e->mac[0], e->mac[1], e->mac[2], e->mac[3],e->mac[4],e->mac[5]); + + rtl931x_fill_l2_row(r, e); + pr_info("%s: %d: %08x %08x %08x\n", __func__, idx, r[0], r[1], r[2]); + + for (i= 0; i < 4; i++) + sw_w32(r[i], rtl_table_data(q, i)); + + rtl_table_write(q, idx); + rtl_table_release(q); + +} + +static void rtl931x_vlan_fwd_on_inner(int port, bool is_set) +{ + // Always set all tag modes to fwd based on either inner or outer tag + if (is_set) + sw_w32_mask(0, 0xf, RTL931X_VLAN_PORT_FWD + (port << 2)); + else + sw_w32_mask(0xf, 0, RTL931X_VLAN_PORT_FWD + (port << 2)); +} + +static void rtl931x_vlan_profile_setup(int profile) +{ + u32 p[7]; + int i; + + pr_info("In %s\n", __func__); + + if (profile > 15) + return; + + p[0] = 
sw_r32(RTL931X_VLAN_PROFILE_SET(profile)); + + // Enable routing of Ipv4/6 Unicast and IPv4/6 Multicast traffic + //p[0] |= BIT(17) | BIT(16) | BIT(13) | BIT(12); + p[0] |= 0x3 << 11; // COPY2CPU + + p[1] = 0x1FFFFFF; // L2 unknwon MC flooding portmask all ports, including the CPU-port + p[2] = 0xFFFFFFFF; + p[3] = 0x1FFFFFF; // IPv4 unknwon MC flooding portmask + p[4] = 0xFFFFFFFF; + p[5] = 0x1FFFFFF; // IPv6 unknwon MC flooding portmask + p[6] = 0xFFFFFFFF; + + for (i = 0; i < 7; i++) + sw_w32(p[i], RTL931X_VLAN_PROFILE_SET(profile) + i * 4); + pr_info("Leaving %s\n", __func__); +} + +static void rtl931x_l2_learning_setup(void) +{ + // Portmask for flooding broadcast traffic + rtl839x_set_port_reg_be(0x1FFFFFFFFFFFFFF, RTL931X_L2_BC_FLD_PMSK); + + // Portmask for flooding unicast traffic with unknown destination + rtl839x_set_port_reg_be(0x1FFFFFFFFFFFFFF, RTL931X_L2_UNKN_UC_FLD_PMSK); + + // Limit learning to maximum: 64k entries, after that just flood (bits 0-2) + sw_w32((0xffff << 3) | FORWARD, RTL931X_L2_LRN_CONSTRT_CTRL); +} + +static u64 rtl931x_read_mcast_pmask(int idx) +{ + u64 portmask; + // Read MC_PMSK (2) via register RTL9310_TBL_0 + struct table_reg *q = rtl_table_get(RTL9310_TBL_0, 2); + + rtl_table_read(q, idx); + portmask = sw_r32(rtl_table_data(q, 0)); + portmask <<= 32; + portmask |= sw_r32(rtl_table_data(q, 1)); + portmask >>= 7; + rtl_table_release(q); + + pr_debug("%s: Index idx %d has portmask %016llx\n", __func__, idx, portmask); + return portmask; +} + +static void rtl931x_write_mcast_pmask(int idx, u64 portmask) +{ + u64 pm = portmask; + + // Access MC_PMSK (2) via register RTL9310_TBL_0 + struct table_reg *q = rtl_table_get(RTL9310_TBL_0, 2); + + pr_debug("%s: Index idx %d has portmask %016llx\n", __func__, idx, pm); + pm <<= 7; + sw_w32((u32)(pm >> 32), rtl_table_data(q, 0)); + sw_w32((u32)pm, rtl_table_data(q, 1)); + rtl_table_write(q, idx); + rtl_table_release(q); +} + + +static int rtl931x_set_ageing_time(unsigned long msec) +{ + int t = sw_r32(RTL931X_L2_AGE_CTRL); + + t &= 0x1FFFFF; + t = (t * 8) / 10; + pr_debug("L2 AGING time: %d sec\n", t); + + t = (msec / 100 + 7) / 8; + t = t > 0x1FFFFF ? 0x1FFFFF : t; + sw_w32_mask(0x1FFFFF, t, RTL931X_L2_AGE_CTRL); + pr_debug("Dynamic aging for ports: %x\n", sw_r32(RTL931X_L2_PORT_AGE_CTRL)); + return 0; +} +void rtl931x_sw_init(struct rtl838x_switch_priv *priv) +{ +// rtl931x_sds_init(priv); +} + +static void rtl931x_pie_lookup_enable(struct rtl838x_switch_priv *priv, int index) +{ + int block = index / PIE_BLOCK_SIZE; + + sw_w32_mask(0, BIT(block), RTL931X_PIE_BLK_LOOKUP_CTRL); +} + +/* + * Fills the data in the intermediate representation in the pie_rule structure + * into a data field for a given template field field_type + * TODO: This function looks very similar to the function of the rtl9300, but + * since it uses the physical template_field_id, which are different for each + * SoC and there are other field types, it is actually not. 
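 * (As a concrete illustration of the packing done in the switch below, using
 * an arbitrarily chosen address: an SMAC of 00:11:22:33:44:55 is split into
 * SMAC0 = 0x4455, SMAC1 = 0x2233 and SMAC2 = 0x0011, and the corresponding
 * mask bytes are packed the same way.)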
If we would also use + * an intermediate representation for a field type, we would could have one + * pie_data_fill function for all SoCs, provided we have also for each SoC a + * function to map between physical and intermediate field type + */ +int rtl931x_pie_data_fill(enum template_field_id field_type, struct pie_rule *pr, u16 *data, u16 *data_m) +{ + *data = *data_m = 0; + + switch (field_type) { + case TEMPLATE_FIELD_SPM0: + *data = pr->spm; + *data_m = pr->spm_m; + break; + case TEMPLATE_FIELD_SPM1: + *data = pr->spm >> 16; + *data_m = pr->spm_m >> 16; + break; + case TEMPLATE_FIELD_OTAG: + *data = pr->otag; + *data_m = pr->otag_m; + break; + case TEMPLATE_FIELD_SMAC0: + *data = pr->smac[4]; + *data = (*data << 8) | pr->smac[5]; + *data_m = pr->smac_m[4]; + *data_m = (*data_m << 8) | pr->smac_m[5]; + break; + case TEMPLATE_FIELD_SMAC1: + *data = pr->smac[2]; + *data = (*data << 8) | pr->smac[3]; + *data_m = pr->smac_m[2]; + *data_m = (*data_m << 8) | pr->smac_m[3]; + break; + case TEMPLATE_FIELD_SMAC2: + *data = pr->smac[0]; + *data = (*data << 8) | pr->smac[1]; + *data_m = pr->smac_m[0]; + *data_m = (*data_m << 8) | pr->smac_m[1]; + break; + case TEMPLATE_FIELD_DMAC0: + *data = pr->dmac[4]; + *data = (*data << 8) | pr->dmac[5]; + *data_m = pr->dmac_m[4]; + *data_m = (*data_m << 8) | pr->dmac_m[5]; + break; + case TEMPLATE_FIELD_DMAC1: + *data = pr->dmac[2]; + *data = (*data << 8) | pr->dmac[3]; + *data_m = pr->dmac_m[2]; + *data_m = (*data_m << 8) | pr->dmac_m[3]; + break; + case TEMPLATE_FIELD_DMAC2: + *data = pr->dmac[0]; + *data = (*data << 8) | pr->dmac[1]; + *data_m = pr->dmac_m[0]; + *data_m = (*data_m << 8) | pr->dmac_m[1]; + break; + case TEMPLATE_FIELD_ETHERTYPE: + *data = pr->ethertype; + *data_m = pr->ethertype_m; + break; + case TEMPLATE_FIELD_ITAG: + *data = pr->itag; + *data_m = pr->itag_m; + break; + case TEMPLATE_FIELD_SIP0: + if (pr->is_ipv6) { + *data = pr->sip6.s6_addr16[7]; + *data_m = pr->sip6_m.s6_addr16[7]; + } else { + *data = pr->sip; + *data_m = pr->sip_m; + } + break; + case TEMPLATE_FIELD_SIP1: + if (pr->is_ipv6) { + *data = pr->sip6.s6_addr16[6]; + *data_m = pr->sip6_m.s6_addr16[6]; + } else { + *data = pr->sip >> 16; + *data_m = pr->sip_m >> 16; + } + break; + case TEMPLATE_FIELD_SIP2: + case TEMPLATE_FIELD_SIP3: + case TEMPLATE_FIELD_SIP4: + case TEMPLATE_FIELD_SIP5: + case TEMPLATE_FIELD_SIP6: + case TEMPLATE_FIELD_SIP7: + *data = pr->sip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)]; + *data_m = pr->sip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)]; + break; + + case TEMPLATE_FIELD_DIP0: + if (pr->is_ipv6) { + *data = pr->dip6.s6_addr16[7]; + *data_m = pr->dip6_m.s6_addr16[7]; + } else { + *data = pr->dip; + *data_m = pr->dip_m; + } + break; + case TEMPLATE_FIELD_DIP1: + if (pr->is_ipv6) { + *data = pr->dip6.s6_addr16[6]; + *data_m = pr->dip6_m.s6_addr16[6]; + } else { + *data = pr->dip >> 16; + *data_m = pr->dip_m >> 16; + } + break; + + case TEMPLATE_FIELD_DIP2: + case TEMPLATE_FIELD_DIP3: + case TEMPLATE_FIELD_DIP4: + case TEMPLATE_FIELD_DIP5: + case TEMPLATE_FIELD_DIP6: + case TEMPLATE_FIELD_DIP7: + *data = pr->dip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)]; + *data_m = pr->dip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)]; + break; + + case TEMPLATE_FIELD_IP_TOS_PROTO: + *data = pr->tos_proto; + *data_m = pr->tos_proto_m; + break; + case TEMPLATE_FIELD_L4_SPORT: + *data = pr->sport; + *data_m = pr->sport_m; + break; + case TEMPLATE_FIELD_L4_DPORT: + *data = pr->dport; + *data_m = pr->dport_m; + break; + case 
TEMPLATE_FIELD_DSAP_SSAP: + *data = pr->dsap_ssap; + *data_m = pr->dsap_ssap_m; + break; + case TEMPLATE_FIELD_TCP_INFO: + *data = pr->tcp_info; + *data_m = pr->tcp_info_m; + break; + case TEMPLATE_FIELD_RANGE_CHK: + pr_info("TEMPLATE_FIELD_RANGE_CHK: not configured\n"); + break; + default: + pr_info("%s: unknown field %d\n", __func__, field_type); + return -1; + } + + return 0; +} + +/* + * Reads the intermediate representation of the templated match-fields of the + * PIE rule in the pie_rule structure and fills in the raw data fields in the + * raw register space r[]. + * The register space configuration size is identical for the RTL8380/90 and RTL9300, + * however the RTL931X has 2 more registers / fields and the physical field-ids are different + * on all SoCs + * On the RTL9300 the mask fields are not word-aligend! + */ +static void rtl931x_write_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[]) +{ + int i; + u16 data, data_m; + + for (i = 0; i < N_FIXED_FIELDS; i++) { + rtl931x_pie_data_fill(t[i], pr, &data, &data_m); + + // On the RTL9300, the mask fields are not word aligned! + if (!(i % 2)) { + r[5 - i / 2] = data; + r[12 - i / 2] |= ((u32)data_m << 8); + } else { + r[5 - i / 2] |= ((u32)data) << 16; + r[12 - i / 2] |= ((u32)data_m) << 24; + r[11 - i / 2] |= ((u32)data_m) >> 8; + } + } +} + +static void rtl931x_read_pie_fixed_fields(u32 r[], struct pie_rule *pr) +{ + pr->mgnt_vlan = r[7] & BIT(31); + if (pr->phase == PHASE_IACL) + pr->dmac_hit_sw = r[7] & BIT(30); + else // TODO: EACL/VACL phase handling + pr->content_too_deep = r[7] & BIT(30); + pr->not_first_frag = r[7] & BIT(29); + pr->frame_type_l4 = (r[7] >> 26) & 7; + pr->frame_type = (r[7] >> 24) & 3; + pr->otag_fmt = (r[7] >> 23) & 1; + pr->itag_fmt = (r[7] >> 22) & 1; + pr->otag_exist = (r[7] >> 21) & 1; + pr->itag_exist = (r[7] >> 20) & 1; + pr->frame_type_l2 = (r[7] >> 18) & 3; + pr->igr_normal_port = (r[7] >> 17) & 1; + pr->tid = (r[7] >> 16) & 1; + + pr->mgnt_vlan_m = r[14] & BIT(15); + if (pr->phase == PHASE_IACL) + pr->dmac_hit_sw_m = r[14] & BIT(14); + else + pr->content_too_deep_m = r[14] & BIT(14); + pr->not_first_frag_m = r[14] & BIT(13); + pr->frame_type_l4_m = (r[14] >> 10) & 7; + pr->frame_type_m = (r[14] >> 8) & 3; + pr->otag_fmt_m = r[14] & BIT(7); + pr->itag_fmt_m = r[14] & BIT(6); + pr->otag_exist_m = r[14] & BIT(5); + pr->itag_exist_m = r[14] & BIT (4); + pr->frame_type_l2_m = (r[14] >> 2) & 3; + pr->igr_normal_port_m = r[14] & BIT(1); + pr->tid_m = r[14] & 1; + + pr->valid = r[15] & BIT(31); + pr->cond_not = r[15] & BIT(30); + pr->cond_and1 = r[15] & BIT(29); + pr->cond_and2 = r[15] & BIT(28); +} + +static void rtl931x_write_pie_fixed_fields(u32 r[], struct pie_rule *pr) +{ + r[7] |= pr->mgnt_vlan ? BIT(31) : 0; + if (pr->phase == PHASE_IACL) + r[7] |= pr->dmac_hit_sw ? BIT(30) : 0; + else + r[7] |= pr->content_too_deep ? BIT(30) : 0; + r[7] |= pr->not_first_frag ? BIT(29) : 0; + r[7] |= ((u32) (pr->frame_type_l4 & 0x7)) << 26; + r[7] |= ((u32) (pr->frame_type & 0x3)) << 24; + r[7] |= pr->otag_fmt ? BIT(23) : 0; + r[7] |= pr->itag_fmt ? BIT(22) : 0; + r[7] |= pr->otag_exist ? BIT(21) : 0; + r[7] |= pr->itag_exist ? BIT(20) : 0; + r[7] |= ((u32) (pr->frame_type_l2 & 0x3)) << 18; + r[7] |= pr->igr_normal_port ? BIT(17) : 0; + r[7] |= ((u32) (pr->tid & 0x1)) << 16; + + r[14] |= pr->mgnt_vlan_m ? BIT(15) : 0; + if (pr->phase == PHASE_IACL) + r[14] |= pr->dmac_hit_sw_m ? BIT(14) : 0; + else + r[14] |= pr->content_too_deep_m ? BIT(14) : 0; + r[14] |= pr->not_first_frag_m ? 
BIT(13) : 0; + r[14] |= ((u32) (pr->frame_type_l4_m & 0x7)) << 10; + r[14] |= ((u32) (pr->frame_type_m & 0x3)) << 8; + r[14] |= pr->otag_fmt_m ? BIT(7) : 0; + r[14] |= pr->itag_fmt_m ? BIT(6) : 0; + r[14] |= pr->otag_exist_m ? BIT(5) : 0; + r[14] |= pr->itag_exist_m ? BIT(4) : 0; + r[14] |= ((u32) (pr->frame_type_l2_m & 0x3)) << 2; + r[14] |= pr->igr_normal_port_m ? BIT(1) : 0; + r[14] |= (u32) (pr->tid_m & 0x1); + + r[15] |= pr->valid ? BIT(31) : 0; + r[15] |= pr->cond_not ? BIT(30) : 0; + r[15] |= pr->cond_and1 ? BIT(29) : 0; + r[15] |= pr->cond_and2 ? BIT(28) : 0; +} + +static void rtl931x_write_pie_action(u32 r[], struct pie_rule *pr) +{ + // Either drop or forward + if (pr->drop) { + r[15] |= BIT(11) | BIT(12) | BIT(13); // Do Green, Yellow and Red drops + // Actually DROP, not PERMIT in Green / Yellow / Red + r[16] |= BIT(27) | BIT(28) | BIT(29); + } else { + r[15] |= pr->fwd_sel ? BIT(14) : 0; + r[16] |= pr->fwd_act << 24; + r[16] |= BIT(21); // We overwrite any drop + } + if (pr->phase == PHASE_VACL) + r[16] |= pr->fwd_sa_lrn ? BIT(22) : 0; + r[15] |= pr->bypass_sel ? BIT(10) : 0; + r[15] |= pr->nopri_sel ? BIT(21) : 0; + r[15] |= pr->tagst_sel ? BIT(20) : 0; + r[15] |= pr->ovid_sel ? BIT(18) : 0; + r[15] |= pr->ivid_sel ? BIT(16) : 0; + r[15] |= pr->meter_sel ? BIT(27) : 0; + r[15] |= pr->mir_sel ? BIT(15) : 0; + r[15] |= pr->log_sel ? BIT(26) : 0; + + r[16] |= ((u32)(pr->fwd_data & 0xfff)) << 9; +// r[15] |= pr->log_octets ? BIT(31) : 0; + r[15] |= (u32)(pr->meter_data) >> 2; + r[16] |= (((u32)(pr->meter_data) >> 7) & 0x3) << 29; + + r[16] |= ((u32)(pr->ivid_act & 0x3)) << 21; + r[15] |= ((u32)(pr->ivid_data & 0xfff)) << 9; + r[16] |= ((u32)(pr->ovid_act & 0x3)) << 30; + r[16] |= ((u32)(pr->ovid_data & 0xfff)) << 16; + r[16] |= ((u32)(pr->mir_data & 0x3)) << 6; + r[17] |= ((u32)(pr->tagst_data & 0xf)) << 28; + r[17] |= ((u32)(pr->nopri_data & 0x7)) << 25; + r[17] |= pr->bypass_ibc_sc ? 
BIT(16) : 0; +} + +void rtl931x_pie_rule_dump_raw(u32 r[]) +{ + pr_info("Raw IACL table entry:\n"); + pr_info("r 0 - 7: %08x %08x %08x %08x %08x %08x %08x %08x\n", + r[0], r[1], r[2], r[3], r[4], r[5], r[6], r[7]); + pr_info("r 8 - 15: %08x %08x %08x %08x %08x %08x %08x %08x\n", + r[8], r[9], r[10], r[11], r[12], r[13], r[14], r[15]); + pr_info("r 16 - 18: %08x %08x %08x\n", r[16], r[17], r[18]); + pr_info("Match : %08x %08x %08x %08x %08x %08x\n", r[0], r[1], r[2], r[3], r[4], r[5]); + pr_info("Fixed : %06x\n", r[6] >> 8); + pr_info("Match M: %08x %08x %08x %08x %08x %08x\n", + (r[6] << 24) | (r[7] >> 8), (r[7] << 24) | (r[8] >> 8), (r[8] << 24) | (r[9] >> 8), + (r[9] << 24) | (r[10] >> 8), (r[10] << 24) | (r[11] >> 8), + (r[11] << 24) | (r[12] >> 8)); + pr_info("R[13]: %08x\n", r[13]); + pr_info("Fixed M: %06x\n", ((r[12] << 16) | (r[13] >> 16)) & 0xffffff); + pr_info("Valid / not / and1 / and2 : %1x\n", (r[13] >> 12) & 0xf); + pr_info("r 13-16: %08x %08x %08x %08x\n", r[13], r[14], r[15], r[16]); +} + +static int rtl931x_pie_rule_write(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr) +{ + // Access IACL table (0) via register 1, the table size is 4096 + struct table_reg *q = rtl_table_get(RTL9310_TBL_1, 0); + u32 r[22]; + int i; + int block = idx / PIE_BLOCK_SIZE; + u32 t_select = sw_r32(RTL931X_PIE_BLK_TMPLTE_CTRL(block)); + + pr_info("%s: %d, t_select: %08x\n", __func__, idx, t_select); + + for (i = 0; i < 22; i++) + r[i] = 0; + + if (!pr->valid) { + rtl_table_write(q, idx); + rtl_table_release(q); + return 0; + } + rtl931x_write_pie_fixed_fields(r, pr); + + pr_info("%s: template %d\n", __func__, (t_select >> (pr->tid * 4)) & 0xf); + rtl931x_write_pie_templated(r, pr, fixed_templates[(t_select >> (pr->tid * 4)) & 0xf]); + + rtl931x_write_pie_action(r, pr); + + rtl931x_pie_rule_dump_raw(r); + + for (i = 0; i < 22; i++) + sw_w32(r[i], rtl_table_data(q, i)); + + rtl_table_write(q, idx); + rtl_table_release(q); + + return 0; +} + +static bool rtl931x_pie_templ_has(int t, enum template_field_id field_type) +{ + int i; + enum template_field_id ft; + + for (i = 0; i < N_FIXED_FIELDS_RTL931X; i++) { + ft = fixed_templates[t][i]; + if (field_type == ft) + return true; + } + + return false; +} + +/* + * Verify that the rule pr is compatible with a given template t in block block + * Note that this function is SoC specific since the values of e.g. 
TEMPLATE_FIELD_SIP0 + * depend on the SoC + */ +static int rtl931x_pie_verify_template(struct rtl838x_switch_priv *priv, + struct pie_rule *pr, int t, int block) +{ + int i; + + if (!pr->is_ipv6 && pr->sip_m && !rtl931x_pie_templ_has(t, TEMPLATE_FIELD_SIP0)) + return -1; + + if (!pr->is_ipv6 && pr->dip_m && !rtl931x_pie_templ_has(t, TEMPLATE_FIELD_DIP0)) + return -1; + + if (pr->is_ipv6) { + if ((pr->sip6_m.s6_addr32[0] || pr->sip6_m.s6_addr32[1] + || pr->sip6_m.s6_addr32[2] || pr->sip6_m.s6_addr32[3]) + && !rtl931x_pie_templ_has(t, TEMPLATE_FIELD_SIP2)) + return -1; + if ((pr->dip6_m.s6_addr32[0] || pr->dip6_m.s6_addr32[1] + || pr->dip6_m.s6_addr32[2] || pr->dip6_m.s6_addr32[3]) + && !rtl931x_pie_templ_has(t, TEMPLATE_FIELD_DIP2)) + return -1; + } + + if (ether_addr_to_u64(pr->smac) && !rtl931x_pie_templ_has(t, TEMPLATE_FIELD_SMAC0)) + return -1; + + if (ether_addr_to_u64(pr->dmac) && !rtl931x_pie_templ_has(t, TEMPLATE_FIELD_DMAC0)) + return -1; + + // TODO: Check more + + i = find_first_zero_bit(&priv->pie_use_bm[block * 4], PIE_BLOCK_SIZE); + + if (i >= PIE_BLOCK_SIZE) + return -1; + + return i + PIE_BLOCK_SIZE * block; +} + +static int rtl931x_pie_rule_add(struct rtl838x_switch_priv *priv, struct pie_rule *pr) +{ + int idx, block, j, t; + int min_block = 0; + int max_block = priv->n_pie_blocks / 2; + + if (pr->is_egress) { + min_block = max_block; + max_block = priv->n_pie_blocks; + } + pr_info("In %s\n", __func__); + + mutex_lock(&priv->pie_mutex); + + for (block = min_block; block < max_block; block++) { + for (j = 0; j < 2; j++) { + t = (sw_r32(RTL931X_PIE_BLK_TMPLTE_CTRL(block)) >> (j * 4)) & 0xf; + pr_info("Testing block %d, template %d, template id %d\n", block, j, t); + pr_info("%s: %08x\n", + __func__, sw_r32(RTL931X_PIE_BLK_TMPLTE_CTRL(block))); + idx = rtl931x_pie_verify_template(priv, pr, t, block); + if (idx >= 0) + break; + } + if (j < 2) + break; + } + + if (block >= priv->n_pie_blocks) { + mutex_unlock(&priv->pie_mutex); + return -EOPNOTSUPP; + } + + pr_info("Using block: %d, index %d, template-id %d\n", block, idx, j); + set_bit(idx, priv->pie_use_bm); + + pr->valid = true; + pr->tid = j; // Mapped to template number + pr->tid_m = 0x1; + pr->id = idx; + + rtl931x_pie_lookup_enable(priv, idx); + rtl931x_pie_rule_write(priv, idx, pr); + + mutex_unlock(&priv->pie_mutex); + return 0; +} + +/* + * Delete a range of Packet Inspection Engine rules + */ +static int rtl931x_pie_rule_del(struct rtl838x_switch_priv *priv, int index_from, int index_to) +{ + u32 v = (index_from << 1)| (index_to << 13 ) | BIT(0); + + pr_info("%s: from %d to %d\n", __func__, index_from, index_to); + mutex_lock(&priv->reg_mutex); + + // Write from-to and execute bit into control register + sw_w32(v, RTL931X_PIE_CLR_CTRL); + + // Wait until command has completed + do { + } while (sw_r32(RTL931X_PIE_CLR_CTRL) & BIT(0)); + + mutex_unlock(&priv->reg_mutex); + return 0; +} + +static void rtl931x_pie_rule_rm(struct rtl838x_switch_priv *priv, struct pie_rule *pr) +{ + int idx = pr->id; + + rtl931x_pie_rule_del(priv, idx, idx); + clear_bit(idx, priv->pie_use_bm); +} + +static void rtl931x_pie_init(struct rtl838x_switch_priv *priv) +{ + int i; + u32 template_selectors; + + mutex_init(&priv->pie_mutex); + + pr_info("%s\n", __func__); + // Enable ACL lookup on all ports, including CPU_PORT + for (i = 0; i <= priv->cpu_port; i++) + sw_w32(1, RTL931X_ACL_PORT_LOOKUP_CTRL(i)); + + // Include IPG in metering + sw_w32_mask(0, 1, RTL931X_METER_GLB_CTRL); + + // Delete all present rules, block size is 128 on all SoC 
families + rtl931x_pie_rule_del(priv, 0, priv->n_pie_blocks * 128 - 1); + + // Assign first half blocks 0-7 to VACL phase, second half to IACL + // 3 bits are used for each block, values for PIE blocks are + // 6: Disabled, 0: VACL, 1: IACL, 2: EACL + // And for OpenFlow Flow blocks: 3: Ingress Flow table 0, + // 4: Ingress Flow Table 3, 5: Egress flow table 0 + for (i = 0; i < priv->n_pie_blocks; i++) { + int pos = (i % 10) * 3; + u32 r = RTL931X_PIE_BLK_PHASE_CTRL + 4 * (i / 10); + + if (i < priv->n_pie_blocks / 2) + sw_w32_mask(0x7 << pos, 0, r); + else + sw_w32_mask(0x7 << pos, 1 << pos, r); + } + + // Enable predefined templates 0, 1 for first quarter of all blocks + template_selectors = 0 | (1 << 4); + for (i = 0; i < priv->n_pie_blocks / 4; i++) + sw_w32(template_selectors, RTL931X_PIE_BLK_TMPLTE_CTRL(i)); + + // Enable predefined templates 2, 3 for second quarter of all blocks + template_selectors = 2 | (3 << 4); + for (i = priv->n_pie_blocks / 4; i < priv->n_pie_blocks / 2; i++) + sw_w32(template_selectors, RTL931X_PIE_BLK_TMPLTE_CTRL(i)); + + // Enable predefined templates 0, 1 for third quater of all blocks + template_selectors = 0 | (1 << 4); + for (i = priv->n_pie_blocks / 2; i < priv->n_pie_blocks * 3 / 4; i++) + sw_w32(template_selectors, RTL931X_PIE_BLK_TMPLTE_CTRL(i)); + + // Enable predefined templates 2, 3 for fourth quater of all blocks + template_selectors = 2 | (3 << 4); + for (i = priv->n_pie_blocks * 3 / 4; i < priv->n_pie_blocks; i++) + sw_w32(template_selectors, RTL931X_PIE_BLK_TMPLTE_CTRL(i)); + +} + +int rtl931x_l3_setup(struct rtl838x_switch_priv *priv) +{ + return 0; +} + +void rtl931x_vlan_port_keep_tag_set(int port, bool keep_outer, bool keep_inner) +{ + sw_w32(FIELD_PREP(RTL931X_VLAN_PORT_TAG_EGR_OTAG_STS_MASK, + keep_outer ? RTL931X_VLAN_PORT_TAG_STS_TAGGED : RTL931X_VLAN_PORT_TAG_STS_UNTAG) | + FIELD_PREP(RTL931X_VLAN_PORT_TAG_EGR_ITAG_STS_MASK, + keep_inner ? 
RTL931X_VLAN_PORT_TAG_STS_TAGGED : RTL931X_VLAN_PORT_TAG_STS_UNTAG), + RTL931X_VLAN_PORT_TAG_CTRL(port)); +} + +void rtl931x_vlan_port_pvidmode_set(int port, enum pbvlan_type type, enum pbvlan_mode mode) +{ + if (type == PBVLAN_TYPE_INNER) + sw_w32_mask(0x3 << 12, mode << 12, RTL931X_VLAN_PORT_IGR_CTRL + (port << 2)); + else + sw_w32_mask(0x3 << 26, mode << 26, RTL931X_VLAN_PORT_IGR_CTRL + (port << 2)); +} + +void rtl931x_vlan_port_pvid_set(int port, enum pbvlan_type type, int pvid) +{ + if (type == PBVLAN_TYPE_INNER) + sw_w32_mask(0xfff, pvid, RTL931X_VLAN_PORT_IGR_CTRL + (port << 2)); + else + sw_w32_mask(0xfff << 14, pvid << 14, RTL931X_VLAN_PORT_IGR_CTRL + (port << 2)); +} + +static void rtl931x_set_igr_filter(int port, enum igr_filter state) +{ + sw_w32_mask(0x3 << ((port & 0xf)<<1), state << ((port & 0xf)<<1), + RTL931X_VLAN_PORT_IGR_FLTR + (((port >> 4) << 2))); +} + +static void rtl931x_set_egr_filter(int port, enum egr_filter state) +{ + sw_w32_mask(0x1 << (port % 0x20), state << (port % 0x20), + RTL931X_VLAN_PORT_EGR_FLTR + (((port >> 5) << 2))); +} + +void rtl931x_set_distribution_algorithm(int group, int algoidx, u32 algomsk) +{ + u32 l3shift = 0; + u32 newmask = 0; + + /* TODO: for now we set algoidx to 0 */ + algoidx=0; + + if (algomsk & TRUNK_DISTRIBUTION_ALGO_SIP_BIT) { + l3shift = 4; + newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SIP_BIT; + } + if (algomsk & TRUNK_DISTRIBUTION_ALGO_DIP_BIT) { + l3shift = 4; + newmask |= TRUNK_DISTRIBUTION_ALGO_L3_DIP_BIT; + } + if (algomsk & TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT) { + l3shift = 4; + newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SRC_L4PORT_BIT; + } + if (algomsk & TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT) { + l3shift = 4; + newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SRC_L4PORT_BIT; + } + + if (l3shift == 4) { + if (algomsk & TRUNK_DISTRIBUTION_ALGO_SMAC_BIT) + newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SMAC_BIT; + if (algomsk & TRUNK_DISTRIBUTION_ALGO_DMAC_BIT) + newmask |= TRUNK_DISTRIBUTION_ALGO_L3_DMAC_BIT; + } else { + if (algomsk & TRUNK_DISTRIBUTION_ALGO_SMAC_BIT) + newmask |= TRUNK_DISTRIBUTION_ALGO_L2_SMAC_BIT; + if (algomsk & TRUNK_DISTRIBUTION_ALGO_DMAC_BIT) + newmask |= TRUNK_DISTRIBUTION_ALGO_L2_DMAC_BIT; + } + + sw_w32(newmask << l3shift, RTL931X_TRK_HASH_CTRL + (algoidx << 2)); +} + +static void rtl931x_led_init(struct rtl838x_switch_priv *priv) +{ + int i, pos; + u32 v, set; + u64 pm_copper = 0, pm_fiber = 0; + u32 setlen; + const __be32 *led_set; + char set_name[9]; + struct device_node *node; + + pr_info("%s called\n", __func__); + node = of_find_compatible_node(NULL, NULL, "realtek,rtl9300-leds"); + if (!node) { + pr_info("%s No compatible LED node found\n", __func__); + return; + } + + for (i= 0; i < priv->cpu_port; i++) { + pos = (i << 1) % 32; + sw_w32_mask(0x3 << pos, 0, RTL931X_LED_PORT_FIB_SET_SEL_CTRL(i)); + sw_w32_mask(0x3 << pos, 0, RTL931X_LED_PORT_COPR_SET_SEL_CTRL(i)); + + if (!priv->ports[i].phy) + continue; + + v = 0x1; // Found on the EdgeCore, but we do not have any HW description + sw_w32_mask(0x3 << pos, v << pos, RTL931X_LED_PORT_NUM_CTRL(i)); + + if (priv->ports[i].phy_is_integrated) + pm_fiber |= BIT_ULL(i); + else + pm_copper |= BIT_ULL(i); + + set = priv->ports[i].led_set; + sw_w32_mask(0, set << pos, RTL931X_LED_PORT_COPR_SET_SEL_CTRL(i)); + sw_w32_mask(0, set << pos, RTL931X_LED_PORT_FIB_SET_SEL_CTRL(i)); + } + + for (i = 0; i < 4; i++) { + sprintf(set_name, "led_set%d", i); + pr_info(">%s<\n", set_name); + led_set = of_get_property(node, set_name, &setlen); + if (!led_set || setlen != 16) + break; + v = 
be32_to_cpup(led_set) << 16 | be32_to_cpup(led_set + 1); + sw_w32(v, RTL931X_LED_SET0_0_CTRL - 4 - i * 8); + v = be32_to_cpup(led_set + 2) << 16 | be32_to_cpup(led_set + 3); + sw_w32(v, RTL931X_LED_SET0_0_CTRL - i * 8); + } + + // Set LED mode to serial (0x1) + sw_w32_mask(0x3, 0x1, RTL931X_LED_GLB_CTRL); + + rtl839x_set_port_reg_le(pm_copper, RTL931X_LED_PORT_COPR_MASK_CTRL); + rtl839x_set_port_reg_le(pm_fiber, RTL931X_LED_PORT_FIB_MASK_CTRL); + rtl839x_set_port_reg_le(pm_copper | pm_fiber, RTL931X_LED_PORT_COMBO_MASK_CTRL); + + for (i = 0; i < 32; i++) + pr_info("%s %08x: %08x\n",__func__, 0xbb000600 + i * 4, sw_r32(0x0600 + i * 4)); + +} + +const struct rtl838x_reg rtl931x_reg = { + .mask_port_reg_be = rtl839x_mask_port_reg_be, + .set_port_reg_be = rtl839x_set_port_reg_be, + .get_port_reg_be = rtl839x_get_port_reg_be, + .mask_port_reg_le = rtl839x_mask_port_reg_le, + .set_port_reg_le = rtl839x_set_port_reg_le, + .get_port_reg_le = rtl839x_get_port_reg_le, + .stat_port_rst = RTL931X_STAT_PORT_RST, + .stat_rst = RTL931X_STAT_RST, + .stat_port_std_mib = 0, // Not defined + .traffic_enable = rtl931x_traffic_enable, + .traffic_disable = rtl931x_traffic_disable, + .traffic_get = rtl931x_traffic_get, + .traffic_set = rtl931x_traffic_set, + .l2_ctrl_0 = RTL931X_L2_CTRL, + .l2_ctrl_1 = RTL931X_L2_AGE_CTRL, + .l2_port_aging_out = RTL931X_L2_PORT_AGE_CTRL, + .set_ageing_time = rtl931x_set_ageing_time, + // .smi_poll_ctrl does not exist + .l2_tbl_flush_ctrl = RTL931X_L2_TBL_FLUSH_CTRL, + .exec_tbl0_cmd = rtl931x_exec_tbl0_cmd, + .exec_tbl1_cmd = rtl931x_exec_tbl1_cmd, + .tbl_access_data_0 = rtl931x_tbl_access_data_0, + .isr_glb_src = RTL931X_ISR_GLB_SRC, + .isr_port_link_sts_chg = RTL931X_ISR_PORT_LINK_STS_CHG, + .imr_port_link_sts_chg = RTL931X_IMR_PORT_LINK_STS_CHG, + // imr_glb does not exist on RTL931X + .vlan_tables_read = rtl931x_vlan_tables_read, + .vlan_set_tagged = rtl931x_vlan_set_tagged, + .vlan_set_untagged = rtl931x_vlan_set_untagged, + .vlan_profile_dump = rtl931x_vlan_profile_dump, + .vlan_profile_setup = rtl931x_vlan_profile_setup, + .vlan_fwd_on_inner = rtl931x_vlan_fwd_on_inner, + .stp_get = rtl931x_stp_get, + .stp_set = rtl931x_stp_set, + .mac_force_mode_ctrl = rtl931x_mac_force_mode_ctrl, + .mac_port_ctrl = rtl931x_mac_port_ctrl, + .l2_port_new_salrn = rtl931x_l2_port_new_salrn, + .l2_port_new_sa_fwd = rtl931x_l2_port_new_sa_fwd, + .mir_ctrl = RTL931X_MIR_CTRL, + .mir_dpm = RTL931X_MIR_DPM_CTRL, + .mir_spm = RTL931X_MIR_SPM_CTRL, + .mac_link_sts = RTL931X_MAC_LINK_STS, + .mac_link_dup_sts = RTL931X_MAC_LINK_DUP_STS, + .mac_link_spd_sts = rtl931x_mac_link_spd_sts, + .mac_rx_pause_sts = RTL931X_MAC_RX_PAUSE_STS, + .mac_tx_pause_sts = RTL931X_MAC_TX_PAUSE_STS, + .read_l2_entry_using_hash = rtl931x_read_l2_entry_using_hash, + .write_l2_entry_using_hash = rtl931x_write_l2_entry_using_hash, + .read_cam = rtl931x_read_cam, + .write_cam = rtl931x_write_cam, + .vlan_port_keep_tag_set = rtl931x_vlan_port_keep_tag_set, + .vlan_port_pvidmode_set = rtl931x_vlan_port_pvidmode_set, + .vlan_port_pvid_set = rtl931x_vlan_port_pvid_set, + .trk_mbr_ctr = rtl931x_trk_mbr_ctr, + .set_vlan_igr_filter = rtl931x_set_igr_filter, + .set_vlan_egr_filter = rtl931x_set_egr_filter, + .set_distribution_algorithm = rtl931x_set_distribution_algorithm, + .l2_hash_key = rtl931x_l2_hash_key, + .read_mcast_pmask = rtl931x_read_mcast_pmask, + .write_mcast_pmask = rtl931x_write_mcast_pmask, + .pie_init = rtl931x_pie_init, + .pie_rule_write = rtl931x_pie_rule_write, + .pie_rule_add = rtl931x_pie_rule_add, + 
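	/*
	 * The callbacks collected in this structure are reached from the shared
	 * rtl83xx code through the family-independent rtl838x_reg pointer held in
	 * the switch private data. A minimal sketch of that dispatch, as done for
	 * example in rtl83xx_configure_flower() in tc.c:
	 *
	 *   struct rtl838x_switch_priv *priv = ds->priv;
	 *
	 *   err = priv->r->pie_rule_add(priv, &flow->rule);  // resolves to rtl931x_pie_rule_add() here
	 */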
.pie_rule_rm = rtl931x_pie_rule_rm, + .l2_learning_setup = rtl931x_l2_learning_setup, + .l3_setup = rtl931x_l3_setup, + .led_init = rtl931x_led_init, +}; + diff --git a/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/tc.c b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/tc.c new file mode 100644 index 0000000000..d0a8ee8cfe --- /dev/null +++ b/target/linux/realtek/files-5.15/drivers/net/dsa/rtl83xx/tc.c @@ -0,0 +1,409 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include +#include + +#include +#include "rtl83xx.h" +#include "rtl838x.h" + +/* + * Parse the flow rule for the matching conditions + */ +static int rtl83xx_parse_flow_rule(struct rtl838x_switch_priv *priv, + struct flow_rule *rule, struct rtl83xx_flow *flow) +{ + struct flow_dissector *dissector = rule->match.dissector; + + pr_debug("In %s\n", __func__); + /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */ + if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || + (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) { + pr_err("Cannot form TC key: used_keys = 0x%x\n", dissector->used_keys); + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + pr_debug("%s: BASIC\n", __func__); + flow_rule_match_basic(rule, &match); + if (match.key->n_proto == htons(ETH_P_ARP)) + flow->rule.frame_type = 0; + if (match.key->n_proto == htons(ETH_P_IP)) + flow->rule.frame_type = 2; + if (match.key->n_proto == htons(ETH_P_IPV6)) + flow->rule.frame_type = 3; + if ((match.key->n_proto == htons(ETH_P_ARP)) || flow->rule.frame_type) + flow->rule.frame_type_m = 3; + if (flow->rule.frame_type >= 2) { + if (match.key->ip_proto == IPPROTO_UDP) + flow->rule.frame_type_l4 = 0; + if (match.key->ip_proto == IPPROTO_TCP) + flow->rule.frame_type_l4 = 1; + if (match.key->ip_proto == IPPROTO_ICMP + || match.key->ip_proto ==IPPROTO_ICMPV6) + flow->rule.frame_type_l4 = 2; + if (match.key->ip_proto == IPPROTO_TCP) + flow->rule.frame_type_l4 = 3; + if ((match.key->ip_proto == IPPROTO_UDP) || flow->rule.frame_type_l4) + flow->rule.frame_type_l4_m = 7; + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + pr_debug("%s: ETH_ADDR\n", __func__); + flow_rule_match_eth_addrs(rule, &match); + ether_addr_copy(flow->rule.dmac, match.key->dst); + ether_addr_copy(flow->rule.dmac_m, match.mask->dst); + ether_addr_copy(flow->rule.smac, match.key->src); + ether_addr_copy(flow->rule.smac_m, match.mask->src); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + pr_debug("%s: VLAN\n", __func__); + flow_rule_match_vlan(rule, &match); + flow->rule.itag = match.key->vlan_id; + flow->rule.itag_m = match.mask->vlan_id; + // TODO: What about match.key->vlan_priority ? 
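		/*
		 * One possible way to honour vlan_priority, kept here only as a
		 * sketch: it assumes the ITAG template field carries the full TCI
		 * with the PCP in the top three bits, which has not been verified
		 * against the hardware.
		 *
		 *   flow->rule.itag   |= (u16)match.key->vlan_priority << 13;
		 *   flow->rule.itag_m |= (u16)match.mask->vlan_priority << 13;
		 */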
+ } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs match; + + pr_debug("%s: IPV4\n", __func__); + flow_rule_match_ipv4_addrs(rule, &match); + flow->rule.is_ipv6 = false; + flow->rule.dip = match.key->dst; + flow->rule.dip_m = match.mask->dst; + flow->rule.sip = match.key->src; + flow->rule.sip_m = match.mask->src; + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + struct flow_match_ipv6_addrs match; + + pr_debug("%s: IPV6\n", __func__); + flow->rule.is_ipv6 = true; + flow_rule_match_ipv6_addrs(rule, &match); + flow->rule.dip6 = match.key->dst; + flow->rule.dip6_m = match.mask->dst; + flow->rule.sip6 = match.key->src; + flow->rule.sip6_m = match.mask->src; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + pr_debug("%s: PORTS\n", __func__); + flow_rule_match_ports(rule, &match); + flow->rule.dport = match.key->dst; + flow->rule.dport_m = match.mask->dst; + flow->rule.sport = match.key->src; + flow->rule.sport_m = match.mask->src; + } + + // TODO: ICMP + return 0; +} + +static void rtl83xx_flow_bypass_all(struct rtl83xx_flow *flow) +{ + flow->rule.bypass_sel = true; + flow->rule.bypass_all = true; + flow->rule.bypass_igr_stp = true; + flow->rule.bypass_ibc_sc = true; +} + +static int rtl83xx_parse_fwd(struct rtl838x_switch_priv *priv, + const struct flow_action_entry *act, struct rtl83xx_flow *flow) +{ + struct net_device *dev = act->dev; + int port; + + port = rtl83xx_port_is_under(dev, priv); + if (port < 0) { + netdev_info(dev, "%s: not a DSA device.\n", __func__); + return -EINVAL; + } + + flow->rule.fwd_sel = true; + flow->rule.fwd_data = port; + pr_debug("Using port index: %d\n", port); + rtl83xx_flow_bypass_all(flow); + + return 0; +} + +static int rtl83xx_add_flow(struct rtl838x_switch_priv *priv, struct flow_cls_offload *f, + struct rtl83xx_flow *flow) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + const struct flow_action_entry *act; + int i, err; + + pr_debug("%s\n", __func__); + + rtl83xx_parse_flow_rule(priv, rule, flow); + + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_DROP: + pr_debug("%s: DROP\n", __func__); + flow->rule.drop = true; + rtl83xx_flow_bypass_all(flow); + return 0; + + case FLOW_ACTION_TRAP: + pr_debug("%s: TRAP\n", __func__); + flow->rule.fwd_data = priv->cpu_port; + flow->rule.fwd_act = PIE_ACT_REDIRECT_TO_PORT; + rtl83xx_flow_bypass_all(flow); + break; + + case FLOW_ACTION_MANGLE: + pr_err("%s: FLOW_ACTION_MANGLE not supported\n", __func__); + return -EOPNOTSUPP; + + case FLOW_ACTION_ADD: + pr_err("%s: FLOW_ACTION_ADD not supported\n", __func__); + return -EOPNOTSUPP; + + case FLOW_ACTION_VLAN_PUSH: + pr_debug("%s: VLAN_PUSH\n", __func__); +// TODO: act->vlan.proto + flow->rule.ivid_act = PIE_ACT_VID_ASSIGN; + flow->rule.ivid_sel = true; + flow->rule.ivid_data = htons(act->vlan.vid); + flow->rule.ovid_act = PIE_ACT_VID_ASSIGN; + flow->rule.ovid_sel = true; + flow->rule.ovid_data = htons(act->vlan.vid); + flow->rule.fwd_mod_to_cpu = true; + break; + + case FLOW_ACTION_VLAN_POP: + pr_debug("%s: VLAN_POP\n", __func__); + flow->rule.ivid_act = PIE_ACT_VID_ASSIGN; + flow->rule.ivid_data = 0; + flow->rule.ivid_sel = true; + flow->rule.ovid_act = PIE_ACT_VID_ASSIGN; + flow->rule.ovid_data = 0; + flow->rule.ovid_sel = true; + flow->rule.fwd_mod_to_cpu = true; + break; + + case FLOW_ACTION_CSUM: + pr_err("%s: FLOW_ACTION_CSUM not supported\n", __func__); + return -EOPNOTSUPP; + + case 
FLOW_ACTION_REDIRECT: + pr_debug("%s: REDIRECT\n", __func__); + err = rtl83xx_parse_fwd(priv, act, flow); + if (err) + return err; + flow->rule.fwd_act = PIE_ACT_REDIRECT_TO_PORT; + break; + + case FLOW_ACTION_MIRRED: + pr_debug("%s: MIRRED\n", __func__); + err = rtl83xx_parse_fwd(priv, act, flow); + if (err) + return err; + flow->rule.fwd_act = PIE_ACT_COPY_TO_PORT; + break; + + default: + pr_err("%s: Flow action not supported: %d\n", __func__, act->id); + return -EOPNOTSUPP; + } + } + + return 0; +} + +static const struct rhashtable_params tc_ht_params = { + .head_offset = offsetof(struct rtl83xx_flow, node), + .key_offset = offsetof(struct rtl83xx_flow, cookie), + .key_len = sizeof(((struct rtl83xx_flow *)0)->cookie), + .automatic_shrinking = true, +}; + +static int rtl83xx_configure_flower(struct rtl838x_switch_priv *priv, + struct flow_cls_offload *f) +{ + struct rtl83xx_flow *flow; + int err = 0; + + pr_debug("In %s\n", __func__); + + rcu_read_lock(); + pr_debug("Cookie %08lx\n", f->cookie); + flow = rhashtable_lookup(&priv->tc_ht, &f->cookie, tc_ht_params); + if (flow) { + pr_info("%s: Got flow\n", __func__); + err = -EEXIST; + goto rcu_unlock; + } + +rcu_unlock: + rcu_read_unlock(); + if (flow) + goto out; + pr_debug("%s: New flow\n", __func__); + + flow = kzalloc(sizeof(*flow), GFP_KERNEL); + if (!flow) { + err = -ENOMEM; + goto out; + } + + flow->cookie = f->cookie; + flow->priv = priv; + + err = rhashtable_insert_fast(&priv->tc_ht, &flow->node, tc_ht_params); + if (err) { + pr_err("Could not insert add new rule\n"); + goto out_free; + } + + rtl83xx_add_flow(priv, f, flow); // TODO: check error + + // Add log action to flow + flow->rule.packet_cntr = rtl83xx_packet_cntr_alloc(priv); + if (flow->rule.packet_cntr >= 0) { + pr_debug("Using packet counter %d\n", flow->rule.packet_cntr); + flow->rule.log_sel = true; + flow->rule.log_data = flow->rule.packet_cntr; + } + + err = priv->r->pie_rule_add(priv, &flow->rule); + return err; + +out_free: + kfree(flow); +out: + pr_err("%s: error %d\n", __func__, err); + return err; +} + +static int rtl83xx_delete_flower(struct rtl838x_switch_priv *priv, + struct flow_cls_offload * cls_flower) +{ + struct rtl83xx_flow *flow; + + pr_debug("In %s\n", __func__); + rcu_read_lock(); + flow = rhashtable_lookup_fast(&priv->tc_ht, &cls_flower->cookie, tc_ht_params); + if (!flow) { + rcu_read_unlock(); + return -EINVAL; + } + + priv->r->pie_rule_rm(priv, &flow->rule); + + rhashtable_remove_fast(&priv->tc_ht, &flow->node, tc_ht_params); + + kfree_rcu(flow, rcu_head); + + rcu_read_unlock(); + return 0; +} + +static int rtl83xx_stats_flower(struct rtl838x_switch_priv *priv, + struct flow_cls_offload * cls_flower) +{ + struct rtl83xx_flow *flow; + unsigned long lastused = 0; + int total_packets, new_packets; + + pr_debug("%s: \n", __func__); + flow = rhashtable_lookup_fast(&priv->tc_ht, &cls_flower->cookie, tc_ht_params); + if (!flow) + return -1; + + if (flow->rule.packet_cntr >= 0) { + total_packets = priv->r->packet_cntr_read(flow->rule.packet_cntr); + pr_debug("Total packets: %d\n", total_packets); + new_packets = total_packets - flow->rule.last_packet_cnt; + flow->rule.last_packet_cnt = total_packets; + } + + // TODO: We need a second PIE rule to count the bytes + flow_stats_update(&cls_flower->stats, 100 * new_packets, new_packets, 0, lastused, + FLOW_ACTION_HW_STATS_IMMEDIATE); + return 0; +} + +static int rtl83xx_setup_tc_cls_flower(struct rtl838x_switch_priv *priv, + struct flow_cls_offload *cls_flower) +{ + pr_debug("%s: %d\n", __func__, 
cls_flower->command); + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return rtl83xx_configure_flower(priv, cls_flower); + case FLOW_CLS_DESTROY: + return rtl83xx_delete_flower(priv, cls_flower); + case FLOW_CLS_STATS: + return rtl83xx_stats_flower(priv, cls_flower); + default: + return -EOPNOTSUPP; + } +} + + +static int rtl83xx_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct rtl838x_switch_priv *priv = cb_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + pr_debug("%s: TC_SETUP_CLSFLOWER\n", __func__); + return rtl83xx_setup_tc_cls_flower(priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(rtl83xx_block_cb_list); + +int rtl83xx_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) +{ + struct rtl838x_switch_priv *priv; + struct flow_block_offload *f = type_data; + static bool first_time = true; + int err; + + pr_debug("%s: %d\n", __func__, type); + + if(!netdev_uses_dsa(dev)) { + pr_err("%s: no DSA\n", __func__); + return 0; + } + priv = dev->dsa_ptr->ds->priv; + + switch (type) { + case TC_SETUP_BLOCK: + if (first_time) { + first_time = false; + err = rhashtable_init(&priv->tc_ht, &tc_ht_params); + if (err) + pr_err("%s: Could not initialize hash table\n", __func__); + } + + f->unlocked_driver_cb = true; + return flow_block_cb_setup_simple(type_data, + &rtl83xx_block_cb_list, + rtl83xx_setup_tc_block_cb, + priv, priv, true); + default: + return -EOPNOTSUPP; + } + + return 0; +} diff --git a/target/linux/realtek/files-5.15/drivers/net/ethernet/rtl838x_eth.c b/target/linux/realtek/files-5.15/drivers/net/ethernet/rtl838x_eth.c new file mode 100644 index 0000000000..166e49e139 --- /dev/null +++ b/target/linux/realtek/files-5.15/drivers/net/ethernet/rtl838x_eth.c @@ -0,0 +1,2588 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * linux/drivers/net/ethernet/rtl838x_eth.c + * Copyright (C) 2020 B. Koblitz + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "rtl838x_eth.h" + +extern struct rtl83xx_soc_info soc_info; + +/* + * Maximum number of RX rings is 8 on RTL83XX and 32 on the 93XX + * The ring is assigned by switch based on packet/port priortity + * Maximum number of TX rings is 2, Ring 2 being the high priority + * ring on the RTL93xx SoCs. MAX_RXLEN gives the maximum length + * for an RX ring, MAX_ENTRIES the maximum number of entries + * available in total for all queues. 
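 * As a rough illustration of how these limits fit together (not a statement
 * about any particular board): the per-ring length is chosen so that
 * rxrings * rxringlen stays within MAX_ENTRIES, e.g.
 *
 *   32 rings * 75 entries  = 2400 = MAX_ENTRIES
 *    8 rings * 300 entries = 2400 = MAX_ENTRIES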
+ */ +#define MAX_RXRINGS 32 +#define MAX_RXLEN 300 +#define MAX_ENTRIES (300 * 8) +#define TXRINGS 2 +#define TXRINGLEN 160 +#define NOTIFY_EVENTS 10 +#define NOTIFY_BLOCKS 10 +#define TX_EN 0x8 +#define RX_EN 0x4 +#define TX_EN_93XX 0x20 +#define RX_EN_93XX 0x10 +#define TX_DO 0x2 +#define WRAP 0x2 +#define MAX_PORTS 57 +#define MAX_SMI_BUSSES 4 + +#define RING_BUFFER 1600 + +struct p_hdr { + uint8_t *buf; + uint16_t reserved; + uint16_t size; /* buffer size */ + uint16_t offset; + uint16_t len; /* pkt len */ + /* cpu_tag[0] is a reserved uint16_t on RTL83xx */ + uint16_t cpu_tag[10]; +} __packed __aligned(1); + +struct n_event { + uint32_t type:2; + uint32_t fidVid:12; + uint64_t mac:48; + uint32_t slp:6; + uint32_t valid:1; + uint32_t reserved:27; +} __packed __aligned(1); + +struct ring_b { + uint32_t rx_r[MAX_RXRINGS][MAX_RXLEN]; + uint32_t tx_r[TXRINGS][TXRINGLEN]; + struct p_hdr rx_header[MAX_RXRINGS][MAX_RXLEN]; + struct p_hdr tx_header[TXRINGS][TXRINGLEN]; + uint32_t c_rx[MAX_RXRINGS]; + uint32_t c_tx[TXRINGS]; + uint8_t tx_space[TXRINGS * TXRINGLEN * RING_BUFFER]; + uint8_t *rx_space; +}; + +struct notify_block { + struct n_event events[NOTIFY_EVENTS]; +}; + +struct notify_b { + struct notify_block blocks[NOTIFY_BLOCKS]; + u32 reserved1[8]; + u32 ring[NOTIFY_BLOCKS]; + u32 reserved2[8]; +}; + +static void rtl838x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio) +{ + // cpu_tag[0] is reserved on the RTL83XX SoCs + h->cpu_tag[1] = 0x0401; // BIT 10: RTL8380_CPU_TAG, BIT0: L2LEARNING on + h->cpu_tag[2] = 0x0200; // Set only AS_DPM, to enable DPM settings below + h->cpu_tag[3] = 0x0000; + h->cpu_tag[4] = BIT(dest_port) >> 16; + h->cpu_tag[5] = BIT(dest_port) & 0xffff; + + /* Set internal priority (PRI) and enable (AS_PRI) */ + if (prio >= 0) + h->cpu_tag[2] |= ((prio & 0x7) | BIT(3)) << 12; +} + +static void rtl839x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio) +{ + // cpu_tag[0] is reserved on the RTL83XX SoCs + h->cpu_tag[1] = 0x0100; // RTL8390_CPU_TAG marker + h->cpu_tag[2] = BIT(4) | BIT(7); /* AS_DPM (4) and L2LEARNING (7) flags */ + h->cpu_tag[3] = h->cpu_tag[4] = h->cpu_tag[5] = 0; + // h->cpu_tag[1] |= BIT(1) | BIT(0); // Bypass filter 1/2 + if (dest_port >= 32) { + dest_port -= 32; + h->cpu_tag[2] |= (BIT(dest_port) >> 16) & 0xf; + h->cpu_tag[3] = BIT(dest_port) & 0xffff; + } else { + h->cpu_tag[4] = BIT(dest_port) >> 16; + h->cpu_tag[5] = BIT(dest_port) & 0xffff; + } + + /* Set internal priority (PRI) and enable (AS_PRI) */ + if (prio >= 0) + h->cpu_tag[2] |= ((prio & 0x7) | BIT(3)) << 8; +} + +static void rtl930x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio) +{ + h->cpu_tag[0] = 0x8000; // CPU tag marker + h->cpu_tag[1] = h->cpu_tag[2] = 0; + h->cpu_tag[3] = 0; + h->cpu_tag[4] = 0; + h->cpu_tag[5] = 0; + h->cpu_tag[6] = BIT(dest_port) >> 16; + h->cpu_tag[7] = BIT(dest_port) & 0xffff; + + /* Enable (AS_QID) and set priority queue (QID) */ + if (prio >= 0) + h->cpu_tag[2] = (BIT(5) | (prio & 0x1f)) << 8; +} + +static void rtl931x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio) +{ + h->cpu_tag[0] = 0x8000; // CPU tag marker + h->cpu_tag[1] = h->cpu_tag[2] = 0; + h->cpu_tag[3] = 0; + h->cpu_tag[4] = h->cpu_tag[5] = h->cpu_tag[6] = h->cpu_tag[7] = 0; + if (dest_port >= 32) { + dest_port -= 32; + h->cpu_tag[4] = BIT(dest_port) >> 16; + h->cpu_tag[5] = BIT(dest_port) & 0xffff; + } else { + h->cpu_tag[6] = BIT(dest_port) >> 16; + h->cpu_tag[7] = BIT(dest_port) & 0xffff; + } + + /* Enable (AS_QID) 
and set priority queue (QID) */ + if (prio >= 0) + h->cpu_tag[2] = (BIT(5) | (prio & 0x1f)) << 8; +} + +static void rtl93xx_header_vlan_set(struct p_hdr *h, int vlan) +{ + h->cpu_tag[2] |= BIT(4); // Enable VLAN forwarding offload + h->cpu_tag[2] |= (vlan >> 8) & 0xf; + h->cpu_tag[3] |= (vlan & 0xff) << 8; +} + +struct rtl838x_rx_q { + int id; + struct rtl838x_eth_priv *priv; + struct napi_struct napi; +}; + +struct rtl838x_eth_priv { + struct net_device *netdev; + struct platform_device *pdev; + void *membase; + spinlock_t lock; + struct mii_bus *mii_bus; + struct rtl838x_rx_q rx_qs[MAX_RXRINGS]; + struct phylink *phylink; + struct phylink_config phylink_config; + u16 id; + u16 family_id; + const struct rtl838x_eth_reg *r; + u8 cpu_port; + u32 lastEvent; + u16 rxrings; + u16 rxringlen; + u8 smi_bus[MAX_PORTS]; + u8 smi_addr[MAX_PORTS]; + u32 sds_id[MAX_PORTS]; + bool smi_bus_isc45[MAX_SMI_BUSSES]; + bool phy_is_internal[MAX_PORTS]; + phy_interface_t interfaces[MAX_PORTS]; +}; + +extern int rtl838x_phy_init(struct rtl838x_eth_priv *priv); +extern int rtl838x_read_sds_phy(int phy_addr, int phy_reg); +extern int rtl839x_read_sds_phy(int phy_addr, int phy_reg); +extern int rtl839x_write_sds_phy(int phy_addr, int phy_reg, u16 v); +extern int rtl930x_read_sds_phy(int phy_addr, int page, int phy_reg); +extern int rtl930x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v); +extern int rtl931x_read_sds_phy(int phy_addr, int page, int phy_reg); +extern int rtl931x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v); +extern int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val); +extern int rtl930x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val); +extern int rtl931x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val); +extern int rtl931x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val); + +/* + * On the RTL93XX, the RTL93XX_DMA_IF_RX_RING_CNTR track the fill level of + * the rings. Writing x into these registers substracts x from its content. + * When the content reaches the ring size, the ASIC no longer adds + * packets to this receive queue. + */ +void rtl838x_update_cntr(int r, int released) +{ + // This feature is not available on RTL838x SoCs +} + +void rtl839x_update_cntr(int r, int released) +{ + // This feature is not available on RTL839x SoCs +} + +void rtl930x_update_cntr(int r, int released) +{ + int pos = (r % 3) * 10; + u32 reg = RTL930X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2); + u32 v = sw_r32(reg); + + v = (v >> pos) & 0x3ff; + pr_debug("RX: Work done %d, old value: %d, pos %d, reg %04x\n", released, v, pos, reg); + sw_w32_mask(0x3ff << pos, released << pos, reg); + sw_w32(v, reg); +} + +void rtl931x_update_cntr(int r, int released) +{ + int pos = (r % 3) * 10; + u32 reg = RTL931X_DMA_IF_RX_RING_CNTR + ((r / 3) << 2); + u32 v = sw_r32(reg); + + v = (v >> pos) & 0x3ff; + sw_w32_mask(0x3ff << pos, released << pos, reg); + sw_w32(v, reg); +} + +struct dsa_tag { + u8 reason; + u8 queue; + u16 port; + u8 l2_offloaded; + u8 prio; + bool crc_error; +}; + +bool rtl838x_decode_tag(struct p_hdr *h, struct dsa_tag *t) +{ + /* cpu_tag[0] is reserved. 
Fields are off-by-one */ + t->reason = h->cpu_tag[4] & 0xf; + t->queue = (h->cpu_tag[1] & 0xe0) >> 5; + t->port = h->cpu_tag[1] & 0x1f; + t->crc_error = t->reason == 13; + + pr_debug("Reason: %d\n", t->reason); + if (t->reason != 6) // NIC_RX_REASON_SPECIAL_TRAP + t->l2_offloaded = 1; + else + t->l2_offloaded = 0; + + return t->l2_offloaded; +} + +bool rtl839x_decode_tag(struct p_hdr *h, struct dsa_tag *t) +{ + /* cpu_tag[0] is reserved. Fields are off-by-one */ + t->reason = h->cpu_tag[5] & 0x1f; + t->queue = (h->cpu_tag[4] & 0xe000) >> 13; + t->port = h->cpu_tag[1] & 0x3f; + t->crc_error = h->cpu_tag[4] & BIT(6); + + pr_debug("Reason: %d\n", t->reason); + if ((t->reason >= 7 && t->reason <= 13) || // NIC_RX_REASON_RMA + (t->reason >= 23 && t->reason <= 25)) // NIC_RX_REASON_SPECIAL_TRAP + t->l2_offloaded = 0; + else + t->l2_offloaded = 1; + + return t->l2_offloaded; +} + +bool rtl930x_decode_tag(struct p_hdr *h, struct dsa_tag *t) +{ + t->reason = h->cpu_tag[7] & 0x3f; + t->queue = (h->cpu_tag[2] >> 11) & 0x1f; + t->port = (h->cpu_tag[0] >> 8) & 0x1f; + t->crc_error = h->cpu_tag[1] & BIT(6); + + pr_debug("Reason %d, port %d, queue %d\n", t->reason, t->port, t->queue); + if (t->reason >= 19 && t->reason <= 27) + t->l2_offloaded = 0; + else + t->l2_offloaded = 1; + + return t->l2_offloaded; +} + +bool rtl931x_decode_tag(struct p_hdr *h, struct dsa_tag *t) +{ + t->reason = h->cpu_tag[7] & 0x3f; + t->queue = (h->cpu_tag[2] >> 11) & 0x1f; + t->port = (h->cpu_tag[0] >> 8) & 0x3f; + t->crc_error = h->cpu_tag[1] & BIT(6); + + if (t->reason != 63) + pr_info("%s: Reason %d, port %d, queue %d\n", __func__, t->reason, t->port, t->queue); + if (t->reason >= 19 && t->reason <= 27) // NIC_RX_REASON_RMA + t->l2_offloaded = 0; + else + t->l2_offloaded = 1; + + return t->l2_offloaded; +} + +/* + * Discard the RX ring-buffers, called as part of the net-ISR + * when the buffer runs over + */ +static void rtl838x_rb_cleanup(struct rtl838x_eth_priv *priv, int status) +{ + int r; + u32 *last; + struct p_hdr *h; + struct ring_b *ring = priv->membase; + + for (r = 0; r < priv->rxrings; r++) { + pr_debug("In %s working on r: %d\n", __func__, r); + last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4)); + do { + if ((ring->rx_r[r][ring->c_rx[r]] & 0x1)) + break; + pr_debug("Got something: %d\n", ring->c_rx[r]); + h = &ring->rx_header[r][ring->c_rx[r]]; + memset(h, 0, sizeof(struct p_hdr)); + h->buf = (u8 *)KSEG1ADDR(ring->rx_space + + r * priv->rxringlen * RING_BUFFER + + ring->c_rx[r] * RING_BUFFER); + h->size = RING_BUFFER; + /* make sure the header is visible to the ASIC */ + mb(); + + ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1 + | (ring->c_rx[r] == (priv->rxringlen - 1) ? WRAP : 0x1); + ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen; + } while (&ring->rx_r[r][ring->c_rx[r]] != last); + } +} + +struct fdb_update_work { + struct work_struct work; + struct net_device *ndev; + u64 macs[NOTIFY_EVENTS + 1]; +}; + +void rtl838x_fdb_sync(struct work_struct *work) +{ + const struct fdb_update_work *uw = + container_of(work, struct fdb_update_work, work); + struct switchdev_notifier_fdb_info info; + u8 addr[ETH_ALEN]; + int i = 0; + int action; + + while (uw->macs[i]) { + action = (uw->macs[i] & (1ULL << 63)) ? 
SWITCHDEV_FDB_ADD_TO_BRIDGE + : SWITCHDEV_FDB_DEL_TO_BRIDGE; + u64_to_ether_addr(uw->macs[i] & 0xffffffffffffULL, addr); + info.addr = &addr[0]; + info.vid = 0; + info.offloaded = 1; + pr_debug("FDB entry %d: %llx, action %d\n", i, uw->macs[0], action); + call_switchdev_notifiers(action, uw->ndev, &info.info, NULL); + i++; + } + kfree(work); +} + +static void rtl839x_l2_notification_handler(struct rtl838x_eth_priv *priv) +{ + struct notify_b *nb = priv->membase + sizeof(struct ring_b); + u32 e = priv->lastEvent; + struct n_event *event; + int i; + u64 mac; + struct fdb_update_work *w; + + while (!(nb->ring[e] & 1)) { + w = kzalloc(sizeof(*w), GFP_ATOMIC); + if (!w) { + pr_err("Out of memory: %s", __func__); + return; + } + INIT_WORK(&w->work, rtl838x_fdb_sync); + + for (i = 0; i < NOTIFY_EVENTS; i++) { + event = &nb->blocks[e].events[i]; + if (!event->valid) + continue; + mac = event->mac; + if (event->type) + mac |= 1ULL << 63; + w->ndev = priv->netdev; + w->macs[i] = mac; + } + + /* Hand the ring entry back to the switch */ + nb->ring[e] = nb->ring[e] | 1; + e = (e + 1) % NOTIFY_BLOCKS; + + w->macs[i] = 0ULL; + schedule_work(&w->work); + } + priv->lastEvent = e; +} + +static irqreturn_t rtl83xx_net_irq(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct rtl838x_eth_priv *priv = netdev_priv(dev); + u32 status = sw_r32(priv->r->dma_if_intr_sts); + int i; + + pr_debug("IRQ: %08x\n", status); + + /* Ignore TX interrupt */ + if ((status & 0xf0000)) { + /* Clear ISR */ + sw_w32(0x000f0000, priv->r->dma_if_intr_sts); + } + + /* RX interrupt */ + if (status & 0x0ff00) { + /* ACK and disable RX interrupt for this ring */ + sw_w32_mask(0xff00 & status, 0, priv->r->dma_if_intr_msk); + sw_w32(0x0000ff00 & status, priv->r->dma_if_intr_sts); + for (i = 0; i < priv->rxrings; i++) { + if (status & BIT(i + 8)) { + pr_debug("Scheduling queue: %d\n", i); + napi_schedule(&priv->rx_qs[i].napi); + } + } + } + + /* RX buffer overrun */ + if (status & 0x000ff) { + pr_debug("RX buffer overrun: status %x, mask: %x\n", + status, sw_r32(priv->r->dma_if_intr_msk)); + sw_w32(status, priv->r->dma_if_intr_sts); + rtl838x_rb_cleanup(priv, status & 0xff); + } + + if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00100000) { + sw_w32(0x00100000, priv->r->dma_if_intr_sts); + rtl839x_l2_notification_handler(priv); + } + + if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00200000) { + sw_w32(0x00200000, priv->r->dma_if_intr_sts); + rtl839x_l2_notification_handler(priv); + } + + if (priv->family_id == RTL8390_FAMILY_ID && status & 0x00400000) { + sw_w32(0x00400000, priv->r->dma_if_intr_sts); + rtl839x_l2_notification_handler(priv); + } + + return IRQ_HANDLED; +} + +static irqreturn_t rtl93xx_net_irq(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct rtl838x_eth_priv *priv = netdev_priv(dev); + u32 status_rx_r = sw_r32(priv->r->dma_if_intr_rx_runout_sts); + u32 status_rx = sw_r32(priv->r->dma_if_intr_rx_done_sts); + u32 status_tx = sw_r32(priv->r->dma_if_intr_tx_done_sts); + int i; + + pr_debug("In %s, status_tx: %08x, status_rx: %08x, status_rx_r: %08x\n", + __func__, status_tx, status_rx, status_rx_r); + + /* Ignore TX interrupt */ + if (status_tx) { + /* Clear ISR */ + pr_debug("TX done\n"); + sw_w32(status_tx, priv->r->dma_if_intr_tx_done_sts); + } + + /* RX interrupt */ + if (status_rx) { + pr_debug("RX IRQ\n"); + /* ACK and disable RX interrupt for given rings */ + sw_w32(status_rx, priv->r->dma_if_intr_rx_done_sts); + sw_w32_mask(status_rx, 0, 
priv->r->dma_if_intr_rx_done_msk); + for (i = 0; i < priv->rxrings; i++) { + if (status_rx & BIT(i)) { + pr_debug("Scheduling queue: %d\n", i); + napi_schedule(&priv->rx_qs[i].napi); + } + } + } + + /* RX buffer overrun */ + if (status_rx_r) { + pr_debug("RX buffer overrun: status %x, mask: %x\n", + status_rx_r, sw_r32(priv->r->dma_if_intr_rx_runout_msk)); + sw_w32(status_rx_r, priv->r->dma_if_intr_rx_runout_sts); + rtl838x_rb_cleanup(priv, status_rx_r); + } + + return IRQ_HANDLED; +} + +static const struct rtl838x_eth_reg rtl838x_reg = { + .net_irq = rtl83xx_net_irq, + .mac_port_ctrl = rtl838x_mac_port_ctrl, + .dma_if_intr_sts = RTL838X_DMA_IF_INTR_STS, + .dma_if_intr_msk = RTL838X_DMA_IF_INTR_MSK, + .dma_if_ctrl = RTL838X_DMA_IF_CTRL, + .mac_force_mode_ctrl = RTL838X_MAC_FORCE_MODE_CTRL, + .dma_rx_base = RTL838X_DMA_RX_BASE, + .dma_tx_base = RTL838X_DMA_TX_BASE, + .dma_if_rx_ring_size = rtl838x_dma_if_rx_ring_size, + .dma_if_rx_ring_cntr = rtl838x_dma_if_rx_ring_cntr, + .dma_if_rx_cur = RTL838X_DMA_IF_RX_CUR, + .rst_glb_ctrl = RTL838X_RST_GLB_CTRL_0, + .get_mac_link_sts = rtl838x_get_mac_link_sts, + .get_mac_link_dup_sts = rtl838x_get_mac_link_dup_sts, + .get_mac_link_spd_sts = rtl838x_get_mac_link_spd_sts, + .get_mac_rx_pause_sts = rtl838x_get_mac_rx_pause_sts, + .get_mac_tx_pause_sts = rtl838x_get_mac_tx_pause_sts, + .mac = RTL838X_MAC, + .l2_tbl_flush_ctrl = RTL838X_L2_TBL_FLUSH_CTRL, + .update_cntr = rtl838x_update_cntr, + .create_tx_header = rtl838x_create_tx_header, + .decode_tag = rtl838x_decode_tag, +}; + +static const struct rtl838x_eth_reg rtl839x_reg = { + .net_irq = rtl83xx_net_irq, + .mac_port_ctrl = rtl839x_mac_port_ctrl, + .dma_if_intr_sts = RTL839X_DMA_IF_INTR_STS, + .dma_if_intr_msk = RTL839X_DMA_IF_INTR_MSK, + .dma_if_ctrl = RTL839X_DMA_IF_CTRL, + .mac_force_mode_ctrl = RTL839X_MAC_FORCE_MODE_CTRL, + .dma_rx_base = RTL839X_DMA_RX_BASE, + .dma_tx_base = RTL839X_DMA_TX_BASE, + .dma_if_rx_ring_size = rtl839x_dma_if_rx_ring_size, + .dma_if_rx_ring_cntr = rtl839x_dma_if_rx_ring_cntr, + .dma_if_rx_cur = RTL839X_DMA_IF_RX_CUR, + .rst_glb_ctrl = RTL839X_RST_GLB_CTRL, + .get_mac_link_sts = rtl839x_get_mac_link_sts, + .get_mac_link_dup_sts = rtl839x_get_mac_link_dup_sts, + .get_mac_link_spd_sts = rtl839x_get_mac_link_spd_sts, + .get_mac_rx_pause_sts = rtl839x_get_mac_rx_pause_sts, + .get_mac_tx_pause_sts = rtl839x_get_mac_tx_pause_sts, + .mac = RTL839X_MAC, + .l2_tbl_flush_ctrl = RTL839X_L2_TBL_FLUSH_CTRL, + .update_cntr = rtl839x_update_cntr, + .create_tx_header = rtl839x_create_tx_header, + .decode_tag = rtl839x_decode_tag, +}; + +static const struct rtl838x_eth_reg rtl930x_reg = { + .net_irq = rtl93xx_net_irq, + .mac_port_ctrl = rtl930x_mac_port_ctrl, + .dma_if_intr_rx_runout_sts = RTL930X_DMA_IF_INTR_RX_RUNOUT_STS, + .dma_if_intr_rx_done_sts = RTL930X_DMA_IF_INTR_RX_DONE_STS, + .dma_if_intr_tx_done_sts = RTL930X_DMA_IF_INTR_TX_DONE_STS, + .dma_if_intr_rx_runout_msk = RTL930X_DMA_IF_INTR_RX_RUNOUT_MSK, + .dma_if_intr_rx_done_msk = RTL930X_DMA_IF_INTR_RX_DONE_MSK, + .dma_if_intr_tx_done_msk = RTL930X_DMA_IF_INTR_TX_DONE_MSK, + .l2_ntfy_if_intr_sts = RTL930X_L2_NTFY_IF_INTR_STS, + .l2_ntfy_if_intr_msk = RTL930X_L2_NTFY_IF_INTR_MSK, + .dma_if_ctrl = RTL930X_DMA_IF_CTRL, + .mac_force_mode_ctrl = RTL930X_MAC_FORCE_MODE_CTRL, + .dma_rx_base = RTL930X_DMA_RX_BASE, + .dma_tx_base = RTL930X_DMA_TX_BASE, + .dma_if_rx_ring_size = rtl930x_dma_if_rx_ring_size, + .dma_if_rx_ring_cntr = rtl930x_dma_if_rx_ring_cntr, + .dma_if_rx_cur = RTL930X_DMA_IF_RX_CUR, + .rst_glb_ctrl = 
RTL930X_RST_GLB_CTRL_0, + .get_mac_link_sts = rtl930x_get_mac_link_sts, + .get_mac_link_dup_sts = rtl930x_get_mac_link_dup_sts, + .get_mac_link_spd_sts = rtl930x_get_mac_link_spd_sts, + .get_mac_rx_pause_sts = rtl930x_get_mac_rx_pause_sts, + .get_mac_tx_pause_sts = rtl930x_get_mac_tx_pause_sts, + .mac = RTL930X_MAC_L2_ADDR_CTRL, + .l2_tbl_flush_ctrl = RTL930X_L2_TBL_FLUSH_CTRL, + .update_cntr = rtl930x_update_cntr, + .create_tx_header = rtl930x_create_tx_header, + .decode_tag = rtl930x_decode_tag, +}; + +static const struct rtl838x_eth_reg rtl931x_reg = { + .net_irq = rtl93xx_net_irq, + .mac_port_ctrl = rtl931x_mac_port_ctrl, + .dma_if_intr_rx_runout_sts = RTL931X_DMA_IF_INTR_RX_RUNOUT_STS, + .dma_if_intr_rx_done_sts = RTL931X_DMA_IF_INTR_RX_DONE_STS, + .dma_if_intr_tx_done_sts = RTL931X_DMA_IF_INTR_TX_DONE_STS, + .dma_if_intr_rx_runout_msk = RTL931X_DMA_IF_INTR_RX_RUNOUT_MSK, + .dma_if_intr_rx_done_msk = RTL931X_DMA_IF_INTR_RX_DONE_MSK, + .dma_if_intr_tx_done_msk = RTL931X_DMA_IF_INTR_TX_DONE_MSK, + .l2_ntfy_if_intr_sts = RTL931X_L2_NTFY_IF_INTR_STS, + .l2_ntfy_if_intr_msk = RTL931X_L2_NTFY_IF_INTR_MSK, + .dma_if_ctrl = RTL931X_DMA_IF_CTRL, + .mac_force_mode_ctrl = RTL931X_MAC_FORCE_MODE_CTRL, + .dma_rx_base = RTL931X_DMA_RX_BASE, + .dma_tx_base = RTL931X_DMA_TX_BASE, + .dma_if_rx_ring_size = rtl931x_dma_if_rx_ring_size, + .dma_if_rx_ring_cntr = rtl931x_dma_if_rx_ring_cntr, + .dma_if_rx_cur = RTL931X_DMA_IF_RX_CUR, + .rst_glb_ctrl = RTL931X_RST_GLB_CTRL, + .get_mac_link_sts = rtl931x_get_mac_link_sts, + .get_mac_link_dup_sts = rtl931x_get_mac_link_dup_sts, + .get_mac_link_spd_sts = rtl931x_get_mac_link_spd_sts, + .get_mac_rx_pause_sts = rtl931x_get_mac_rx_pause_sts, + .get_mac_tx_pause_sts = rtl931x_get_mac_tx_pause_sts, + .mac = RTL931X_MAC_L2_ADDR_CTRL, + .l2_tbl_flush_ctrl = RTL931X_L2_TBL_FLUSH_CTRL, + .update_cntr = rtl931x_update_cntr, + .create_tx_header = rtl931x_create_tx_header, + .decode_tag = rtl931x_decode_tag, +}; + +static void rtl838x_hw_reset(struct rtl838x_eth_priv *priv) +{ + u32 int_saved, nbuf; + u32 reset_mask; + int i, pos; + + pr_info("RESETTING %x, CPU_PORT %d\n", priv->family_id, priv->cpu_port); + sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port)); + mdelay(100); + + /* Disable and clear interrupts */ + if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) { + sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk); + sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts); + sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk); + sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts); + sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk); + sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts); + } else { + sw_w32(0x00000000, priv->r->dma_if_intr_msk); + sw_w32(0xffffffff, priv->r->dma_if_intr_sts); + } + + if (priv->family_id == RTL8390_FAMILY_ID) { + /* Preserve L2 notification and NBUF settings */ + int_saved = sw_r32(priv->r->dma_if_intr_msk); + nbuf = sw_r32(RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL); + + /* Disable link change interrupt on RTL839x */ + sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG); + sw_w32(0, RTL839X_IMR_PORT_LINK_STS_CHG + 4); + + sw_w32(0x00000000, priv->r->dma_if_intr_msk); + sw_w32(0xffffffff, priv->r->dma_if_intr_sts); + } + + /* Reset NIC (SW_NIC_RST) and queues (SW_Q_RST) */ + if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) + reset_mask = 0x6; + else + reset_mask = 0xc; + + sw_w32(reset_mask, priv->r->rst_glb_ctrl); + + do { /* Wait for reset of NIC and Queues done */ + 
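+		/* Going by the loop condition below, the SW_NIC_RST/SW_Q_RST
+		 * bits appear to be self-clearing once the reset has completed */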
udelay(20); + } while (sw_r32(priv->r->rst_glb_ctrl) & reset_mask); + mdelay(100); + + /* Setup Head of Line */ + if (priv->family_id == RTL8380_FAMILY_ID) + sw_w32(0, RTL838X_DMA_IF_RX_RING_SIZE); // Disabled on RTL8380 + if (priv->family_id == RTL8390_FAMILY_ID) + sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR); + if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) { + for (i = 0; i < priv->rxrings; i++) { + pos = (i % 3) * 10; + sw_w32_mask(0x3ff << pos, 0, priv->r->dma_if_rx_ring_size(i)); + sw_w32_mask(0x3ff << pos, priv->rxringlen, + priv->r->dma_if_rx_ring_cntr(i)); + } + } + + /* Re-enable link change interrupt */ + if (priv->family_id == RTL8390_FAMILY_ID) { + sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG); + sw_w32(0xffffffff, RTL839X_ISR_PORT_LINK_STS_CHG + 4); + sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG); + sw_w32(0xffffffff, RTL839X_IMR_PORT_LINK_STS_CHG + 4); + + /* Restore notification settings: on RTL838x these bits are null */ + sw_w32_mask(7 << 20, int_saved & (7 << 20), priv->r->dma_if_intr_msk); + sw_w32(nbuf, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL); + } +} + +static void rtl838x_hw_ring_setup(struct rtl838x_eth_priv *priv) +{ + int i; + struct ring_b *ring = priv->membase; + + for (i = 0; i < priv->rxrings; i++) + sw_w32(KSEG1ADDR(&ring->rx_r[i]), priv->r->dma_rx_base + i * 4); + + for (i = 0; i < TXRINGS; i++) + sw_w32(KSEG1ADDR(&ring->tx_r[i]), priv->r->dma_tx_base + i * 4); +} + +static void rtl838x_hw_en_rxtx(struct rtl838x_eth_priv *priv) +{ + /* Disable Head of Line features for all RX rings */ + sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0)); + + /* Truncate RX buffer to 0x640 (1600) bytes, pad TX */ + sw_w32(0x06400020, priv->r->dma_if_ctrl); + + /* Enable RX done, RX overflow and TX done interrupts */ + sw_w32(0xfffff, priv->r->dma_if_intr_msk); + + /* Enable DMA, engine expects empty FCS field */ + sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl); + + /* Restart TX/RX to CPU port */ + sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port)); + /* Set Speed, duplex, flow control + * FORCE_EN | LINK_EN | NWAY_EN | DUP_SEL + * | SPD_SEL = 0b10 | FORCE_FC_EN | PHY_MASTER_SLV_MANUAL_EN + * | MEDIA_SEL + */ + sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4); + + /* Enable CRC checks on CPU-port */ + sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port)); +} + +static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv *priv) +{ + /* Setup CPU-Port: RX Buffer */ + sw_w32(0x0000c808, priv->r->dma_if_ctrl); + + /* Enable Notify, RX done, RX overflow and TX done interrupts */ + sw_w32(0x007fffff, priv->r->dma_if_intr_msk); // Notify IRQ! 
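+	/* Judging by the bit layout used in rtl83xx_net_irq(), 0x007fffff unmasks
+	 * RX run-out (bits 0-7), RX done (bits 8-15), TX done (bits 16-19) and
+	 * the RTL839x L2 notification interrupts (bits 20-22) */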
+ + /* Enable DMA */ + sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl); + + /* Restart TX/RX to CPU port, enable CRC checking */ + sw_w32_mask(0x0, 0x3 | BIT(3), priv->r->mac_port_ctrl(priv->cpu_port)); + + /* CPU port joins Lookup Miss Flooding Portmask */ + // TODO: The code below should also work for the RTL838x + sw_w32(0x28000, RTL839X_TBL_ACCESS_L2_CTRL); + sw_w32_mask(0, 0x80000000, RTL839X_TBL_ACCESS_L2_DATA(0)); + sw_w32(0x38000, RTL839X_TBL_ACCESS_L2_CTRL); + + /* Force CPU port link up */ + sw_w32_mask(0, 3, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4); +} + +static void rtl93xx_hw_en_rxtx(struct rtl838x_eth_priv *priv) +{ + int i, pos; + u32 v; + + /* Setup CPU-Port: RX Buffer truncated at 1600 Bytes */ + sw_w32(0x06400040, priv->r->dma_if_ctrl); + + for (i = 0; i < priv->rxrings; i++) { + pos = (i % 3) * 10; + sw_w32_mask(0x3ff << pos, priv->rxringlen << pos, priv->r->dma_if_rx_ring_size(i)); + + // Some SoCs have issues with missing underflow protection + v = (sw_r32(priv->r->dma_if_rx_ring_cntr(i)) >> pos) & 0x3ff; + sw_w32_mask(0x3ff << pos, v, priv->r->dma_if_rx_ring_cntr(i)); + } + + /* Enable Notify, RX done, RX overflow and TX done interrupts */ + sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_msk); + sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk); + sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_msk); + + /* Enable DMA */ + sw_w32_mask(0, RX_EN_93XX | TX_EN_93XX, priv->r->dma_if_ctrl); + + /* Restart TX/RX to CPU port, enable CRC checking */ + sw_w32_mask(0x0, 0x3 | BIT(4), priv->r->mac_port_ctrl(priv->cpu_port)); + + if (priv->family_id == RTL9300_FAMILY_ID) + sw_w32_mask(0, BIT(priv->cpu_port), RTL930X_L2_UNKN_UC_FLD_PMSK); + else + sw_w32_mask(0, BIT(priv->cpu_port), RTL931X_L2_UNKN_UC_FLD_PMSK); + + if (priv->family_id == RTL9300_FAMILY_ID) + sw_w32(0x217, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4); + else + sw_w32(0x2a1d, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4); +} + +static void rtl838x_setup_ring_buffer(struct rtl838x_eth_priv *priv, struct ring_b *ring) +{ + int i, j; + + struct p_hdr *h; + + for (i = 0; i < priv->rxrings; i++) { + for (j = 0; j < priv->rxringlen; j++) { + h = &ring->rx_header[i][j]; + memset(h, 0, sizeof(struct p_hdr)); + h->buf = (u8 *)KSEG1ADDR(ring->rx_space + + i * priv->rxringlen * RING_BUFFER + + j * RING_BUFFER); + h->size = RING_BUFFER; + /* All rings owned by switch, last one wraps */ + ring->rx_r[i][j] = KSEG1ADDR(h) | 1 + | (j == (priv->rxringlen - 1) ? WRAP : 0); + } + ring->c_rx[i] = 0; + } + + for (i = 0; i < TXRINGS; i++) { + for (j = 0; j < TXRINGLEN; j++) { + h = &ring->tx_header[i][j]; + memset(h, 0, sizeof(struct p_hdr)); + h->buf = (u8 *)KSEG1ADDR(ring->tx_space + + i * TXRINGLEN * RING_BUFFER + + j * RING_BUFFER); + h->size = RING_BUFFER; + ring->tx_r[i][j] = KSEG1ADDR(&ring->tx_header[i][j]); + } + /* Last header is wrapping around */ + ring->tx_r[i][j-1] |= WRAP; + ring->c_tx[i] = 0; + } +} + +static void rtl839x_setup_notify_ring_buffer(struct rtl838x_eth_priv *priv) +{ + int i; + struct notify_b *b = priv->membase + sizeof(struct ring_b); + + for (i = 0; i < NOTIFY_BLOCKS; i++) + b->ring[i] = KSEG1ADDR(&b->blocks[i]) | 1 | (i == (NOTIFY_BLOCKS - 1) ? 
WRAP : 0); + + sw_w32((u32) b->ring, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL); + sw_w32_mask(0x3ff << 2, 100 << 2, RTL839X_L2_NOTIFICATION_CTRL); + + /* Setup notification events */ + sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0); // RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN + sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL); // SUSPEND_NOTIFICATION_EN + + /* Enable Notification */ + sw_w32_mask(0, 1 << 0, RTL839X_L2_NOTIFICATION_CTRL); + priv->lastEvent = 0; +} + +static int rtl838x_eth_open(struct net_device *ndev) +{ + unsigned long flags; + struct rtl838x_eth_priv *priv = netdev_priv(ndev); + struct ring_b *ring = priv->membase; + int i; + + pr_debug("%s called: RX rings %d(length %d), TX rings %d(length %d)\n", + __func__, priv->rxrings, priv->rxringlen, TXRINGS, TXRINGLEN); + + spin_lock_irqsave(&priv->lock, flags); + rtl838x_hw_reset(priv); + rtl838x_setup_ring_buffer(priv, ring); + if (priv->family_id == RTL8390_FAMILY_ID) { + rtl839x_setup_notify_ring_buffer(priv); + /* Make sure the ring structure is visible to the ASIC */ + mb(); + flush_cache_all(); + } + + rtl838x_hw_ring_setup(priv); + phylink_start(priv->phylink); + + for (i = 0; i < priv->rxrings; i++) + napi_enable(&priv->rx_qs[i].napi); + + switch (priv->family_id) { + case RTL8380_FAMILY_ID: + rtl838x_hw_en_rxtx(priv); + /* Trap IGMP/MLD traffic to CPU-Port */ + sw_w32(0x3, RTL838X_SPCL_TRAP_IGMP_CTRL); + /* Flush learned FDB entries on link down of a port */ + sw_w32_mask(0, BIT(7), RTL838X_L2_CTRL_0); + break; + + case RTL8390_FAMILY_ID: + rtl839x_hw_en_rxtx(priv); + // Trap MLD and IGMP messages to CPU_PORT + sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL); + /* Flush learned FDB entries on link down of a port */ + sw_w32_mask(0, BIT(7), RTL839X_L2_CTRL_0); + break; + + case RTL9300_FAMILY_ID: + rtl93xx_hw_en_rxtx(priv); + /* Flush learned FDB entries on link down of a port */ + sw_w32_mask(0, BIT(7), RTL930X_L2_CTRL); + // Trap MLD and IGMP messages to CPU_PORT + sw_w32((0x2 << 3) | 0x2, RTL930X_VLAN_APP_PKT_CTRL); + break; + + case RTL9310_FAMILY_ID: + rtl93xx_hw_en_rxtx(priv); + + // Trap MLD and IGMP messages to CPU_PORT + sw_w32((0x2 << 3) | 0x2, RTL931X_VLAN_APP_PKT_CTRL); + + // Disable External CPU access to switch, clear EXT_CPU_EN + sw_w32_mask(BIT(2), 0, RTL931X_MAC_L2_GLOBAL_CTRL2); + + // Set PCIE_PWR_DOWN + sw_w32_mask(0, BIT(1), RTL931X_PS_SOC_CTRL); + break; + } + + netif_tx_start_all_queues(ndev); + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static void rtl838x_hw_stop(struct rtl838x_eth_priv *priv) +{ + u32 force_mac = priv->family_id == RTL8380_FAMILY_ID ? 0x6192C : 0x75; + u32 clear_irq = priv->family_id == RTL8380_FAMILY_ID ? 
0x000fffff : 0x007fffff; + int i; + + // Disable RX/TX from/to CPU-port + sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port)); + + /* Disable traffic */ + if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) + sw_w32_mask(RX_EN_93XX | TX_EN_93XX, 0, priv->r->dma_if_ctrl); + else + sw_w32_mask(RX_EN | TX_EN, 0, priv->r->dma_if_ctrl); + mdelay(200); // Test, whether this is needed + + /* Block all ports */ + if (priv->family_id == RTL8380_FAMILY_ID) { + sw_w32(0x03000000, RTL838X_TBL_ACCESS_DATA_0(0)); + sw_w32(0x00000000, RTL838X_TBL_ACCESS_DATA_0(1)); + sw_w32(1 << 15 | 2 << 12, RTL838X_TBL_ACCESS_CTRL_0); + } + + /* Flush L2 address cache */ + if (priv->family_id == RTL8380_FAMILY_ID) { + for (i = 0; i <= priv->cpu_port; i++) { + sw_w32(1 << 26 | 1 << 23 | i << 5, priv->r->l2_tbl_flush_ctrl); + do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 26)); + } + } else if (priv->family_id == RTL8390_FAMILY_ID) { + for (i = 0; i <= priv->cpu_port; i++) { + sw_w32(1 << 28 | 1 << 25 | i << 5, priv->r->l2_tbl_flush_ctrl); + do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 28)); + } + } + // TODO: L2 flush register is 64 bit on RTL931X and 930X + + /* CPU-Port: Link down */ + if (priv->family_id == RTL8380_FAMILY_ID || priv->family_id == RTL8390_FAMILY_ID) + sw_w32(force_mac, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4); + else if (priv->family_id == RTL9300_FAMILY_ID) + sw_w32_mask(0x3, 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4); + else if (priv->family_id == RTL9310_FAMILY_ID) + sw_w32_mask(BIT(0) | BIT(9), 0, priv->r->mac_force_mode_ctrl + priv->cpu_port *4); + mdelay(100); + + /* Disable all TX/RX interrupts */ + if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) { + sw_w32(0x00000000, priv->r->dma_if_intr_rx_runout_msk); + sw_w32(0xffffffff, priv->r->dma_if_intr_rx_runout_sts); + sw_w32(0x00000000, priv->r->dma_if_intr_rx_done_msk); + sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_sts); + sw_w32(0x00000000, priv->r->dma_if_intr_tx_done_msk); + sw_w32(0x0000000f, priv->r->dma_if_intr_tx_done_sts); + } else { + sw_w32(0x00000000, priv->r->dma_if_intr_msk); + sw_w32(clear_irq, priv->r->dma_if_intr_sts); + } + + /* Disable TX/RX DMA */ + sw_w32(0x00000000, priv->r->dma_if_ctrl); + mdelay(200); +} + +static int rtl838x_eth_stop(struct net_device *ndev) +{ + unsigned long flags; + int i; + struct rtl838x_eth_priv *priv = netdev_priv(ndev); + + pr_info("in %s\n", __func__); + + phylink_stop(priv->phylink); + rtl838x_hw_stop(priv); + + for (i = 0; i < priv->rxrings; i++) + napi_disable(&priv->rx_qs[i].napi); + + netif_tx_stop_all_queues(ndev); + + return 0; +} + +static void rtl838x_eth_set_multicast_list(struct net_device *ndev) +{ + /* + * Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F}) + * CTRL_0_FULL = GENMASK(21, 0) = 0x3FFFFF + */ + if (!(ndev->flags & (IFF_PROMISC | IFF_ALLMULTI))) { + sw_w32(0x0, RTL838X_RMA_CTRL_0); + sw_w32(0x0, RTL838X_RMA_CTRL_1); + } + if (ndev->flags & IFF_ALLMULTI) + sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0); + if (ndev->flags & IFF_PROMISC) { + sw_w32(GENMASK(21, 0), RTL838X_RMA_CTRL_0); + sw_w32(0x7fff, RTL838X_RMA_CTRL_1); + } +} + +static void rtl839x_eth_set_multicast_list(struct net_device *ndev) +{ + /* + * Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F}) + * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC + * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00 + * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0) + */ + if (!(ndev->flags 
& (IFF_PROMISC | IFF_ALLMULTI))) { + sw_w32(0x0, RTL839X_RMA_CTRL_0); + sw_w32(0x0, RTL839X_RMA_CTRL_1); + sw_w32(0x0, RTL839X_RMA_CTRL_2); + sw_w32(0x0, RTL839X_RMA_CTRL_3); + } + if (ndev->flags & IFF_ALLMULTI) { + sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0); + sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1); + sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2); + } + if (ndev->flags & IFF_PROMISC) { + sw_w32(GENMASK(31, 2), RTL839X_RMA_CTRL_0); + sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_1); + sw_w32(GENMASK(31, 0), RTL839X_RMA_CTRL_2); + sw_w32(0x3ff, RTL839X_RMA_CTRL_3); + } +} + +static void rtl930x_eth_set_multicast_list(struct net_device *ndev) +{ + /* + * Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F}) + * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC + * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00 + * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0) + */ + if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)) { + sw_w32(GENMASK(31, 2), RTL930X_RMA_CTRL_0); + sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_1); + sw_w32(GENMASK(31, 0), RTL930X_RMA_CTRL_2); + } else { + sw_w32(0x0, RTL930X_RMA_CTRL_0); + sw_w32(0x0, RTL930X_RMA_CTRL_1); + sw_w32(0x0, RTL930X_RMA_CTRL_2); + } +} + +static void rtl931x_eth_set_multicast_list(struct net_device *ndev) +{ + /* + * Flood all classes of RMA addresses (01-80-C2-00-00-{01..2F}) + * CTRL_0_FULL = GENMASK(31, 2) = 0xFFFFFFFC + * Lower two bits are reserved, corresponding to RMA 01-80-C2-00-00-00. + * CTRL_1_FULL = CTRL_2_FULL = GENMASK(31, 0) + */ + if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)) { + sw_w32(GENMASK(31, 2), RTL931X_RMA_CTRL_0); + sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_1); + sw_w32(GENMASK(31, 0), RTL931X_RMA_CTRL_2); + } else { + sw_w32(0x0, RTL931X_RMA_CTRL_0); + sw_w32(0x0, RTL931X_RMA_CTRL_1); + sw_w32(0x0, RTL931X_RMA_CTRL_2); + } +} + +static void rtl838x_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue) +{ + unsigned long flags; + struct rtl838x_eth_priv *priv = netdev_priv(ndev); + + pr_warn("%s\n", __func__); + spin_lock_irqsave(&priv->lock, flags); + rtl838x_hw_stop(priv); + rtl838x_hw_ring_setup(priv); + rtl838x_hw_en_rxtx(priv); + netif_trans_update(ndev); + netif_start_queue(ndev); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static int rtl838x_eth_tx(struct sk_buff *skb, struct net_device *dev) +{ + int len, i; + struct rtl838x_eth_priv *priv = netdev_priv(dev); + struct ring_b *ring = priv->membase; + uint32_t val; + int ret; + unsigned long flags; + struct p_hdr *h; + int dest_port = -1; + int q = skb_get_queue_mapping(skb) % TXRINGS; + + if (q) // Check for high prio queue + pr_debug("SKB priority: %d\n", skb->priority); + + spin_lock_irqsave(&priv->lock, flags); + len = skb->len; + + /* Check for DSA tagging at the end of the buffer */ + if (netdev_uses_dsa(dev) && skb->data[len-4] == 0x80 + && skb->data[len-3] < priv->cpu_port + && skb->data[len-2] == 0x10 + && skb->data[len-1] == 0x00) { + /* Reuse tag space for CRC if possible */ + dest_port = skb->data[len-3]; + skb->data[len-4] = skb->data[len-3] = skb->data[len-2] = skb->data[len-1] = 0x00; + len -= 4; + } + + len += 4; // Add space for CRC + + if (skb_padto(skb, len)) { + ret = NETDEV_TX_OK; + goto txdone; + } + + /* We can send this packet if CPU owns the descriptor */ + if (!(ring->tx_r[q][ring->c_tx[q]] & 0x1)) { + + /* Set descriptor for tx */ + h = &ring->tx_header[q][ring->c_tx[q]]; + h->size = len; + h->len = len; + // On RTL8380 SoCs, small packet lengths being sent need adjustments + if (priv->family_id == RTL8380_FAMILY_ID) { + 
if (len < ETH_ZLEN - 4) + h->len -= 4; + } + + if (dest_port >= 0) + priv->r->create_tx_header(h, dest_port, skb->priority >> 1); + + /* Copy packet data to tx buffer */ + memcpy((void *)KSEG1ADDR(h->buf), skb->data, len); + /* Make sure packet data is visible to ASIC */ + wmb(); + + /* Hand over to switch */ + ring->tx_r[q][ring->c_tx[q]] |= 1; + + // Before starting TX, prevent a Lextra bus bug on RTL8380 SoCs + if (priv->family_id == RTL8380_FAMILY_ID) { + for (i = 0; i < 10; i++) { + val = sw_r32(priv->r->dma_if_ctrl); + if ((val & 0xc) == 0xc) + break; + } + } + + /* Tell switch to send data */ + if (priv->family_id == RTL9310_FAMILY_ID + || priv->family_id == RTL9300_FAMILY_ID) { + // Ring ID q == 0: Low priority, Ring ID = 1: High prio queue + if (!q) + sw_w32_mask(0, BIT(2), priv->r->dma_if_ctrl); + else + sw_w32_mask(0, BIT(3), priv->r->dma_if_ctrl); + } else { + sw_w32_mask(0, TX_DO, priv->r->dma_if_ctrl); + } + + dev->stats.tx_packets++; + dev->stats.tx_bytes += len; + dev_kfree_skb(skb); + ring->c_tx[q] = (ring->c_tx[q] + 1) % TXRINGLEN; + ret = NETDEV_TX_OK; + } else { + dev_warn(&priv->pdev->dev, "Data is owned by switch\n"); + ret = NETDEV_TX_BUSY; + } +txdone: + spin_unlock_irqrestore(&priv->lock, flags); + return ret; +} + +/* + * Return queue number for TX. On the RTL83XX, these queues have equal priority + * so we do round-robin + */ +u16 rtl83xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + static u8 last = 0; + + last++; + return last % TXRINGS; +} + +/* + * Return queue number for TX. On the RTL93XX, queue 1 is the high priority queue + */ +u16 rtl93xx_pick_tx_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + if (skb->priority >= TC_PRIO_CONTROL) + return 1; + return 0; +} + +static int rtl838x_hw_receive(struct net_device *dev, int r, int budget) +{ + struct rtl838x_eth_priv *priv = netdev_priv(dev); + struct ring_b *ring = priv->membase; + struct sk_buff *skb; + LIST_HEAD(rx_list); + unsigned long flags; + int i, len, work_done = 0; + u8 *data, *skb_data; + unsigned int val; + u32 *last; + struct p_hdr *h; + bool dsa = netdev_uses_dsa(dev); + struct dsa_tag tag; + + pr_debug("---------------------------------------------------------- RX - %d\n", r); + spin_lock_irqsave(&priv->lock, flags); + last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4)); + + do { + if ((ring->rx_r[r][ring->c_rx[r]] & 0x1)) { + if (&ring->rx_r[r][ring->c_rx[r]] != last) { + netdev_warn(dev, "Ring contention: r: %x, last %x, cur %x\n", + r, (uint32_t)last, (u32) &ring->rx_r[r][ring->c_rx[r]]); + } + break; + } + + h = &ring->rx_header[r][ring->c_rx[r]]; + data = (u8 *)KSEG1ADDR(h->buf); + len = h->len; + if (!len) + break; + work_done++; + + len -= 4; /* strip the CRC */ + /* Add 4 bytes for cpu_tag */ + if (dsa) + len += 4; + + skb = netdev_alloc_skb(dev, len + 4); + skb_reserve(skb, NET_IP_ALIGN); + + if (likely(skb)) { + /* BUG: Prevent bug on RTL838x SoCs*/ + if (priv->family_id == RTL8380_FAMILY_ID) { + sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0)); + for (i = 0; i < priv->rxrings; i++) { + /* Update each ring cnt */ + val = sw_r32(priv->r->dma_if_rx_ring_cntr(i)); + sw_w32(val, priv->r->dma_if_rx_ring_cntr(i)); + } + } + + skb_data = skb_put(skb, len); + /* Make sure data is visible */ + mb(); + memcpy(skb->data, (u8 *)KSEG1ADDR(data), len); + /* Overwrite CRC with cpu_tag */ + if (dsa) { + priv->r->decode_tag(h, &tag); + skb->data[len-4] = 0x80; + skb->data[len-3] = tag.port; + 
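+				/* The 0x80, port, 0x10, 0x00 trailer written here mirrors the
+				 * tag that rtl838x_eth_tx() looks for and strips on transmit */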
skb->data[len-2] = 0x10; + skb->data[len-1] = 0x00; + if (tag.l2_offloaded) + skb->data[len-3] |= 0x40; + } + + if (tag.queue >= 0) + pr_debug("Queue: %d, len: %d, reason %d port %d\n", + tag.queue, len, tag.reason, tag.port); + + skb->protocol = eth_type_trans(skb, dev); + if (dev->features & NETIF_F_RXCSUM) { + if (tag.crc_error) + skb_checksum_none_assert(skb); + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + + list_add_tail(&skb->list, &rx_list); + } else { + if (net_ratelimit()) + dev_warn(&dev->dev, "low on memory - packet dropped\n"); + dev->stats.rx_dropped++; + } + + /* Reset header structure */ + memset(h, 0, sizeof(struct p_hdr)); + h->buf = data; + h->size = RING_BUFFER; + + ring->rx_r[r][ring->c_rx[r]] = KSEG1ADDR(h) | 0x1 + | (ring->c_rx[r] == (priv->rxringlen - 1) ? WRAP : 0x1); + ring->c_rx[r] = (ring->c_rx[r] + 1) % priv->rxringlen; + last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4)); + } while (&ring->rx_r[r][ring->c_rx[r]] != last && work_done < budget); + + netif_receive_skb_list(&rx_list); + + // Update counters + priv->r->update_cntr(r, 0); + + spin_unlock_irqrestore(&priv->lock, flags); + + return work_done; +} + +static int rtl838x_poll_rx(struct napi_struct *napi, int budget) +{ + struct rtl838x_rx_q *rx_q = container_of(napi, struct rtl838x_rx_q, napi); + struct rtl838x_eth_priv *priv = rx_q->priv; + int work_done = 0; + int r = rx_q->id; + int work; + + while (work_done < budget) { + work = rtl838x_hw_receive(priv->netdev, r, budget - work_done); + if (!work) + break; + work_done += work; + } + + if (work_done < budget) { + napi_complete_done(napi, work_done); + + /* Enable RX interrupt */ + if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) + sw_w32(0xffffffff, priv->r->dma_if_intr_rx_done_msk); + else + sw_w32_mask(0, 0xf00ff | BIT(r + 8), priv->r->dma_if_intr_msk); + } + return work_done; +} + + +static void rtl838x_validate(struct phylink_config *config, + unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + pr_debug("In %s\n", __func__); + + if (!phy_interface_mode_is_rgmii(state->interface) && + state->interface != PHY_INTERFACE_MODE_1000BASEX && + state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII && + state->interface != PHY_INTERFACE_MODE_GMII && + state->interface != PHY_INTERFACE_MODE_QSGMII && + state->interface != PHY_INTERFACE_MODE_INTERNAL && + state->interface != PHY_INTERFACE_MODE_SGMII) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + pr_err("Unsupported interface: %d\n", state->interface); + return; + } + + /* Allow all the expected bits */ + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + /* With the exclusion of MII and Reverse MII, we support Gigabit, + * including Half duplex + */ + if (state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + } + + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + + bitmap_and(supported, supported, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + + +static void rtl838x_mac_config(struct phylink_config *config, + 
unsigned int mode, + const struct phylink_link_state *state) +{ + /* This is only being called for the master device, + * i.e. the CPU-Port. We don't need to do anything. + */ + + pr_info("In %s, mode %x\n", __func__, mode); +} + +static void rtl838x_mac_an_restart(struct phylink_config *config) +{ + struct net_device *dev = container_of(config->dev, struct net_device, dev); + struct rtl838x_eth_priv *priv = netdev_priv(dev); + + /* This works only on RTL838x chips */ + if (priv->family_id != RTL8380_FAMILY_ID) + return; + + pr_debug("In %s\n", __func__); + /* Restart by disabling and re-enabling link */ + sw_w32(0x6192D, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4); + mdelay(20); + sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4); +} + +static void rtl838x_mac_pcs_get_state(struct phylink_config *config, + struct phylink_link_state *state) +{ + u32 speed; + struct net_device *dev = container_of(config->dev, struct net_device, dev); + struct rtl838x_eth_priv *priv = netdev_priv(dev); + int port = priv->cpu_port; + + pr_info("In %s\n", __func__); + + state->link = priv->r->get_mac_link_sts(port) ? 1 : 0; + state->duplex = priv->r->get_mac_link_dup_sts(port) ? 1 : 0; + + pr_info("%s link status is %d\n", __func__, state->link); + speed = priv->r->get_mac_link_spd_sts(port); + switch (speed) { + case 0: + state->speed = SPEED_10; + break; + case 1: + state->speed = SPEED_100; + break; + case 2: + state->speed = SPEED_1000; + break; + case 5: + state->speed = SPEED_2500; + break; + case 6: + state->speed = SPEED_5000; + break; + case 4: + state->speed = SPEED_10000; + break; + default: + state->speed = SPEED_UNKNOWN; + break; + } + + state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX); + if (priv->r->get_mac_rx_pause_sts(port)) + state->pause |= MLO_PAUSE_RX; + if (priv->r->get_mac_tx_pause_sts(port)) + state->pause |= MLO_PAUSE_TX; +} + +static void rtl838x_mac_link_down(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct net_device *dev = container_of(config->dev, struct net_device, dev); + struct rtl838x_eth_priv *priv = netdev_priv(dev); + + pr_debug("In %s\n", __func__); + /* Stop TX/RX to port */ + sw_w32_mask(0x03, 0, priv->r->mac_port_ctrl(priv->cpu_port)); +} + +static void rtl838x_mac_link_up(struct phylink_config *config, + struct phy_device *phy, unsigned int mode, + phy_interface_t interface, int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct net_device *dev = container_of(config->dev, struct net_device, dev); + struct rtl838x_eth_priv *priv = netdev_priv(dev); + + pr_debug("In %s\n", __func__); + /* Restart TX/RX to port */ + sw_w32_mask(0, 0x03, priv->r->mac_port_ctrl(priv->cpu_port)); +} + +static void rtl838x_set_mac_hw(struct net_device *dev, u8 *mac) +{ + struct rtl838x_eth_priv *priv = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + pr_debug("In %s\n", __func__); + sw_w32((mac[0] << 8) | mac[1], priv->r->mac); + sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], priv->r->mac + 4); + + if (priv->family_id == RTL8380_FAMILY_ID) { + /* 2 more registers, ALE/MAC block */ + sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC_ALE); + sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], + (RTL838X_MAC_ALE + 4)); + + sw_w32((mac[0] << 8) | mac[1], RTL838X_MAC2); + sw_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], + RTL838X_MAC2 + 4); + } + spin_unlock_irqrestore(&priv->lock, flags); +} + +static int rtl838x_set_mac_address(struct 
net_device *dev, void *p) +{ + struct rtl838x_eth_priv *priv = netdev_priv(dev); + const struct sockaddr *addr = p; + u8 *mac = (u8 *) (addr->sa_data); + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + rtl838x_set_mac_hw(dev, mac); + + pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac), sw_r32(priv->r->mac + 4)); + return 0; +} + +static int rtl8390_init_mac(struct rtl838x_eth_priv *priv) +{ + // We will need to set-up EEE and the egress-rate limitation + return 0; +} + +static int rtl8380_init_mac(struct rtl838x_eth_priv *priv) +{ + int i; + + if (priv->family_id == 0x8390) + return rtl8390_init_mac(priv); + + // At present we do not know how to set up EEE on any other SoC than RTL8380 + if (priv->family_id != 0x8380) + return 0; + + pr_info("%s\n", __func__); + /* fix timer for EEE */ + sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL); + sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL); + + /* Init VLAN. TODO: Understand what is being done, here */ + if (priv->id == 0x8382) { + for (i = 0; i <= 28; i++) + sw_w32(0, 0xd57c + i * 0x80); + } + if (priv->id == 0x8380) { + for (i = 8; i <= 28; i++) + sw_w32(0, 0xd57c + i * 0x80); + } + return 0; +} + +static int rtl838x_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + struct rtl838x_eth_priv *priv = netdev_priv(ndev); + + pr_debug("%s called\n", __func__); + return phylink_ethtool_ksettings_get(priv->phylink, cmd); +} + +static int rtl838x_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) +{ + struct rtl838x_eth_priv *priv = netdev_priv(ndev); + + pr_debug("%s called\n", __func__); + return phylink_ethtool_ksettings_set(priv->phylink, cmd); +} + +static int rtl838x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum) +{ + u32 val; + int err; + struct rtl838x_eth_priv *priv = bus->priv; + + if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380) + return rtl838x_read_sds_phy(mii_id, regnum); + + if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) { + err = rtl838x_read_mmd_phy(mii_id, + mdiobus_c45_devad(regnum), + regnum, &val); + pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id, + mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum), + val, err); + } else { + pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err); + err = rtl838x_read_phy(mii_id, page, regnum, &val); + } + if (err) + return err; + return val; +} + +static int rtl838x_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + return rtl838x_mdio_read_paged(bus, mii_id, 0, regnum); +} + +static int rtl839x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum) +{ + u32 val; + int err; + struct rtl838x_eth_priv *priv = bus->priv; + + if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393) + return rtl839x_read_sds_phy(mii_id, regnum); + + if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) { + err = rtl839x_read_mmd_phy(mii_id, + mdiobus_c45_devad(regnum), + regnum, &val); + pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id, + mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum), + val, err); + } else { + err = rtl839x_read_phy(mii_id, page, regnum, &val); + pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err); + } + if (err) + return err; + return val; +} + +static int rtl839x_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + return rtl839x_mdio_read_paged(bus, mii_id, 0, regnum); +} + +static int 
rtl930x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum) +{ + u32 val; + int err; + struct rtl838x_eth_priv *priv = bus->priv; + + if (priv->phy_is_internal[mii_id]) + return rtl930x_read_sds_phy(priv->sds_id[mii_id], page, regnum); + + if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) { + err = rtl930x_read_mmd_phy(mii_id, + mdiobus_c45_devad(regnum), + regnum, &val); + pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id, + mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum), + val, err); + } else { + err = rtl930x_read_phy(mii_id, page, regnum, &val); + pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err); + } + if (err) + return err; + return val; +} + +static int rtl930x_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + return rtl930x_mdio_read_paged(bus, mii_id, 0, regnum); +} + +static int rtl931x_mdio_read_paged(struct mii_bus *bus, int mii_id, u16 page, int regnum) +{ + u32 val; + int err, v; + struct rtl838x_eth_priv *priv = bus->priv; + + pr_debug("%s: In here, port %d\n", __func__, mii_id); + if (priv->phy_is_internal[mii_id]) { + v = rtl931x_read_sds_phy(priv->sds_id[mii_id], page, regnum); + if (v < 0) { + err = v; + } else { + err = 0; + val = v; + } + } else { + if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) { + err = rtl931x_read_mmd_phy(mii_id, + mdiobus_c45_devad(regnum), + regnum, &val); + pr_debug("MMD: %d dev %x register %x read %x, err %d\n", mii_id, + mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum), + val, err); + } else { + err = rtl931x_read_phy(mii_id, page, regnum, &val); + pr_debug("PHY: %d register %x read %x, err %d\n", mii_id, regnum, val, err); + } + } + + if (err) + return err; + return val; +} + +static int rtl931x_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + return rtl931x_mdio_read_paged(bus, mii_id, 0, regnum); +} + +static int rtl838x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page, + int regnum, u16 value) +{ + u32 offset = 0; + struct rtl838x_eth_priv *priv = bus->priv; + int err; + + if (mii_id >= 24 && mii_id <= 27 && priv->id == 0x8380) { + if (mii_id == 26) + offset = 0x100; + sw_w32(value, RTL838X_SDS4_FIB_REG0 + offset + (regnum << 2)); + return 0; + } + + if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) { + err = rtl838x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum), + regnum, value); + pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id, + mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum), + value, err); + + return err; + } + err = rtl838x_write_phy(mii_id, page, regnum, value); + pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err); + return err; +} + +static int rtl838x_mdio_write(struct mii_bus *bus, int mii_id, + int regnum, u16 value) +{ + return rtl838x_mdio_write_paged(bus, mii_id, 0, regnum, value); +} + +static int rtl839x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page, + int regnum, u16 value) +{ + struct rtl838x_eth_priv *priv = bus->priv; + int err; + + if (mii_id >= 48 && mii_id <= 49 && priv->id == 0x8393) + return rtl839x_write_sds_phy(mii_id, regnum, value); + + if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) { + err = rtl839x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum), + regnum, value); + pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id, + mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum), + value, err); + + return err; + } + + err = rtl839x_write_phy(mii_id, page, regnum, value); + pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, 
value, err); + return err; +} + +static int rtl839x_mdio_write(struct mii_bus *bus, int mii_id, + int regnum, u16 value) +{ + return rtl839x_mdio_write_paged(bus, mii_id, 0, regnum, value); +} + +static int rtl930x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page, + int regnum, u16 value) +{ + struct rtl838x_eth_priv *priv = bus->priv; + int err; + + if (priv->phy_is_internal[mii_id]) + return rtl930x_write_sds_phy(priv->sds_id[mii_id], page, regnum, value); + + if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) + return rtl930x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum), + regnum, value); + + err = rtl930x_write_phy(mii_id, page, regnum, value); + pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err); + return err; +} + +static int rtl930x_mdio_write(struct mii_bus *bus, int mii_id, + int regnum, u16 value) +{ + return rtl930x_mdio_write_paged(bus, mii_id, 0, regnum, value); +} + +static int rtl931x_mdio_write_paged(struct mii_bus *bus, int mii_id, u16 page, + int regnum, u16 value) +{ + struct rtl838x_eth_priv *priv = bus->priv; + int err; + + if (priv->phy_is_internal[mii_id]) + return rtl931x_write_sds_phy(priv->sds_id[mii_id], page, regnum, value); + + if (regnum & (MII_ADDR_C45 | MII_ADDR_C22_MMD)) { + err = rtl931x_write_mmd_phy(mii_id, mdiobus_c45_devad(regnum), + regnum, value); + pr_debug("MMD: %d dev %x register %x write %x, err %d\n", mii_id, + mdiobus_c45_devad(regnum), mdiobus_c45_regad(regnum), + value, err); + + return err; + } + + err = rtl931x_write_phy(mii_id, page, regnum, value); + pr_debug("PHY: %d register %x write %x, err %d\n", mii_id, regnum, value, err); + return err; +} + +static int rtl931x_mdio_write(struct mii_bus *bus, int mii_id, + int regnum, u16 value) +{ + return rtl931x_mdio_write_paged(bus, mii_id, 0, regnum, value); +} + +static int rtl838x_mdio_reset(struct mii_bus *bus) +{ + pr_debug("%s called\n", __func__); + /* Disable MAC polling the PHY so that we can start configuration */ + sw_w32(0x00000000, RTL838X_SMI_POLL_CTRL); + + /* Enable PHY control via SoC */ + sw_w32_mask(0, 1 << 15, RTL838X_SMI_GLB_CTRL); + + // Probably should reset all PHYs here... + return 0; +} + +static int rtl839x_mdio_reset(struct mii_bus *bus) +{ + return 0; + + pr_debug("%s called\n", __func__); + /* BUG: The following does not work, but should! */ + /* Disable MAC polling the PHY so that we can start configuration */ + sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL); + sw_w32(0x00000000, RTL839X_SMI_PORT_POLLING_CTRL + 4); + /* Disable PHY polling via SoC */ + sw_w32_mask(1 << 7, 0, RTL839X_SMI_GLB_CTRL); + + // Probably should reset all PHYs here... 
+ return 0; +} + +u8 mac_type_bit[RTL930X_CPU_PORT] = {0, 0, 0, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, + 8, 8, 8, 8, 10, 10, 10, 10, 12, 15, 18, 21}; + +static int rtl930x_mdio_reset(struct mii_bus *bus) +{ + int i; + int pos; + struct rtl838x_eth_priv *priv = bus->priv; + u32 c45_mask = 0; + u32 poll_sel[2]; + u32 poll_ctrl = 0; + u32 private_poll_mask = 0; + u32 v; + bool uses_usxgmii = false; // For the Aquantia PHYs + bool uses_hisgmii = false; // For the RTL8221/8226 + + // Mapping of port to phy-addresses on an SMI bus + poll_sel[0] = poll_sel[1] = 0; + for (i = 0; i < RTL930X_CPU_PORT; i++) { + if (priv->smi_bus[i] > 3) + continue; + pos = (i % 6) * 5; + sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos, + RTL930X_SMI_PORT0_5_ADDR + (i / 6) * 4); + + pos = (i * 2) % 32; + poll_sel[i / 16] |= priv->smi_bus[i] << pos; + poll_ctrl |= BIT(20 + priv->smi_bus[i]); + } + + // Configure which SMI bus is behind which port number + sw_w32(poll_sel[0], RTL930X_SMI_PORT0_15_POLLING_SEL); + sw_w32(poll_sel[1], RTL930X_SMI_PORT16_27_POLLING_SEL); + + // Disable POLL_SEL for any SMI bus with a normal PHY (not RTL8295R for SFP+) + sw_w32_mask(poll_ctrl, 0, RTL930X_SMI_GLB_CTRL); + + // Configure which SMI busses are polled in c45 based on a c45 PHY being on that bus + for (i = 0; i < 4; i++) + if (priv->smi_bus_isc45[i]) + c45_mask |= BIT(i + 16); + + pr_info("c45_mask: %08x\n", c45_mask); + sw_w32_mask(0, c45_mask, RTL930X_SMI_GLB_CTRL); + + // Set the MAC type of each port according to the PHY-interface + // Values are FE: 2, GE: 3, XGE/2.5G: 0(SERDES) or 1(otherwise), SXGE: 0 + v = 0; + for (i = 0; i < RTL930X_CPU_PORT; i++) { + switch (priv->interfaces[i]) { + case PHY_INTERFACE_MODE_10GBASER: + break; // Serdes: Value = 0 + + case PHY_INTERFACE_MODE_HSGMII: + private_poll_mask |= BIT(i); + // fallthrough + case PHY_INTERFACE_MODE_USXGMII: + v |= BIT(mac_type_bit[i]); + uses_usxgmii = true; + break; + + case PHY_INTERFACE_MODE_QSGMII: + private_poll_mask |= BIT(i); + v |= 3 << mac_type_bit[i]; + break; + + default: + break; + } + } + sw_w32(v, RTL930X_SMI_MAC_TYPE_CTRL); + + // Set the private polling mask for all Realtek PHYs (i.e. not the 10GBit Aquantia ones) + sw_w32(private_poll_mask, RTL930X_SMI_PRVTE_POLLING_CTRL); + + /* The following magic values are found in the port configuration, they seem to + * define different ways of polling a PHY. 
The below is for the Aquantia PHYs of + * the XGS1250 and the RTL8226 of the XGS1210 */ + if (uses_usxgmii) { + sw_w32(0x01010000, RTL930X_SMI_10GPHY_POLLING_REG0_CFG); + sw_w32(0x01E7C400, RTL930X_SMI_10GPHY_POLLING_REG9_CFG); + sw_w32(0x01E7E820, RTL930X_SMI_10GPHY_POLLING_REG10_CFG); + } + if (uses_hisgmii) { + sw_w32(0x011FA400, RTL930X_SMI_10GPHY_POLLING_REG0_CFG); + sw_w32(0x013FA412, RTL930X_SMI_10GPHY_POLLING_REG9_CFG); + sw_w32(0x017FA414, RTL930X_SMI_10GPHY_POLLING_REG10_CFG); + } + + pr_debug("%s: RTL930X_SMI_GLB_CTRL %08x\n", __func__, + sw_r32(RTL930X_SMI_GLB_CTRL)); + pr_debug("%s: RTL930X_SMI_PORT0_15_POLLING_SEL %08x\n", __func__, + sw_r32(RTL930X_SMI_PORT0_15_POLLING_SEL)); + pr_debug("%s: RTL930X_SMI_PORT16_27_POLLING_SEL %08x\n", __func__, + sw_r32(RTL930X_SMI_PORT16_27_POLLING_SEL)); + pr_debug("%s: RTL930X_SMI_MAC_TYPE_CTRL %08x\n", __func__, + sw_r32(RTL930X_SMI_MAC_TYPE_CTRL)); + pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG0_CFG %08x\n", __func__, + sw_r32(RTL930X_SMI_10GPHY_POLLING_REG0_CFG)); + pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG9_CFG %08x\n", __func__, + sw_r32(RTL930X_SMI_10GPHY_POLLING_REG9_CFG)); + pr_debug("%s: RTL930X_SMI_10GPHY_POLLING_REG10_CFG %08x\n", __func__, + sw_r32(RTL930X_SMI_10GPHY_POLLING_REG10_CFG)); + pr_debug("%s: RTL930X_SMI_PRVTE_POLLING_CTRL %08x\n", __func__, + sw_r32(RTL930X_SMI_PRVTE_POLLING_CTRL)); + return 0; +} + +static int rtl931x_mdio_reset(struct mii_bus *bus) +{ + int i; + int pos; + struct rtl838x_eth_priv *priv = bus->priv; + u32 c45_mask = 0; + u32 poll_sel[4]; + u32 poll_ctrl = 0; + bool mdc_on[4]; + + pr_info("%s called\n", __func__); + // Disable port polling for configuration purposes + sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL); + sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL + 4); + msleep(100); + + mdc_on[0] = mdc_on[1] = mdc_on[2] = mdc_on[3] = false; + // Mapping of port to phy-addresses on an SMI bus + poll_sel[0] = poll_sel[1] = poll_sel[2] = poll_sel[3] = 0; + for (i = 0; i < 56; i++) { + pos = (i % 6) * 5; + sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos, RTL931X_SMI_PORT_ADDR + (i / 6) * 4); + pos = (i * 2) % 32; + poll_sel[i / 16] |= priv->smi_bus[i] << pos; + poll_ctrl |= BIT(20 + priv->smi_bus[i]); + mdc_on[priv->smi_bus[i]] = true; + } + + // Configure which SMI bus is behind which port number + for (i = 0; i < 4; i++) { + pr_info("poll sel %d, %08x\n", i, poll_sel[i]); + sw_w32(poll_sel[i], RTL931X_SMI_PORT_POLLING_SEL + (i * 4)); + } + + // Configure which SMI busses + pr_info("%s: WAS RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2)); + pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0)); + for (i = 0; i < 4; i++) { + // bus is polled in c45 + if (priv->smi_bus_isc45[i]) + c45_mask |= 0x2 << (i * 2); // Std. 
C45, non-standard is 0x3 + // Enable bus access via MDC + if (mdc_on[i]) + sw_w32_mask(0, BIT(9 + i), RTL931X_MAC_L2_GLOBAL_CTRL2); + } + + pr_info("%s: RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2)); + pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0)); + + /* We have a 10G PHY enable polling + sw_w32(0x01010000, RTL931X_SMI_10GPHY_POLLING_SEL2); + sw_w32(0x01E7C400, RTL931X_SMI_10GPHY_POLLING_SEL3); + sw_w32(0x01E7E820, RTL931X_SMI_10GPHY_POLLING_SEL4); +*/ + sw_w32_mask(0xff, c45_mask, RTL931X_SMI_GLB_CTRL1); + + return 0; +} + +static int rtl931x_chip_init(struct rtl838x_eth_priv *priv) +{ + pr_info("In %s\n", __func__); + + // Initialize Encapsulation memory and wait until finished + sw_w32(0x1, RTL931X_MEM_ENCAP_INIT); + do { } while (sw_r32(RTL931X_MEM_ENCAP_INIT) & 1); + pr_info("%s: init ENCAP done\n", __func__); + + // Initialize Managemen Information Base memory and wait until finished + sw_w32(0x1, RTL931X_MEM_MIB_INIT); + do { } while (sw_r32(RTL931X_MEM_MIB_INIT) & 1); + pr_info("%s: init MIB done\n", __func__); + + // Initialize ACL (PIE) memory and wait until finished + sw_w32(0x1, RTL931X_MEM_ACL_INIT); + do { } while (sw_r32(RTL931X_MEM_ACL_INIT) & 1); + pr_info("%s: init ACL done\n", __func__); + + // Initialize ALE memory and wait until finished + sw_w32(0xFFFFFFFF, RTL931X_MEM_ALE_INIT_0); + do { } while (sw_r32(RTL931X_MEM_ALE_INIT_0)); + sw_w32(0x7F, RTL931X_MEM_ALE_INIT_1); + sw_w32(0x7ff, RTL931X_MEM_ALE_INIT_2); + do { } while (sw_r32(RTL931X_MEM_ALE_INIT_2) & 0x7ff); + pr_info("%s: init ALE done\n", __func__); + + // Enable ESD auto recovery + sw_w32(0x1, RTL931X_MDX_CTRL_RSVD); + + // Init SPI, is this for thermal control or what? + sw_w32_mask(0x7 << 11, 0x2 << 11, RTL931X_SPI_CTRL0); + + return 0; +} + +static int rtl838x_mdio_init(struct rtl838x_eth_priv *priv) +{ + struct device_node *mii_np, *dn; + u32 pn; + int ret; + + pr_debug("%s called\n", __func__); + mii_np = of_get_child_by_name(priv->pdev->dev.of_node, "mdio-bus"); + + if (!mii_np) { + dev_err(&priv->pdev->dev, "no %s child node found", "mdio-bus"); + return -ENODEV; + } + + if (!of_device_is_available(mii_np)) { + ret = -ENODEV; + goto err_put_node; + } + + priv->mii_bus = devm_mdiobus_alloc(&priv->pdev->dev); + if (!priv->mii_bus) { + ret = -ENOMEM; + goto err_put_node; + } + + switch(priv->family_id) { + case RTL8380_FAMILY_ID: + priv->mii_bus->name = "rtl838x-eth-mdio"; + priv->mii_bus->read = rtl838x_mdio_read; + priv->mii_bus->read_paged = rtl838x_mdio_read_paged; + priv->mii_bus->write = rtl838x_mdio_write; + priv->mii_bus->write_paged = rtl838x_mdio_write_paged; + priv->mii_bus->reset = rtl838x_mdio_reset; + break; + case RTL8390_FAMILY_ID: + priv->mii_bus->name = "rtl839x-eth-mdio"; + priv->mii_bus->read = rtl839x_mdio_read; + priv->mii_bus->read_paged = rtl839x_mdio_read_paged; + priv->mii_bus->write = rtl839x_mdio_write; + priv->mii_bus->write_paged = rtl839x_mdio_write_paged; + priv->mii_bus->reset = rtl839x_mdio_reset; + break; + case RTL9300_FAMILY_ID: + priv->mii_bus->name = "rtl930x-eth-mdio"; + priv->mii_bus->read = rtl930x_mdio_read; + priv->mii_bus->read_paged = rtl930x_mdio_read_paged; + priv->mii_bus->write = rtl930x_mdio_write; + priv->mii_bus->write_paged = rtl930x_mdio_write_paged; + priv->mii_bus->reset = rtl930x_mdio_reset; + priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45; + break; + case RTL9310_FAMILY_ID: + priv->mii_bus->name = "rtl931x-eth-mdio"; + priv->mii_bus->read = 
rtl931x_mdio_read; + priv->mii_bus->read_paged = rtl931x_mdio_read_paged; + priv->mii_bus->write = rtl931x_mdio_write; + priv->mii_bus->write_paged = rtl931x_mdio_write_paged; + priv->mii_bus->reset = rtl931x_mdio_reset; + priv->mii_bus->probe_capabilities = MDIOBUS_C22_C45; + break; + } + priv->mii_bus->access_capabilities = MDIOBUS_ACCESS_C22_MMD; + priv->mii_bus->priv = priv; + priv->mii_bus->parent = &priv->pdev->dev; + + for_each_node_by_name(dn, "ethernet-phy") { + u32 smi_addr[2]; + + if (of_property_read_u32(dn, "reg", &pn)) + continue; + + if (of_property_read_u32_array(dn, "rtl9300,smi-address", &smi_addr[0], 2)) { + smi_addr[0] = 0; + smi_addr[1] = pn; + } + + if (of_property_read_u32(dn, "sds", &priv->sds_id[pn])) + priv->sds_id[pn] = -1; + else { + pr_info("set sds port %d to %d\n", pn, priv->sds_id[pn]); + } + + if (pn < MAX_PORTS) { + priv->smi_bus[pn] = smi_addr[0]; + priv->smi_addr[pn] = smi_addr[1]; + } else { + pr_err("%s: illegal port number %d\n", __func__, pn); + } + + if (of_device_is_compatible(dn, "ethernet-phy-ieee802.3-c45")) + priv->smi_bus_isc45[smi_addr[0]] = true; + + if (of_property_read_bool(dn, "phy-is-integrated")) { + priv->phy_is_internal[pn] = true; + } + } + + dn = of_find_compatible_node(NULL, NULL, "realtek,rtl83xx-switch"); + if (!dn) { + dev_err(&priv->pdev->dev, "No RTL switch node in DTS\n"); + return -ENODEV; + } + + for_each_node_by_name(dn, "port") { + if (of_property_read_u32(dn, "reg", &pn)) + continue; + pr_debug("%s Looking at port %d\n", __func__, pn); + if (pn > priv->cpu_port) + continue; + if (of_get_phy_mode(dn, &priv->interfaces[pn])) + priv->interfaces[pn] = PHY_INTERFACE_MODE_NA; + pr_debug("%s phy mode of port %d is %s\n", __func__, pn, phy_modes(priv->interfaces[pn])); + } + + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np); + ret = of_mdiobus_register(priv->mii_bus, mii_np); + +err_put_node: + of_node_put(mii_np); + return ret; +} + +static int rtl838x_mdio_remove(struct rtl838x_eth_priv *priv) +{ + pr_debug("%s called\n", __func__); + if (!priv->mii_bus) + return 0; + + mdiobus_unregister(priv->mii_bus); + mdiobus_free(priv->mii_bus); + + return 0; +} + +static netdev_features_t rtl838x_fix_features(struct net_device *dev, + netdev_features_t features) +{ + return features; +} + +static int rtl83xx_set_features(struct net_device *dev, netdev_features_t features) +{ + struct rtl838x_eth_priv *priv = netdev_priv(dev); + + if ((features ^ dev->features) & NETIF_F_RXCSUM) { + if (!(features & NETIF_F_RXCSUM)) + sw_w32_mask(BIT(3), 0, priv->r->mac_port_ctrl(priv->cpu_port)); + else + sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port)); + } + + return 0; +} + +static int rtl93xx_set_features(struct net_device *dev, netdev_features_t features) +{ + struct rtl838x_eth_priv *priv = netdev_priv(dev); + + if ((features ^ dev->features) & NETIF_F_RXCSUM) { + if (!(features & NETIF_F_RXCSUM)) + sw_w32_mask(BIT(4), 0, priv->r->mac_port_ctrl(priv->cpu_port)); + else + sw_w32_mask(0, BIT(4), priv->r->mac_port_ctrl(priv->cpu_port)); + } + + return 0; +} + +static const struct net_device_ops rtl838x_eth_netdev_ops = { + .ndo_open = rtl838x_eth_open, + .ndo_stop = rtl838x_eth_stop, + .ndo_start_xmit = rtl838x_eth_tx, + .ndo_select_queue = rtl83xx_pick_tx_queue, + .ndo_set_mac_address = rtl838x_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = rtl838x_eth_set_multicast_list, + .ndo_tx_timeout = rtl838x_eth_tx_timeout, + .ndo_set_features = rtl83xx_set_features, + .ndo_fix_features = 
rtl838x_fix_features, + .ndo_setup_tc = rtl83xx_setup_tc, +}; + +static const struct net_device_ops rtl839x_eth_netdev_ops = { + .ndo_open = rtl838x_eth_open, + .ndo_stop = rtl838x_eth_stop, + .ndo_start_xmit = rtl838x_eth_tx, + .ndo_select_queue = rtl83xx_pick_tx_queue, + .ndo_set_mac_address = rtl838x_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = rtl839x_eth_set_multicast_list, + .ndo_tx_timeout = rtl838x_eth_tx_timeout, + .ndo_set_features = rtl83xx_set_features, + .ndo_fix_features = rtl838x_fix_features, + .ndo_setup_tc = rtl83xx_setup_tc, +}; + +static const struct net_device_ops rtl930x_eth_netdev_ops = { + .ndo_open = rtl838x_eth_open, + .ndo_stop = rtl838x_eth_stop, + .ndo_start_xmit = rtl838x_eth_tx, + .ndo_select_queue = rtl93xx_pick_tx_queue, + .ndo_set_mac_address = rtl838x_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = rtl930x_eth_set_multicast_list, + .ndo_tx_timeout = rtl838x_eth_tx_timeout, + .ndo_set_features = rtl93xx_set_features, + .ndo_fix_features = rtl838x_fix_features, + .ndo_setup_tc = rtl83xx_setup_tc, +}; + +static const struct net_device_ops rtl931x_eth_netdev_ops = { + .ndo_open = rtl838x_eth_open, + .ndo_stop = rtl838x_eth_stop, + .ndo_start_xmit = rtl838x_eth_tx, + .ndo_select_queue = rtl93xx_pick_tx_queue, + .ndo_set_mac_address = rtl838x_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = rtl931x_eth_set_multicast_list, + .ndo_tx_timeout = rtl838x_eth_tx_timeout, + .ndo_set_features = rtl93xx_set_features, + .ndo_fix_features = rtl838x_fix_features, +}; + +static const struct phylink_mac_ops rtl838x_phylink_ops = { + .validate = rtl838x_validate, + .mac_pcs_get_state = rtl838x_mac_pcs_get_state, + .mac_an_restart = rtl838x_mac_an_restart, + .mac_config = rtl838x_mac_config, + .mac_link_down = rtl838x_mac_link_down, + .mac_link_up = rtl838x_mac_link_up, +}; + +static const struct ethtool_ops rtl838x_ethtool_ops = { + .get_link_ksettings = rtl838x_get_link_ksettings, + .set_link_ksettings = rtl838x_set_link_ksettings, +}; + +static int __init rtl838x_eth_probe(struct platform_device *pdev) +{ + struct net_device *dev; + struct device_node *dn = pdev->dev.of_node; + struct rtl838x_eth_priv *priv; + struct resource *res, *mem; + phy_interface_t phy_mode; + struct phylink *phylink; + int err = 0, i, rxrings, rxringlen; + struct ring_b *ring; + + pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n", + (u32)pdev, (u32)(&(pdev->dev))); + + if (!dn) { + dev_err(&pdev->dev, "No DT found\n"); + return -EINVAL; + } + + rxrings = (soc_info.family == RTL8380_FAMILY_ID + || soc_info.family == RTL8390_FAMILY_ID) ? 8 : 32; + rxrings = rxrings > MAX_RXRINGS ? MAX_RXRINGS : rxrings; + rxringlen = MAX_ENTRIES / rxrings; + rxringlen = rxringlen > MAX_RXLEN ? 
MAX_RXLEN : rxringlen; + + dev = alloc_etherdev_mqs(sizeof(struct rtl838x_eth_priv), TXRINGS, rxrings); + if (!dev) { + err = -ENOMEM; + goto err_free; + } + SET_NETDEV_DEV(dev, &pdev->dev); + priv = netdev_priv(dev); + + /* obtain buffer memory space */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res) { + mem = devm_request_mem_region(&pdev->dev, res->start, + resource_size(res), res->name); + if (!mem) { + dev_err(&pdev->dev, "cannot request memory space\n"); + err = -ENXIO; + goto err_free; + } + + dev->mem_start = mem->start; + dev->mem_end = mem->end; + } else { + dev_err(&pdev->dev, "cannot request IO resource\n"); + err = -ENXIO; + goto err_free; + } + + /* Allocate buffer memory */ + priv->membase = dmam_alloc_coherent(&pdev->dev, rxrings * rxringlen * RING_BUFFER + + sizeof(struct ring_b) + sizeof(struct notify_b), + (void *)&dev->mem_start, GFP_KERNEL); + if (!priv->membase) { + dev_err(&pdev->dev, "cannot allocate DMA buffer\n"); + err = -ENOMEM; + goto err_free; + } + + // Allocate ring-buffer space at the end of the allocated memory + ring = priv->membase; + ring->rx_space = priv->membase + sizeof(struct ring_b) + sizeof(struct notify_b); + + spin_lock_init(&priv->lock); + + dev->ethtool_ops = &rtl838x_ethtool_ops; + dev->min_mtu = ETH_ZLEN; + dev->max_mtu = 1536; + dev->features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM; + dev->hw_features = NETIF_F_RXCSUM; + + priv->id = soc_info.id; + priv->family_id = soc_info.family; + if (priv->id) { + pr_info("Found SoC ID: %4x: %s, family %x\n", + priv->id, soc_info.name, priv->family_id); + } else { + pr_err("Unknown chip id (%04x)\n", priv->id); + return -ENODEV; + } + + switch (priv->family_id) { + case RTL8380_FAMILY_ID: + priv->cpu_port = RTL838X_CPU_PORT; + priv->r = &rtl838x_reg; + dev->netdev_ops = &rtl838x_eth_netdev_ops; + break; + case RTL8390_FAMILY_ID: + priv->cpu_port = RTL839X_CPU_PORT; + priv->r = &rtl839x_reg; + dev->netdev_ops = &rtl839x_eth_netdev_ops; + break; + case RTL9300_FAMILY_ID: + priv->cpu_port = RTL930X_CPU_PORT; + priv->r = &rtl930x_reg; + dev->netdev_ops = &rtl930x_eth_netdev_ops; + break; + case RTL9310_FAMILY_ID: + priv->cpu_port = RTL931X_CPU_PORT; + priv->r = &rtl931x_reg; + dev->netdev_ops = &rtl931x_eth_netdev_ops; + rtl931x_chip_init(priv); + break; + default: + pr_err("Unknown SoC family\n"); + return -ENODEV; + } + priv->rxringlen = rxringlen; + priv->rxrings = rxrings; + + /* Obtain device IRQ number */ + dev->irq = platform_get_irq(pdev, 0); + if (dev->irq < 0) { + dev_err(&pdev->dev, "cannot obtain network-device IRQ\n"); + goto err_free; + } + + err = devm_request_irq(&pdev->dev, dev->irq, priv->r->net_irq, + IRQF_SHARED, dev->name, dev); + if (err) { + dev_err(&pdev->dev, "%s: could not acquire interrupt: %d\n", + __func__, err); + goto err_free; + } + + rtl8380_init_mac(priv); + + /* try to get mac address in the following order: + * 1) from device tree data + * 2) from internal registers set by bootloader + */ + of_get_mac_address(pdev->dev.of_node, dev->dev_addr); + if (is_valid_ether_addr(dev->dev_addr)) { + rtl838x_set_mac_hw(dev, (u8 *)dev->dev_addr); + } else { + dev->dev_addr[0] = (sw_r32(priv->r->mac) >> 8) & 0xff; + dev->dev_addr[1] = sw_r32(priv->r->mac) & 0xff; + dev->dev_addr[2] = (sw_r32(priv->r->mac + 4) >> 24) & 0xff; + dev->dev_addr[3] = (sw_r32(priv->r->mac + 4) >> 16) & 0xff; + dev->dev_addr[4] = (sw_r32(priv->r->mac + 4) >> 8) & 0xff; + dev->dev_addr[5] = sw_r32(priv->r->mac + 4) & 0xff; + } + /* if the address is invalid, use a random value */ + if 
(!is_valid_ether_addr(dev->dev_addr)) { + struct sockaddr sa = { AF_UNSPEC }; + + netdev_warn(dev, "Invalid MAC address, using random\n"); + eth_hw_addr_random(dev); + memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN); + if (rtl838x_set_mac_address(dev, &sa)) + netdev_warn(dev, "Failed to set MAC address.\n"); + } + pr_info("Using MAC %08x%08x\n", sw_r32(priv->r->mac), + sw_r32(priv->r->mac + 4)); + strcpy(dev->name, "eth%d"); + priv->pdev = pdev; + priv->netdev = dev; + + err = rtl838x_mdio_init(priv); + if (err) + goto err_free; + + err = register_netdev(dev); + if (err) + goto err_free; + + for (i = 0; i < priv->rxrings; i++) { + priv->rx_qs[i].id = i; + priv->rx_qs[i].priv = priv; + netif_napi_add(dev, &priv->rx_qs[i].napi, rtl838x_poll_rx, 64); + } + + platform_set_drvdata(pdev, dev); + + phy_mode = PHY_INTERFACE_MODE_NA; + err = of_get_phy_mode(dn, &phy_mode); + if (err < 0) { + dev_err(&pdev->dev, "incorrect phy-mode\n"); + err = -EINVAL; + goto err_free; + } + priv->phylink_config.dev = &dev->dev; + priv->phylink_config.type = PHYLINK_NETDEV; + + phylink = phylink_create(&priv->phylink_config, pdev->dev.fwnode, + phy_mode, &rtl838x_phylink_ops); + + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + goto err_free; + } + priv->phylink = phylink; + + return 0; + +err_free: + pr_err("Error setting up netdev, freeing it again.\n"); + free_netdev(dev); + return err; +} + +static int rtl838x_eth_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct rtl838x_eth_priv *priv = netdev_priv(dev); + int i; + + if (dev) { + pr_info("Removing platform driver for rtl838x-eth\n"); + rtl838x_mdio_remove(priv); + rtl838x_hw_stop(priv); + + netif_tx_stop_all_queues(dev); + + for (i = 0; i < priv->rxrings; i++) + netif_napi_del(&priv->rx_qs[i].napi); + + unregister_netdev(dev); + free_netdev(dev); + } + return 0; +} + +static const struct of_device_id rtl838x_eth_of_ids[] = { + { .compatible = "realtek,rtl838x-eth"}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rtl838x_eth_of_ids); + +static struct platform_driver rtl838x_eth_driver = { + .probe = rtl838x_eth_probe, + .remove = rtl838x_eth_remove, + .driver = { + .name = "rtl838x-eth", + .pm = NULL, + .of_match_table = rtl838x_eth_of_ids, + }, +}; + +module_platform_driver(rtl838x_eth_driver); + +MODULE_AUTHOR("B. 
Koblitz"); +MODULE_DESCRIPTION("RTL838X SoC Ethernet Driver"); +MODULE_LICENSE("GPL"); diff --git a/target/linux/realtek/files-5.15/drivers/net/ethernet/rtl838x_eth.h b/target/linux/realtek/files-5.15/drivers/net/ethernet/rtl838x_eth.h new file mode 100644 index 0000000000..d00d11d0c8 --- /dev/null +++ b/target/linux/realtek/files-5.15/drivers/net/ethernet/rtl838x_eth.h @@ -0,0 +1,457 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _RTL838X_ETH_H +#define _RTL838X_ETH_H + +/* + * Register definition + */ + +/* Per port MAC control */ +#define RTL838X_MAC_PORT_CTRL (0xd560) +#define RTL839X_MAC_PORT_CTRL (0x8004) +#define RTL930X_MAC_L2_PORT_CTRL (0x3268) +#define RTL930X_MAC_PORT_CTRL (0x3260) +#define RTL931X_MAC_L2_PORT_CTRL (0x6000) +#define RTL931X_MAC_PORT_CTRL (0x6004) + +/* DMA interrupt control and status registers */ +#define RTL838X_DMA_IF_CTRL (0x9f58) +#define RTL838X_DMA_IF_INTR_STS (0x9f54) +#define RTL838X_DMA_IF_INTR_MSK (0x9f50) + +#define RTL839X_DMA_IF_CTRL (0x786c) +#define RTL839X_DMA_IF_INTR_STS (0x7868) +#define RTL839X_DMA_IF_INTR_MSK (0x7864) + +#define RTL930X_DMA_IF_CTRL (0xe028) +#define RTL930X_DMA_IF_INTR_RX_RUNOUT_STS (0xe01C) +#define RTL930X_DMA_IF_INTR_RX_DONE_STS (0xe020) +#define RTL930X_DMA_IF_INTR_TX_DONE_STS (0xe024) +#define RTL930X_DMA_IF_INTR_RX_RUNOUT_MSK (0xe010) +#define RTL930X_DMA_IF_INTR_RX_DONE_MSK (0xe014) +#define RTL930X_DMA_IF_INTR_TX_DONE_MSK (0xe018) +#define RTL930X_L2_NTFY_IF_INTR_MSK (0xe04C) +#define RTL930X_L2_NTFY_IF_INTR_STS (0xe050) + +/* TODO: RTL931X_DMA_IF_CTRL has different bits meanings */ +#define RTL931X_DMA_IF_CTRL (0x0928) +#define RTL931X_DMA_IF_INTR_RX_RUNOUT_STS (0x091c) +#define RTL931X_DMA_IF_INTR_RX_DONE_STS (0x0920) +#define RTL931X_DMA_IF_INTR_TX_DONE_STS (0x0924) +#define RTL931X_DMA_IF_INTR_RX_RUNOUT_MSK (0x0910) +#define RTL931X_DMA_IF_INTR_RX_DONE_MSK (0x0914) +#define RTL931X_DMA_IF_INTR_TX_DONE_MSK (0x0918) +#define RTL931X_L2_NTFY_IF_INTR_MSK (0x09E4) +#define RTL931X_L2_NTFY_IF_INTR_STS (0x09E8) + +#define RTL838X_MAC_FORCE_MODE_CTRL (0xa104) +#define RTL839X_MAC_FORCE_MODE_CTRL (0x02bc) +#define RTL930X_MAC_FORCE_MODE_CTRL (0xCA1C) +#define RTL931X_MAC_FORCE_MODE_CTRL (0x0ddc) + +/* MAC address settings */ +#define RTL838X_MAC (0xa9ec) +#define RTL839X_MAC (0x02b4) +#define RTL838X_MAC_ALE (0x6b04) +#define RTL838X_MAC2 (0xa320) +#define RTL930X_MAC_L2_ADDR_CTRL (0xC714) +#define RTL931X_MAC_L2_ADDR_CTRL (0x135c) + +/* Ringbuffer setup */ +#define RTL838X_DMA_RX_BASE (0x9f00) +#define RTL839X_DMA_RX_BASE (0x780c) +#define RTL930X_DMA_RX_BASE (0xdf00) +#define RTL931X_DMA_RX_BASE (0x0800) + +#define RTL838X_DMA_TX_BASE (0x9f40) +#define RTL839X_DMA_TX_BASE (0x784c) +#define RTL930X_DMA_TX_BASE (0xe000) +#define RTL931X_DMA_TX_BASE (0x0900) + +#define RTL838X_DMA_IF_RX_RING_SIZE (0xB7E4) +#define RTL839X_DMA_IF_RX_RING_SIZE (0x6038) +#define RTL930X_DMA_IF_RX_RING_SIZE (0x7C60) +#define RTL931X_DMA_IF_RX_RING_SIZE (0x2080) + +#define RTL838X_DMA_IF_RX_RING_CNTR (0xB7E8) +#define RTL839X_DMA_IF_RX_RING_CNTR (0x603c) +#define RTL930X_DMA_IF_RX_RING_CNTR (0x7C8C) +#define RTL931X_DMA_IF_RX_RING_CNTR (0x20AC) + +#define RTL838X_DMA_IF_RX_CUR (0x9F20) +#define RTL839X_DMA_IF_RX_CUR (0x782c) +#define RTL930X_DMA_IF_RX_CUR (0xdf80) +#define RTL931X_DMA_IF_RX_CUR (0x0880) + +#define RTL838X_DMA_IF_TX_CUR_DESC_ADDR_CTRL (0x9F48) +#define RTL930X_DMA_IF_TX_CUR_DESC_ADDR_CTRL (0xE008) + +#define RTL838X_DMY_REG31 (0x3b28) +#define RTL838X_SDS_MODE_SEL (0x0028) +#define RTL838X_SDS_CFG_REG (0x0034) 
+#define RTL838X_INT_MODE_CTRL (0x005c) +#define RTL838X_CHIP_INFO (0x00d8) +#define RTL838X_SDS4_REG28 (0xef80) +#define RTL838X_SDS4_DUMMY0 (0xef8c) +#define RTL838X_SDS5_EXT_REG6 (0xf18c) + +/* L2 features */ +#define RTL839X_TBL_ACCESS_L2_CTRL (0x1180) +#define RTL839X_TBL_ACCESS_L2_DATA(idx) (0x1184 + ((idx) << 2)) +#define RTL838X_TBL_ACCESS_CTRL_0 (0x6914) +#define RTL838X_TBL_ACCESS_DATA_0(idx) (0x6918 + ((idx) << 2)) + +/* MAC-side link state handling */ +#define RTL838X_MAC_LINK_STS (0xa188) +#define RTL839X_MAC_LINK_STS (0x0390) +#define RTL930X_MAC_LINK_STS (0xCB10) +#define RTL931X_MAC_LINK_STS (0x0ec0) + +#define RTL838X_MAC_LINK_SPD_STS (0xa190) +#define RTL839X_MAC_LINK_SPD_STS (0x03a0) +#define RTL930X_MAC_LINK_SPD_STS (0xCB18) +#define RTL931X_MAC_LINK_SPD_STS (0x0ed0) + +#define RTL838X_MAC_LINK_DUP_STS (0xa19c) +#define RTL839X_MAC_LINK_DUP_STS (0x03b0) +#define RTL930X_MAC_LINK_DUP_STS (0xCB28) +#define RTL931X_MAC_LINK_DUP_STS (0x0ef0) + +// TODO: RTL8390_MAC_LINK_MEDIA_STS_ADDR ??? + +#define RTL838X_MAC_TX_PAUSE_STS (0xa1a0) +#define RTL839X_MAC_TX_PAUSE_STS (0x03b8) +#define RTL930X_MAC_TX_PAUSE_STS (0xCB2C) +#define RTL931X_MAC_TX_PAUSE_STS (0x0ef8) + +#define RTL838X_MAC_RX_PAUSE_STS (0xa1a4) +#define RTL839X_MAC_RX_PAUSE_STS (0xCB30) +#define RTL930X_MAC_RX_PAUSE_STS (0xC2F8) +#define RTL931X_MAC_RX_PAUSE_STS (0x0f00) + +#define RTL838X_EEE_TX_TIMER_GIGA_CTRL (0xaa04) +#define RTL838X_EEE_TX_TIMER_GELITE_CTRL (0xaa08) + +#define RTL930X_L2_UNKN_UC_FLD_PMSK (0x9064) +#define RTL931X_L2_UNKN_UC_FLD_PMSK (0xC8F4) + +#define RTL839X_MAC_GLB_CTRL (0x02a8) +#define RTL839X_SCHED_LB_TICK_TKN_CTRL (0x60f8) + +#define RTL838X_L2_TBL_FLUSH_CTRL (0x3370) +#define RTL839X_L2_TBL_FLUSH_CTRL (0x3ba0) +#define RTL930X_L2_TBL_FLUSH_CTRL (0x9404) +#define RTL931X_L2_TBL_FLUSH_CTRL (0xCD9C) + +#define RTL930X_L2_PORT_SABLK_CTRL (0x905c) +#define RTL930X_L2_PORT_DABLK_CTRL (0x9060) + +/* MAC link state bits */ +#define FORCE_EN (1 << 0) +#define FORCE_LINK_EN (1 << 1) +#define NWAY_EN (1 << 2) +#define DUPLX_MODE (1 << 3) +#define TX_PAUSE_EN (1 << 6) +#define RX_PAUSE_EN (1 << 7) + +/* L2 Notification DMA interface */ +#define RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL (0x785C) +#define RTL839X_L2_NOTIFICATION_CTRL (0x7808) +#define RTL931X_L2_NTFY_RING_BASE_ADDR (0x09DC) +#define RTL931X_L2_NTFY_RING_CUR_ADDR (0x09E0) +#define RTL839X_L2_NOTIFICATION_CTRL (0x7808) +#define RTL931X_L2_NTFY_CTRL (0xCDC8) +#define RTL838X_L2_CTRL_0 (0x3200) +#define RTL839X_L2_CTRL_0 (0x3800) +#define RTL930X_L2_CTRL (0x8FD8) +#define RTL931X_L2_CTRL (0xC800) + +/* TRAPPING to CPU-PORT */ +#define RTL838X_SPCL_TRAP_IGMP_CTRL (0x6984) +#define RTL838X_RMA_CTRL_0 (0x4300) +#define RTL838X_RMA_CTRL_1 (0x4304) +#define RTL839X_RMA_CTRL_0 (0x1200) + +#define RTL839X_SPCL_TRAP_IGMP_CTRL (0x1058) +#define RTL839X_RMA_CTRL_1 (0x1204) +#define RTL839X_RMA_CTRL_2 (0x1208) +#define RTL839X_RMA_CTRL_3 (0x120C) + +#define RTL930X_VLAN_APP_PKT_CTRL (0xA23C) +#define RTL930X_RMA_CTRL_0 (0x9E60) +#define RTL930X_RMA_CTRL_1 (0x9E64) +#define RTL930X_RMA_CTRL_2 (0x9E68) + +#define RTL931X_VLAN_APP_PKT_CTRL (0x96b0) +#define RTL931X_RMA_CTRL_0 (0x8800) +#define RTL931X_RMA_CTRL_1 (0x8804) +#define RTL931X_RMA_CTRL_2 (0x8808) + +/* Advanced SMI control for clause 45 PHYs */ +#define RTL930X_SMI_MAC_TYPE_CTRL (0xCA04) +#define RTL930X_SMI_PORT24_27_ADDR_CTRL (0xCB90) +#define RTL930X_SMI_PORT0_15_POLLING_SEL (0xCA08) +#define RTL930X_SMI_PORT16_27_POLLING_SEL (0xCA0C) + +#define RTL930X_SMI_10GPHY_POLLING_REG0_CFG 
(0xCBB4) +#define RTL930X_SMI_10GPHY_POLLING_REG9_CFG (0xCBB8) +#define RTL930X_SMI_10GPHY_POLLING_REG10_CFG (0xCBBC) +#define RTL930X_SMI_PRVTE_POLLING_CTRL (0xCA10) + +/* Registers of the internal Serdes of the 8390 */ +#define RTL839X_SDS12_13_XSG0 (0xB800) + +/* Chip configuration registers of the RTL9310 */ +#define RTL931X_MEM_ENCAP_INIT (0x4854) +#define RTL931X_MEM_MIB_INIT (0x7E18) +#define RTL931X_MEM_ACL_INIT (0x40BC) +#define RTL931X_MEM_ALE_INIT_0 (0x83F0) +#define RTL931X_MEM_ALE_INIT_1 (0x83F4) +#define RTL931X_MEM_ALE_INIT_2 (0x82E4) +#define RTL931X_MDX_CTRL_RSVD (0x0fcc) +#define RTL931X_PS_SOC_CTRL (0x13f8) +#define RTL931X_SMI_10GPHY_POLLING_SEL2 (0xCF8) +#define RTL931X_SMI_10GPHY_POLLING_SEL3 (0xCFC) +#define RTL931X_SMI_10GPHY_POLLING_SEL4 (0xD00) + +/* Registers of the internal Serdes of the 8380 */ +#define RTL838X_SDS4_FIB_REG0 (0xF800) + +inline int rtl838x_mac_port_ctrl(int p) +{ + return RTL838X_MAC_PORT_CTRL + (p << 7); +} + +inline int rtl839x_mac_port_ctrl(int p) +{ + return RTL839X_MAC_PORT_CTRL + (p << 7); +} + +/* On the RTL931XX, the functionality of the MAC port control register is split up + * into RTL931X_MAC_L2_PORT_CTRL and RTL931X_MAC_PORT_CTRL the functionality used + * by the Ethernet driver is in the same bits now in RTL931X_MAC_L2_PORT_CTRL + */ + +inline int rtl930x_mac_port_ctrl(int p) +{ + return RTL930X_MAC_L2_PORT_CTRL + (p << 6); +} + +inline int rtl931x_mac_port_ctrl(int p) +{ + return RTL931X_MAC_L2_PORT_CTRL + (p << 7); +} + +inline int rtl838x_dma_if_rx_ring_size(int i) +{ + return RTL838X_DMA_IF_RX_RING_SIZE + ((i >> 3) << 2); +} + +inline int rtl839x_dma_if_rx_ring_size(int i) +{ + return RTL839X_DMA_IF_RX_RING_SIZE + ((i >> 3) << 2); +} + +inline int rtl930x_dma_if_rx_ring_size(int i) +{ + return RTL930X_DMA_IF_RX_RING_SIZE + ((i / 3) << 2); +} + +inline int rtl931x_dma_if_rx_ring_size(int i) +{ + return RTL931X_DMA_IF_RX_RING_SIZE + ((i / 3) << 2); +} + +inline int rtl838x_dma_if_rx_ring_cntr(int i) +{ + return RTL838X_DMA_IF_RX_RING_CNTR + ((i >> 3) << 2); +} + +inline int rtl839x_dma_if_rx_ring_cntr(int i) +{ + return RTL839X_DMA_IF_RX_RING_CNTR + ((i >> 3) << 2); +} + +inline int rtl930x_dma_if_rx_ring_cntr(int i) +{ + return RTL930X_DMA_IF_RX_RING_CNTR + ((i / 3) << 2); +} + +inline int rtl931x_dma_if_rx_ring_cntr(int i) +{ + return RTL931X_DMA_IF_RX_RING_CNTR + ((i / 3) << 2); +} + +inline u32 rtl838x_get_mac_link_sts(int port) +{ + return (sw_r32(RTL838X_MAC_LINK_STS) & BIT(port)); +} + +inline u32 rtl839x_get_mac_link_sts(int p) +{ + return (sw_r32(RTL839X_MAC_LINK_STS + ((p >> 5) << 2)) & BIT(p % 32)); +} + +inline u32 rtl930x_get_mac_link_sts(int port) +{ + u32 link = sw_r32(RTL930X_MAC_LINK_STS); + + link = sw_r32(RTL930X_MAC_LINK_STS); + pr_info("%s link state is %08x\n", __func__, link); + return link & BIT(port); +} + +inline u32 rtl931x_get_mac_link_sts(int p) +{ + return (sw_r32(RTL931X_MAC_LINK_STS + ((p >> 5) << 2)) & BIT(p % 32)); +} + +inline u32 rtl838x_get_mac_link_dup_sts(int port) +{ + return (sw_r32(RTL838X_MAC_LINK_DUP_STS) & BIT(port)); +} + +inline u32 rtl839x_get_mac_link_dup_sts(int p) +{ + return (sw_r32(RTL839X_MAC_LINK_DUP_STS + ((p >> 5) << 2)) & BIT(p % 32)); +} + +inline u32 rtl930x_get_mac_link_dup_sts(int port) +{ + return (sw_r32(RTL930X_MAC_LINK_DUP_STS) & BIT(port)); +} + +inline u32 rtl931x_get_mac_link_dup_sts(int p) +{ + return (sw_r32(RTL931X_MAC_LINK_DUP_STS + ((p >> 5) << 2)) & BIT(p % 32)); +} + +inline u32 rtl838x_get_mac_link_spd_sts(int port) +{ + int r = RTL838X_MAC_LINK_SPD_STS + 
((port >> 4) << 2); + u32 speed = sw_r32(r); + + speed >>= (port % 16) << 1; + return (speed & 0x3); +} + +inline u32 rtl839x_get_mac_link_spd_sts(int port) +{ + int r = RTL839X_MAC_LINK_SPD_STS + ((port >> 4) << 2); + u32 speed = sw_r32(r); + + speed >>= (port % 16) << 1; + return (speed & 0x3); +} + + +inline u32 rtl930x_get_mac_link_spd_sts(int port) +{ + int r = RTL930X_MAC_LINK_SPD_STS + ((port >> 3) << 2); + u32 speed = sw_r32(r); + + speed >>= (port % 8) << 2; + return (speed & 0xf); +} + +inline u32 rtl931x_get_mac_link_spd_sts(int port) +{ + int r = RTL931X_MAC_LINK_SPD_STS + ((port >> 3) << 2); + u32 speed = sw_r32(r); + + speed >>= (port % 8) << 2; + return (speed & 0xf); +} + +inline u32 rtl838x_get_mac_rx_pause_sts(int port) +{ + return (sw_r32(RTL838X_MAC_RX_PAUSE_STS) & (1 << port)); +} + +inline u32 rtl839x_get_mac_rx_pause_sts(int p) +{ + return (sw_r32(RTL839X_MAC_RX_PAUSE_STS + ((p >> 5) << 2)) & BIT(p % 32)); +} + +inline u32 rtl930x_get_mac_rx_pause_sts(int port) +{ + return (sw_r32(RTL930X_MAC_RX_PAUSE_STS) & (1 << port)); +} + +inline u32 rtl931x_get_mac_rx_pause_sts(int p) +{ + return (sw_r32(RTL931X_MAC_RX_PAUSE_STS + ((p >> 5) << 2)) & BIT(p % 32)); +} + +inline u32 rtl838x_get_mac_tx_pause_sts(int port) +{ + return (sw_r32(RTL838X_MAC_TX_PAUSE_STS) & (1 << port)); +} + +inline u32 rtl839x_get_mac_tx_pause_sts(int p) +{ + return (sw_r32(RTL839X_MAC_TX_PAUSE_STS + ((p >> 5) << 2)) & BIT(p % 32)); +} + +inline u32 rtl930x_get_mac_tx_pause_sts(int port) +{ + return (sw_r32(RTL930X_MAC_TX_PAUSE_STS) & (1 << port)); +} + +inline u32 rtl931x_get_mac_tx_pause_sts(int p) +{ + return (sw_r32(RTL931X_MAC_TX_PAUSE_STS + ((p >> 5) << 2)) & BIT(p % 32)); +} + +struct p_hdr; +struct dsa_tag; + +struct rtl838x_eth_reg { + irqreturn_t (*net_irq)(int irq, void *dev_id); + int (*mac_port_ctrl)(int port); + int dma_if_intr_sts; + int dma_if_intr_msk; + int dma_if_intr_rx_runout_sts; + int dma_if_intr_rx_done_sts; + int dma_if_intr_tx_done_sts; + int dma_if_intr_rx_runout_msk; + int dma_if_intr_rx_done_msk; + int dma_if_intr_tx_done_msk; + int l2_ntfy_if_intr_sts; + int l2_ntfy_if_intr_msk; + int dma_if_ctrl; + int mac_force_mode_ctrl; + int dma_rx_base; + int dma_tx_base; + int (*dma_if_rx_ring_size)(int ring); + int (*dma_if_rx_ring_cntr)(int ring); + int dma_if_rx_cur; + int rst_glb_ctrl; + u32 (*get_mac_link_sts)(int port); + u32 (*get_mac_link_dup_sts)(int port); + u32 (*get_mac_link_spd_sts)(int port); + u32 (*get_mac_rx_pause_sts)(int port); + u32 (*get_mac_tx_pause_sts)(int port); + int mac; + int l2_tbl_flush_ctrl; + void (*update_cntr)(int r, int work_done); + void (*create_tx_header)(struct p_hdr *h, unsigned int dest_port, int prio); + bool (*decode_tag)(struct p_hdr *h, struct dsa_tag *tag); +}; + +int rtl838x_write_phy(u32 port, u32 page, u32 reg, u32 val); +int rtl838x_read_phy(u32 port, u32 page, u32 reg, u32 *val); +int rtl838x_write_mmd_phy(u32 port, u32 addr, u32 reg, u32 val); +int rtl838x_read_mmd_phy(u32 port, u32 addr, u32 reg, u32 *val); +int rtl839x_write_phy(u32 port, u32 page, u32 reg, u32 val); +int rtl839x_read_phy(u32 port, u32 page, u32 reg, u32 *val); +int rtl839x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val); +int rtl839x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val); +int rtl930x_write_phy(u32 port, u32 page, u32 reg, u32 val); +int rtl930x_read_phy(u32 port, u32 page, u32 reg, u32 *val); +int rtl931x_write_phy(u32 port, u32 page, u32 reg, u32 val); +int rtl931x_read_phy(u32 port, u32 page, u32 reg, u32 *val); +int 
rtl83xx_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data); + +#endif /* _RTL838X_ETH_H */ diff --git a/target/linux/realtek/files-5.15/drivers/net/phy/rtl83xx-phy.c b/target/linux/realtek/files-5.15/drivers/net/phy/rtl83xx-phy.c new file mode 100644 index 0000000000..491ceb48b6 --- /dev/null +++ b/target/linux/realtek/files-5.15/drivers/net/phy/rtl83xx-phy.c @@ -0,0 +1,4018 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Realtek RTL838X Ethernet MDIO interface driver + * + * Copyright (C) 2020 B. Koblitz + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "rtl83xx-phy.h" + +extern struct rtl83xx_soc_info soc_info; +extern struct mutex smi_lock; + +#define PHY_CTRL_REG 0 +#define PHY_POWER_BIT 11 + +#define PHY_PAGE_2 2 +#define PHY_PAGE_4 4 + +/* all Clause-22 RealTek MDIO PHYs use register 0x1f for page select */ +#define RTL8XXX_PAGE_SELECT 0x1f + +#define RTL8XXX_PAGE_MAIN 0x0000 +#define RTL821X_PAGE_PORT 0x0266 +#define RTL821X_PAGE_POWER 0x0a40 +#define RTL821X_PAGE_GPHY 0x0a42 +#define RTL821X_PAGE_MAC 0x0a43 +#define RTL821X_PAGE_STATE 0x0b80 +#define RTL821X_PAGE_PATCH 0x0b82 + +/* + * Using the special page 0xfff with the MDIO controller found in + * RealTek SoCs allows to access the PHY in RAW mode, ie. bypassing + * the cache and paging engine of the MDIO controller. + */ +#define RTL83XX_PAGE_RAW 0x0fff + +/* internal RTL821X PHY uses register 0x1d to select media page */ +#define RTL821XINT_MEDIA_PAGE_SELECT 0x1d +/* external RTL821X PHY uses register 0x1e to select media page */ +#define RTL821XEXT_MEDIA_PAGE_SELECT 0x1e + +#define RTL821X_MEDIA_PAGE_AUTO 0 +#define RTL821X_MEDIA_PAGE_COPPER 1 +#define RTL821X_MEDIA_PAGE_FIBRE 3 +#define RTL821X_MEDIA_PAGE_INTERNAL 8 + +#define RTL9300_PHY_ID_MASK 0xf0ffffff + +/* + * This lock protects the state of the SoC automatically polling the PHYs over the SMI + * bus to detect e.g. link and media changes. For operations on the PHYs such as + * patching or other configuration changes such as EEE, polling needs to be disabled + * since otherwise these operations may fails or lead to unpredictable results. 
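+ *
+ * Illustrative usage sketch (not a definitive API, just the pattern the
+ * EEE handlers below follow): bracket the PHY operation with
+ * disable_polling()/resume_polling(), e.g.
+ *
+ *	u64 saved = disable_polling(phydev->mdio.addr);
+ *	... patch or reconfigure the PHY ...
+ *	resume_polling(saved);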
+ */ +DEFINE_MUTEX(poll_lock); + +static const struct firmware rtl838x_8380_fw; +static const struct firmware rtl838x_8214fc_fw; +static const struct firmware rtl838x_8218b_fw; + +static u64 disable_polling(int port) +{ + u64 saved_state; + + mutex_lock(&poll_lock); + + switch (soc_info.family) { + case RTL8380_FAMILY_ID: + saved_state = sw_r32(RTL838X_SMI_POLL_CTRL); + sw_w32_mask(BIT(port), 0, RTL838X_SMI_POLL_CTRL); + break; + case RTL8390_FAMILY_ID: + saved_state = sw_r32(RTL839X_SMI_PORT_POLLING_CTRL + 4); + saved_state <<= 32; + saved_state |= sw_r32(RTL839X_SMI_PORT_POLLING_CTRL); + sw_w32_mask(BIT(port % 32), 0, + RTL839X_SMI_PORT_POLLING_CTRL + ((port >> 5) << 2)); + break; + case RTL9300_FAMILY_ID: + saved_state = sw_r32(RTL930X_SMI_POLL_CTRL); + sw_w32_mask(BIT(port), 0, RTL930X_SMI_POLL_CTRL); + break; + case RTL9310_FAMILY_ID: + pr_warn("%s not implemented for RTL931X\n", __func__); + break; + } + + mutex_unlock(&poll_lock); + + return saved_state; +} + +static int resume_polling(u64 saved_state) +{ + mutex_lock(&poll_lock); + + switch (soc_info.family) { + case RTL8380_FAMILY_ID: + sw_w32(saved_state, RTL838X_SMI_POLL_CTRL); + break; + case RTL8390_FAMILY_ID: + sw_w32(saved_state >> 32, RTL839X_SMI_PORT_POLLING_CTRL + 4); + sw_w32(saved_state, RTL839X_SMI_PORT_POLLING_CTRL); + break; + case RTL9300_FAMILY_ID: + sw_w32(saved_state, RTL930X_SMI_POLL_CTRL); + break; + case RTL9310_FAMILY_ID: + pr_warn("%s not implemented for RTL931X\n", __func__); + break; + } + + mutex_unlock(&poll_lock); + + return 0; +} + +static void rtl8380_int_phy_on_off(struct phy_device *phydev, bool on) +{ + phy_modify(phydev, 0, BIT(11), on?0:BIT(11)); +} + +static void rtl8380_rtl8214fc_on_off(struct phy_device *phydev, bool on) +{ + /* fiber ports */ + phy_write_paged(phydev, RTL83XX_PAGE_RAW, RTL821XEXT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_FIBRE); + phy_modify(phydev, 0x10, BIT(11), on?0:BIT(11)); + + /* copper ports */ + phy_write_paged(phydev, RTL83XX_PAGE_RAW, RTL821XEXT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_COPPER); + phy_modify_paged(phydev, RTL821X_PAGE_POWER, 0x10, BIT(11), on?0:BIT(11)); +} + +static void rtl8380_phy_reset(struct phy_device *phydev) +{ + phy_modify(phydev, 0, BIT(15), BIT(15)); +} + +// The access registers for SDS_MODE_SEL and the LSB for each SDS within +u16 rtl9300_sds_regs[] = { 0x0194, 0x0194, 0x0194, 0x0194, 0x02a0, 0x02a0, 0x02a0, 0x02a0, + 0x02A4, 0x02A4, 0x0198, 0x0198 }; +u8 rtl9300_sds_lsb[] = { 0, 6, 12, 18, 0, 6, 12, 18, 0, 6, 0, 6}; + +/* + * Reset the SerDes by powering it off and set a new operations mode + * of the SerDes. 0x1f is off. 
Other modes are + * 0x02: SGMII 0x04: 1000BX_FIBER 0x05: FIBER100 + * 0x06: QSGMII 0x09: RSGMII 0x0d: USXGMII + * 0x10: XSGMII 0x12: HISGMII 0x16: 2500Base_X + * 0x17: RXAUI_LITE 0x19: RXAUI_PLUS 0x1a: 10G Base-R + * 0x1b: 10GR1000BX_AUTO 0x1f: OFF + */ +void rtl9300_sds_rst(int sds_num, u32 mode) +{ + pr_info("%s %d\n", __func__, mode); + if (sds_num < 0 || sds_num > 11) { + pr_err("Wrong SerDes number: %d\n", sds_num); + return; + } + + sw_w32_mask(0x1f << rtl9300_sds_lsb[sds_num], 0x1f << rtl9300_sds_lsb[sds_num], + rtl9300_sds_regs[sds_num]); + mdelay(10); + + sw_w32_mask(0x1f << rtl9300_sds_lsb[sds_num], mode << rtl9300_sds_lsb[sds_num], + rtl9300_sds_regs[sds_num]); + mdelay(10); + + pr_debug("%s: 194:%08x 198:%08x 2a0:%08x 2a4:%08x\n", __func__, + sw_r32(0x194), sw_r32(0x198), sw_r32(0x2a0), sw_r32(0x2a4)); +} + +void rtl9300_sds_set(int sds_num, u32 mode) +{ + pr_info("%s %d\n", __func__, mode); + if (sds_num < 0 || sds_num > 11) { + pr_err("Wrong SerDes number: %d\n", sds_num); + return; + } + + sw_w32_mask(0x1f << rtl9300_sds_lsb[sds_num], mode << rtl9300_sds_lsb[sds_num], + rtl9300_sds_regs[sds_num]); + mdelay(10); + + pr_debug("%s: 194:%08x 198:%08x 2a0:%08x 2a4:%08x\n", __func__, + sw_r32(0x194), sw_r32(0x198), sw_r32(0x2a0), sw_r32(0x2a4)); +} + +u32 rtl9300_sds_mode_get(int sds_num) +{ + u32 v; + + if (sds_num < 0 || sds_num > 11) { + pr_err("Wrong SerDes number: %d\n", sds_num); + return 0; + } + + v = sw_r32(rtl9300_sds_regs[sds_num]); + v >>= rtl9300_sds_lsb[sds_num]; + + return v & 0x1f; +} + +/* + * On the RTL839x family of SoCs with inbuilt SerDes, these SerDes are accessed through + * a 2048 bit register that holds the contents of the PHY being simulated by the SoC. + */ +int rtl839x_read_sds_phy(int phy_addr, int phy_reg) +{ + int offset = 0; + int reg; + u32 val; + + if (phy_addr == 49) + offset = 0x100; + + /* + * For the RTL8393 internal SerDes, we simulate a PHY ID in registers 2/3 + * which would otherwise read as 0. + */ + if (soc_info.id == 0x8393) { + if (phy_reg == 2) + return 0x1c; + if (phy_reg == 3) + return 0x8393; + } + + /* + * Register RTL839X_SDS12_13_XSG0 is 2048 bit broad, the MSB (bit 15) of the + * 0th PHY register is bit 1023 (in byte 0x80). Because PHY-registers are 16 + * bit broad, we offset by reg << 1. In the SoC 2 registers are stored in + * one 32 bit register. + */ + reg = (phy_reg << 1) & 0xfc; + val = sw_r32(RTL839X_SDS12_13_XSG0 + offset + 0x80 + reg); + + if (phy_reg & 1) + val = (val >> 16) & 0xffff; + else + val &= 0xffff; + return val; +} + +/* + * On the RTL930x family of SoCs, the internal SerDes are accessed through an IO + * register which simulates commands to an internal MDIO bus. 
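+ *
+ * As a sketch of the layout used by the accessors below, the indirect
+ * command word is built as
+ *
+ *	cmd = (phy_reg << 13) | (page << 7) | (phy_addr << 2) | op;
+ *
+ * where bit 0 of op starts the access and remains set while the controller
+ * is busy, and bit 1 appears to select a write. Read results are returned
+ * in the lower 16 bits of RTL930X_SDS_INDACS_DATA.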
+ */ +int rtl930x_read_sds_phy(int phy_addr, int page, int phy_reg) +{ + int i; + u32 cmd = phy_addr << 2 | page << 7 | phy_reg << 13 | 1; + + sw_w32(cmd, RTL930X_SDS_INDACS_CMD); + + for (i = 0; i < 100; i++) { + if (!(sw_r32(RTL930X_SDS_INDACS_CMD) & 0x1)) + break; + mdelay(1); + } + + if (i >= 100) + return -EIO; + + return sw_r32(RTL930X_SDS_INDACS_DATA) & 0xffff; +} + +int rtl930x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v) +{ + int i; + u32 cmd; + + sw_w32(v, RTL930X_SDS_INDACS_DATA); + cmd = phy_addr << 2 | page << 7 | phy_reg << 13 | 0x3; + + for (i = 0; i < 100; i++) { + if (!(sw_r32(RTL930X_SDS_INDACS_CMD) & 0x1)) + break; + mdelay(1); + } + + + if (i >= 100) { + pr_info("%s ERROR !!!!!!!!!!!!!!!!!!!!\n", __func__); + return -EIO; + } + + return 0; +} + +int rtl931x_read_sds_phy(int phy_addr, int page, int phy_reg) +{ + int i; + u32 cmd = phy_addr << 2 | page << 7 | phy_reg << 13 | 1; + + pr_debug("%s: phy_addr(SDS-ID) %d, phy_reg: %d\n", __func__, phy_addr, phy_reg); + sw_w32(cmd, RTL931X_SERDES_INDRT_ACCESS_CTRL); + + for (i = 0; i < 100; i++) { + if (!(sw_r32(RTL931X_SERDES_INDRT_ACCESS_CTRL) & 0x1)) + break; + mdelay(1); + } + + if (i >= 100) + return -EIO; + + pr_debug("%s: returning %04x\n", __func__, sw_r32(RTL931X_SERDES_INDRT_DATA_CTRL) & 0xffff); + return sw_r32(RTL931X_SERDES_INDRT_DATA_CTRL) & 0xffff; +} + +int rtl931x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v) +{ + int i; + u32 cmd; + + cmd = phy_addr << 2 | page << 7 | phy_reg << 13; + sw_w32(cmd, RTL931X_SERDES_INDRT_ACCESS_CTRL); + + sw_w32(v, RTL931X_SERDES_INDRT_DATA_CTRL); + + cmd = sw_r32(RTL931X_SERDES_INDRT_ACCESS_CTRL) | 0x3; + sw_w32(cmd, RTL931X_SERDES_INDRT_ACCESS_CTRL); + + for (i = 0; i < 100; i++) { + if (!(sw_r32(RTL931X_SERDES_INDRT_ACCESS_CTRL) & 0x1)) + break; + mdelay(1); + } + + if (i >= 100) + return -EIO; + + return 0; +} + +/* + * On the RTL838x SoCs, the internal SerDes is accessed through direct access to + * standard PHY registers, where a 32 bit register holds a 16 bit word as found + * in a standard page 0 of a PHY + */ +int rtl838x_read_sds_phy(int phy_addr, int phy_reg) +{ + int offset = 0; + u32 val; + + if (phy_addr == 26) + offset = 0x100; + val = sw_r32(RTL838X_SDS4_FIB_REG0 + offset + (phy_reg << 2)) & 0xffff; + + return val; +} + +int rtl839x_write_sds_phy(int phy_addr, int phy_reg, u16 v) +{ + int offset = 0; + int reg; + u32 val; + + if (phy_addr == 49) + offset = 0x100; + + reg = (phy_reg << 1) & 0xfc; + val = v; + if (phy_reg & 1) { + val = val << 16; + sw_w32_mask(0xffff0000, val, + RTL839X_SDS12_13_XSG0 + offset + 0x80 + reg); + } else { + sw_w32_mask(0xffff, val, + RTL839X_SDS12_13_XSG0 + offset + 0x80 + reg); + } + + return 0; +} + +/* Read the link and speed status of the 2 internal SGMII/1000Base-X + * ports of the RTL838x SoCs + */ +static int rtl8380_read_status(struct phy_device *phydev) +{ + int err; + + err = genphy_read_status(phydev); + + if (phydev->link) { + phydev->speed = SPEED_1000; + phydev->duplex = DUPLEX_FULL; + } + + return err; +} + +/* Read the link and speed status of the 2 internal SGMII/1000Base-X + * ports of the RTL8393 SoC + */ +static int rtl8393_read_status(struct phy_device *phydev) +{ + int offset = 0; + int err; + int phy_addr = phydev->mdio.addr; + u32 v; + + err = genphy_read_status(phydev); + if (phy_addr == 49) + offset = 0x100; + + if (phydev->link) { + phydev->speed = SPEED_100; + /* Read SPD_RD_00 (bit 13) and SPD_RD_01 (bit 6) out of the internal + * PHY registers + */ + v = sw_r32(RTL839X_SDS12_13_XSG0 
+ offset + 0x80); + if (!(v & (1 << 13)) && (v & (1 << 6))) + phydev->speed = SPEED_1000; + phydev->duplex = DUPLEX_FULL; + } + + return err; +} + +static int rtl8226_read_page(struct phy_device *phydev) +{ + return __phy_read(phydev, RTL8XXX_PAGE_SELECT); +} + +static int rtl8226_write_page(struct phy_device *phydev, int page) +{ + return __phy_write(phydev, RTL8XXX_PAGE_SELECT, page); +} + +static int rtl8226_read_status(struct phy_device *phydev) +{ + int ret = 0, i; + u32 val; + +// TODO: ret = genphy_read_status(phydev); +// if (ret < 0) { +// pr_info("%s: genphy_read_status failed\n", __func__); +// return ret; +// } + + // Link status must be read twice + for (i = 0; i < 2; i++) { + val = phy_read_mmd(phydev, MMD_VEND2, 0xA402); + } + phydev->link = val & BIT(2) ? 1 : 0; + if (!phydev->link) + goto out; + + // Read duplex status + val = phy_read_mmd(phydev, MMD_VEND2, 0xA434); + if (val < 0) + goto out; + phydev->duplex = !!(val & BIT(3)); + + // Read speed + val = phy_read_mmd(phydev, MMD_VEND2, 0xA434); + switch (val & 0x0630) { + case 0x0000: + phydev->speed = SPEED_10; + break; + case 0x0010: + phydev->speed = SPEED_100; + break; + case 0x0020: + phydev->speed = SPEED_1000; + break; + case 0x0200: + phydev->speed = SPEED_10000; + break; + case 0x0210: + phydev->speed = SPEED_2500; + break; + case 0x0220: + phydev->speed = SPEED_5000; + break; + default: + break; + } +out: + return ret; +} + +static int rtl8226_advertise_aneg(struct phy_device *phydev) +{ + int ret = 0; + u32 v; + + pr_info("In %s\n", __func__); + + v = phy_read_mmd(phydev, MMD_AN, 16); + if (v < 0) + goto out; + + v |= BIT(5); // HD 10M + v |= BIT(6); // FD 10M + v |= BIT(7); // HD 100M + v |= BIT(8); // FD 100M + + ret = phy_write_mmd(phydev, MMD_AN, 16, v); + + // Allow 1GBit + v = phy_read_mmd(phydev, MMD_VEND2, 0xA412); + if (v < 0) + goto out; + v |= BIT(9); // FD 1000M + + ret = phy_write_mmd(phydev, MMD_VEND2, 0xA412, v); + if (ret < 0) + goto out; + + // Allow 2.5G + v = phy_read_mmd(phydev, MMD_AN, 32); + if (v < 0) + goto out; + + v |= BIT(7); + ret = phy_write_mmd(phydev, MMD_AN, 32, v); + +out: + return ret; +} + +static int rtl8226_config_aneg(struct phy_device *phydev) +{ + int ret = 0; + u32 v; + + pr_debug("In %s\n", __func__); + if (phydev->autoneg == AUTONEG_ENABLE) { + ret = rtl8226_advertise_aneg(phydev); + if (ret) + goto out; + // AutoNegotiationEnable + v = phy_read_mmd(phydev, MMD_AN, 0); + if (v < 0) + goto out; + + v |= BIT(12); // Enable AN + ret = phy_write_mmd(phydev, MMD_AN, 0, v); + if (ret < 0) + goto out; + + // RestartAutoNegotiation + v = phy_read_mmd(phydev, MMD_VEND2, 0xA400); + if (v < 0) + goto out; + v |= BIT(9); + + ret = phy_write_mmd(phydev, MMD_VEND2, 0xA400, v); + } + +// TODO: ret = __genphy_config_aneg(phydev, ret); + +out: + return ret; +} + +static int rtl8226_get_eee(struct phy_device *phydev, + struct ethtool_eee *e) +{ + u32 val; + int addr = phydev->mdio.addr; + + pr_debug("In %s, port %d, was enabled: %d\n", __func__, addr, e->eee_enabled); + + val = phy_read_mmd(phydev, MMD_AN, 60); + if (e->eee_enabled) { + e->eee_enabled = !!(val & BIT(1)); + if (!e->eee_enabled) { + val = phy_read_mmd(phydev, MMD_AN, 62); + e->eee_enabled = !!(val & BIT(0)); + } + } + pr_debug("%s: enabled: %d\n", __func__, e->eee_enabled); + + return 0; +} + +static int rtl8226_set_eee(struct phy_device *phydev, struct ethtool_eee *e) +{ + int port = phydev->mdio.addr; + u64 poll_state; + bool an_enabled; + u32 val; + + pr_info("In %s, port %d, enabled %d\n", __func__, port, 
e->eee_enabled); + + poll_state = disable_polling(port); + + // Remember aneg state + val = phy_read_mmd(phydev, MMD_AN, 0); + an_enabled = !!(val & BIT(12)); + + // Setup 100/1000MBit + val = phy_read_mmd(phydev, MMD_AN, 60); + if (e->eee_enabled) + val |= 0x6; + else + val &= 0x6; + phy_write_mmd(phydev, MMD_AN, 60, val); + + // Setup 2.5GBit + val = phy_read_mmd(phydev, MMD_AN, 62); + if (e->eee_enabled) + val |= 0x1; + else + val &= 0x1; + phy_write_mmd(phydev, MMD_AN, 62, val); + + // RestartAutoNegotiation + val = phy_read_mmd(phydev, MMD_VEND2, 0xA400); + val |= BIT(9); + phy_write_mmd(phydev, MMD_VEND2, 0xA400, val); + + resume_polling(poll_state); + + return 0; +} + +static struct fw_header *rtl838x_request_fw(struct phy_device *phydev, + const struct firmware *fw, + const char *name) +{ + struct device *dev = &phydev->mdio.dev; + int err; + struct fw_header *h; + uint32_t checksum, my_checksum; + + err = request_firmware(&fw, name, dev); + if (err < 0) + goto out; + + if (fw->size < sizeof(struct fw_header)) { + pr_err("Firmware size too small.\n"); + err = -EINVAL; + goto out; + } + + h = (struct fw_header *) fw->data; + pr_info("Firmware loaded. Size %d, magic: %08x\n", fw->size, h->magic); + + if (h->magic != 0x83808380) { + pr_err("Wrong firmware file: MAGIC mismatch.\n"); + goto out; + } + + checksum = h->checksum; + h->checksum = 0; + my_checksum = ~crc32(0xFFFFFFFFU, fw->data, fw->size); + if (checksum != my_checksum) { + pr_err("Firmware checksum mismatch.\n"); + err = -EINVAL; + goto out; + } + h->checksum = checksum; + + return h; +out: + dev_err(dev, "Unable to load firmware %s (%d)\n", name, err); + return NULL; +} + +static void rtl821x_phy_setup_package_broadcast(struct phy_device *phydev, bool enable) +{ + int mac = phydev->mdio.addr; + + /* select main page 0 */ + phy_write_paged(phydev, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL8XXX_PAGE_MAIN); + /* write to 0x8 to register 0x1d on main page 0 */ + phy_write_paged(phydev, RTL83XX_PAGE_RAW, RTL821XINT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_INTERNAL); + /* select page 0x266 */ + phy_write_paged(phydev, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL821X_PAGE_PORT); + /* set phy id and target broadcast bitmap in register 0x16 on page 0x266 */ + phy_write_paged(phydev, RTL83XX_PAGE_RAW, 0x16, (enable?0xff00:0x00) | mac); + /* return to main page 0 */ + phy_write_paged(phydev, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL8XXX_PAGE_MAIN); + /* write to 0x0 to register 0x1d on main page 0 */ + phy_write_paged(phydev, RTL83XX_PAGE_RAW, RTL821XINT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_AUTO); + mdelay(1); +} + +static int rtl8390_configure_generic(struct phy_device *phydev) +{ + int mac = phydev->mdio.addr; + u32 val, phy_id; + + val = phy_read(phydev, 2); + phy_id = val << 16; + val = phy_read(phydev, 3); + phy_id |= val; + pr_debug("Phy on MAC %d: %x\n", mac, phy_id); + + /* Read internal PHY ID */ + phy_write_paged(phydev, 31, 27, 0x0002); + val = phy_read_paged(phydev, 31, 28); + + /* Internal RTL8218B, version 2 */ + phydev_info(phydev, "Detected unknown %x\n", val); + return 0; +} + +static int rtl8380_configure_int_rtl8218b(struct phy_device *phydev) +{ + u32 val, phy_id; + int i, p, ipd_flag; + int mac = phydev->mdio.addr; + struct fw_header *h; + u32 *rtl838x_6275B_intPhy_perport; + u32 *rtl8218b_6276B_hwEsd_perport; + + val = phy_read(phydev, 2); + phy_id = val << 16; + val = phy_read(phydev, 3); + phy_id |= val; + pr_debug("Phy on MAC %d: %x\n", mac, phy_id); + + /* Read internal PHY ID */ + phy_write_paged(phydev, 31, 
27, 0x0002); + val = phy_read_paged(phydev, 31, 28); + if (val != 0x6275) { + phydev_err(phydev, "Expected internal RTL8218B, found PHY-ID %x\n", val); + return -1; + } + + /* Internal RTL8218B, version 2 */ + phydev_info(phydev, "Detected internal RTL8218B\n"); + + h = rtl838x_request_fw(phydev, &rtl838x_8380_fw, FIRMWARE_838X_8380_1); + if (!h) + return -1; + + if (h->phy != 0x83800000) { + phydev_err(phydev, "Wrong firmware file: PHY mismatch.\n"); + return -1; + } + + rtl838x_6275B_intPhy_perport = (void *)h + sizeof(struct fw_header) + + h->parts[8].start; + + rtl8218b_6276B_hwEsd_perport = (void *)h + sizeof(struct fw_header) + + h->parts[9].start; + + if (sw_r32(RTL838X_DMY_REG31) == 0x1) + ipd_flag = 1; + + val = phy_read(phydev, 0); + if (val & BIT(11)) + rtl8380_int_phy_on_off(phydev, true); + else + rtl8380_phy_reset(phydev); + msleep(100); + + /* Ready PHY for patch */ + for (p = 0; p < 8; p++) { + phy_package_port_write_paged(phydev, p, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL821X_PAGE_PATCH); + phy_package_port_write_paged(phydev, p, RTL83XX_PAGE_RAW, 0x10, 0x0010); + } + msleep(500); + for (p = 0; p < 8; p++) { + for (i = 0; i < 100 ; i++) { + val = phy_package_port_read_paged(phydev, p, RTL821X_PAGE_STATE, 0x10); + if (val & 0x40) + break; + } + if (i >= 100) { + phydev_err(phydev, + "ERROR: Port %d not ready for patch.\n", + mac + p); + return -1; + } + } + for (p = 0; p < 8; p++) { + i = 0; + while (rtl838x_6275B_intPhy_perport[i * 2]) { + phy_package_port_write_paged(phydev, p, RTL83XX_PAGE_RAW, + rtl838x_6275B_intPhy_perport[i * 2], + rtl838x_6275B_intPhy_perport[i * 2 + 1]); + i++; + } + i = 0; + while (rtl8218b_6276B_hwEsd_perport[i * 2]) { + phy_package_port_write_paged(phydev, p, RTL83XX_PAGE_RAW, + rtl8218b_6276B_hwEsd_perport[i * 2], + rtl8218b_6276B_hwEsd_perport[i * 2 + 1]); + i++; + } + } + return 0; +} + +static int rtl8380_configure_ext_rtl8218b(struct phy_device *phydev) +{ + u32 val, ipd, phy_id; + int i, l; + int mac = phydev->mdio.addr; + struct fw_header *h; + u32 *rtl8380_rtl8218b_perchip; + u32 *rtl8218B_6276B_rtl8380_perport; + u32 *rtl8380_rtl8218b_perport; + + if (soc_info.family == RTL8380_FAMILY_ID && mac != 0 && mac != 16) { + phydev_err(phydev, "External RTL8218B must have PHY-IDs 0 or 16!\n"); + return -1; + } + val = phy_read(phydev, 2); + phy_id = val << 16; + val = phy_read(phydev, 3); + phy_id |= val; + pr_info("Phy on MAC %d: %x\n", mac, phy_id); + + /* Read internal PHY ID */ + phy_write_paged(phydev, 31, 27, 0x0002); + val = phy_read_paged(phydev, 31, 28); + if (val != 0x6276) { + phydev_err(phydev, "Expected external RTL8218B, found PHY-ID %x\n", val); + return -1; + } + phydev_info(phydev, "Detected external RTL8218B\n"); + + h = rtl838x_request_fw(phydev, &rtl838x_8218b_fw, FIRMWARE_838X_8218b_1); + if (!h) + return -1; + + if (h->phy != 0x8218b000) { + phydev_err(phydev, "Wrong firmware file: PHY mismatch.\n"); + return -1; + } + + rtl8380_rtl8218b_perchip = (void *)h + sizeof(struct fw_header) + + h->parts[0].start; + + rtl8218B_6276B_rtl8380_perport = (void *)h + sizeof(struct fw_header) + + h->parts[1].start; + + rtl8380_rtl8218b_perport = (void *)h + sizeof(struct fw_header) + + h->parts[2].start; + + val = phy_read(phydev, 0); + if (val & (1 << 11)) + rtl8380_int_phy_on_off(phydev, true); + else + rtl8380_phy_reset(phydev); + + msleep(100); + + /* Get Chip revision */ + phy_write_paged(phydev, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL8XXX_PAGE_MAIN); + phy_write_paged(phydev, RTL83XX_PAGE_RAW, 0x1b, 0x4); + val = 
phy_read_paged(phydev, RTL83XX_PAGE_RAW, 0x1c); + + phydev_info(phydev, "Detected chip revision %04x\n", val); + + i = 0; + while (rtl8380_rtl8218b_perchip[i * 3] + && rtl8380_rtl8218b_perchip[i * 3 + 1]) { + phy_package_port_write_paged(phydev, rtl8380_rtl8218b_perchip[i * 3], + RTL83XX_PAGE_RAW, rtl8380_rtl8218b_perchip[i * 3 + 1], + rtl8380_rtl8218b_perchip[i * 3 + 2]); + i++; + } + + /* Enable PHY */ + for (i = 0; i < 8; i++) { + phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL8XXX_PAGE_MAIN); + phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, 0x00, 0x1140); + } + mdelay(100); + + /* Request patch */ + for (i = 0; i < 8; i++) { + phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL821X_PAGE_PATCH); + phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, 0x10, 0x0010); + } + + mdelay(300); + + /* Verify patch readiness */ + for (i = 0; i < 8; i++) { + for (l = 0; l < 100; l++) { + val = phy_package_port_read_paged(phydev, i, RTL821X_PAGE_STATE, 0x10); + if (val & 0x40) + break; + } + if (l >= 100) { + phydev_err(phydev, "Could not patch PHY\n"); + return -1; + } + } + + /* Use Broadcast ID method for patching */ + rtl821x_phy_setup_package_broadcast(phydev, true); + + phy_write_paged(phydev, RTL83XX_PAGE_RAW, 30, 8); + phy_write_paged(phydev, 0x26e, 17, 0xb); + phy_write_paged(phydev, 0x26e, 16, 0x2); + mdelay(1); + ipd = phy_read_paged(phydev, 0x26e, 19); + phy_write_paged(phydev, 0, 30, 0); + ipd = (ipd >> 4) & 0xf; /* unused ? */ + + i = 0; + while (rtl8218B_6276B_rtl8380_perport[i * 2]) { + phy_write_paged(phydev, RTL83XX_PAGE_RAW, rtl8218B_6276B_rtl8380_perport[i * 2], + rtl8218B_6276B_rtl8380_perport[i * 2 + 1]); + i++; + } + + /*Disable broadcast ID*/ + rtl821x_phy_setup_package_broadcast(phydev, false); + + return 0; +} + +static int rtl8218b_ext_match_phy_device(struct phy_device *phydev) +{ + int addr = phydev->mdio.addr; + + /* Both the RTL8214FC and the external RTL8218B have the same + * PHY ID. On the RTL838x, the RTL8218B can only be attached_dev + * at PHY IDs 0-7, while the RTL8214FC must be attached via + * the pair of SGMII/1000Base-X with higher PHY-IDs + */ + if (soc_info.family == RTL8380_FAMILY_ID) + return phydev->phy_id == PHY_ID_RTL8218B_E && addr < 8; + else + return phydev->phy_id == PHY_ID_RTL8218B_E; +} + +static bool rtl8214fc_media_is_fibre(struct phy_device *phydev) +{ + int mac = phydev->mdio.addr; + + static int reg[] = {16, 19, 20, 21}; + u32 val; + + phy_package_write_paged(phydev, RTL83XX_PAGE_RAW, RTL821XINT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_INTERNAL); + val = phy_package_read_paged(phydev, RTL821X_PAGE_PORT, reg[mac % 4]); + phy_package_write_paged(phydev, RTL83XX_PAGE_RAW, RTL821XINT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_AUTO); + + if (val & BIT(11)) + return false; + + return true; +} + +static void rtl8214fc_power_set(struct phy_device *phydev, int port, bool on) +{ + char *state = on ? 
"on" : "off"; + + if (port == PORT_FIBRE) { + pr_info("%s: Powering %s FIBRE (port %d)\n", __func__, state, phydev->mdio.addr); + phy_write_paged(phydev, RTL83XX_PAGE_RAW, RTL821XINT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_FIBRE); + } else { + pr_info("%s: Powering %s COPPER (port %d)\n", __func__, state, phydev->mdio.addr); + phy_write_paged(phydev, RTL83XX_PAGE_RAW, RTL821XINT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_COPPER); + } + + if (on) { + phy_modify_paged(phydev, RTL821X_PAGE_POWER, 0x10, BIT(11), 0); + } else { + phy_modify_paged(phydev, RTL821X_PAGE_POWER, 0x10, 0, BIT(11)); + } + + phy_write_paged(phydev, RTL83XX_PAGE_RAW, RTL821XINT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_AUTO); +} + +static int rtl8214fc_suspend(struct phy_device *phydev) +{ + rtl8214fc_power_set(phydev, PORT_MII, false); + rtl8214fc_power_set(phydev, PORT_FIBRE, false); + + return 0; +} + +static int rtl8214fc_resume(struct phy_device *phydev) +{ + if (rtl8214fc_media_is_fibre(phydev)) { + rtl8214fc_power_set(phydev, PORT_MII, false); + rtl8214fc_power_set(phydev, PORT_FIBRE, true); + } else { + rtl8214fc_power_set(phydev, PORT_FIBRE, false); + rtl8214fc_power_set(phydev, PORT_MII, true); + } + + return 0; +} + +static void rtl8214fc_media_set(struct phy_device *phydev, bool set_fibre) +{ + int mac = phydev->mdio.addr; + + static int reg[] = {16, 19, 20, 21}; + int val; + + pr_info("%s: port %d, set_fibre: %d\n", __func__, mac, set_fibre); + phy_package_write_paged(phydev, RTL83XX_PAGE_RAW, RTL821XINT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_INTERNAL); + val = phy_package_read_paged(phydev, RTL821X_PAGE_PORT, reg[mac % 4]); + + val |= BIT(10); + if (set_fibre) { + val &= ~BIT(11); + } else { + val |= BIT(11); + } + + phy_package_write_paged(phydev, RTL83XX_PAGE_RAW, RTL821XINT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_INTERNAL); + phy_package_write_paged(phydev, RTL821X_PAGE_PORT, reg[mac % 4], val); + phy_package_write_paged(phydev, RTL83XX_PAGE_RAW, RTL821XINT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_AUTO); + + if (!phydev->suspended) { + if (set_fibre) { + rtl8214fc_power_set(phydev, PORT_MII, false); + rtl8214fc_power_set(phydev, PORT_FIBRE, true); + } else { + rtl8214fc_power_set(phydev, PORT_FIBRE, false); + rtl8214fc_power_set(phydev, PORT_MII, true); + } + } +} + +static int rtl8214fc_set_port(struct phy_device *phydev, int port) +{ + bool is_fibre = (port == PORT_FIBRE ? true : false); + int addr = phydev->mdio.addr; + + pr_debug("%s port %d to %d\n", __func__, addr, port); + + rtl8214fc_media_set(phydev, is_fibre); + return 0; +} + +static int rtl8214fc_get_port(struct phy_device *phydev) +{ + int addr = phydev->mdio.addr; + + pr_debug("%s: port %d\n", __func__, addr); + if (rtl8214fc_media_is_fibre(phydev)) + return PORT_FIBRE; + return PORT_MII; +} + +/* + * Enable EEE on the RTL8218B PHYs + * The method used is not the preferred way (which would be based on the MAC-EEE state, + * but the only way that works since the kernel first enables EEE in the MAC + * and then sets up the PHY. The MAC-based approach would require the oppsite. 
+ */ +void rtl8218d_eee_set(struct phy_device *phydev, bool enable) +{ + u32 val; + bool an_enabled; + + pr_debug("In %s %d, enable %d\n", __func__, phydev->mdio.addr, enable); + /* Set GPHY page to copper */ + phy_write_paged(phydev, RTL821X_PAGE_GPHY, RTL821XEXT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_COPPER); + + val = phy_read(phydev, 0); + an_enabled = val & BIT(12); + + /* Enable 100M (bit 1) / 1000M (bit 2) EEE */ + val = phy_read_mmd(phydev, 7, 60); + val |= BIT(2) | BIT(1); + phy_write_mmd(phydev, 7, 60, enable ? 0x6 : 0); + + /* 500M EEE ability */ + val = phy_read_paged(phydev, RTL821X_PAGE_GPHY, 20); + if (enable) + val |= BIT(7); + else + val &= ~BIT(7); + phy_write_paged(phydev, RTL821X_PAGE_GPHY, 20, val); + + /* Restart AN if enabled */ + if (an_enabled) { + val = phy_read(phydev, 0); + val |= BIT(9); + phy_write(phydev, 0, val); + } + + /* GPHY page back to auto*/ + phy_write_paged(phydev, RTL821X_PAGE_GPHY, RTL821XEXT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_AUTO); +} + +static int rtl8218b_get_eee(struct phy_device *phydev, + struct ethtool_eee *e) +{ + u32 val; + int addr = phydev->mdio.addr; + + pr_debug("In %s, port %d, was enabled: %d\n", __func__, addr, e->eee_enabled); + + /* Set GPHY page to copper */ + phy_write_paged(phydev, RTL821X_PAGE_GPHY, RTL821XINT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_COPPER); + + val = phy_read_paged(phydev, 7, 60); + if (e->eee_enabled) { + // Verify vs MAC-based EEE + e->eee_enabled = !!(val & BIT(7)); + if (!e->eee_enabled) { + val = phy_read_paged(phydev, RTL821X_PAGE_MAC, 25); + e->eee_enabled = !!(val & BIT(4)); + } + } + pr_debug("%s: enabled: %d\n", __func__, e->eee_enabled); + + /* GPHY page to auto */ + phy_write_paged(phydev, RTL821X_PAGE_GPHY, RTL821XINT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_AUTO); + + return 0; +} + +static int rtl8218d_get_eee(struct phy_device *phydev, + struct ethtool_eee *e) +{ + u32 val; + int addr = phydev->mdio.addr; + + pr_debug("In %s, port %d, was enabled: %d\n", __func__, addr, e->eee_enabled); + + /* Set GPHY page to copper */ + phy_write_paged(phydev, RTL821X_PAGE_GPHY, RTL821XEXT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_COPPER); + + val = phy_read_paged(phydev, 7, 60); + if (e->eee_enabled) + e->eee_enabled = !!(val & BIT(7)); + pr_debug("%s: enabled: %d\n", __func__, e->eee_enabled); + + /* GPHY page to auto */ + phy_write_paged(phydev, RTL821X_PAGE_GPHY, RTL821XEXT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_AUTO); + + return 0; +} + +static int rtl8214fc_set_eee(struct phy_device *phydev, + struct ethtool_eee *e) +{ + u32 poll_state; + int port = phydev->mdio.addr; + bool an_enabled; + u32 val; + + pr_debug("In %s port %d, enabled %d\n", __func__, port, e->eee_enabled); + + if (rtl8214fc_media_is_fibre(phydev)) { + netdev_err(phydev->attached_dev, "Port %d configured for FIBRE", port); + return -ENOTSUPP; + } + + poll_state = disable_polling(port); + + /* Set GPHY page to copper */ + phy_write_paged(phydev, RTL821X_PAGE_GPHY, RTL821XINT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_COPPER); + + // Get auto-negotiation status + val = phy_read(phydev, 0); + an_enabled = val & BIT(12); + + pr_info("%s: aneg: %d\n", __func__, an_enabled); + val = phy_read_paged(phydev, RTL821X_PAGE_MAC, 25); + val &= ~BIT(5); // Use MAC-based EEE + phy_write_paged(phydev, RTL821X_PAGE_MAC, 25, val); + + /* Enable 100M (bit 1) / 1000M (bit 2) EEE */ + phy_write_paged(phydev, 7, 60, e->eee_enabled ? 
0x6 : 0); + + /* 500M EEE ability */ + val = phy_read_paged(phydev, RTL821X_PAGE_GPHY, 20); + if (e->eee_enabled) + val |= BIT(7); + else + val &= ~BIT(7); + + phy_write_paged(phydev, RTL821X_PAGE_GPHY, 20, val); + + /* Restart AN if enabled */ + if (an_enabled) { + pr_info("%s: doing aneg\n", __func__); + val = phy_read(phydev, 0); + val |= BIT(9); + phy_write(phydev, 0, val); + } + + /* GPHY page back to auto*/ + phy_write_paged(phydev, RTL821X_PAGE_GPHY, RTL821XINT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_AUTO); + + resume_polling(poll_state); + + return 0; +} + +static int rtl8214fc_get_eee(struct phy_device *phydev, + struct ethtool_eee *e) +{ + int addr = phydev->mdio.addr; + + pr_debug("In %s port %d, enabled %d\n", __func__, addr, e->eee_enabled); + if (rtl8214fc_media_is_fibre(phydev)) { + netdev_err(phydev->attached_dev, "Port %d configured for FIBRE", addr); + return -ENOTSUPP; + } + + return rtl8218b_get_eee(phydev, e); +} + +static int rtl8218b_set_eee(struct phy_device *phydev, struct ethtool_eee *e) +{ + int port = phydev->mdio.addr; + u64 poll_state; + u32 val; + bool an_enabled; + + pr_info("In %s, port %d, enabled %d\n", __func__, port, e->eee_enabled); + + poll_state = disable_polling(port); + + /* Set GPHY page to copper */ + phy_write(phydev, RTL821XEXT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_COPPER); + val = phy_read(phydev, 0); + an_enabled = val & BIT(12); + + if (e->eee_enabled) { + /* 100/1000M EEE Capability */ + phy_write(phydev, 13, 0x0007); + phy_write(phydev, 14, 0x003C); + phy_write(phydev, 13, 0x4007); + phy_write(phydev, 14, 0x0006); + + val = phy_read_paged(phydev, RTL821X_PAGE_MAC, 25); + val |= BIT(4); + phy_write_paged(phydev, RTL821X_PAGE_MAC, 25, val); + } else { + /* 100/1000M EEE Capability */ + phy_write(phydev, 13, 0x0007); + phy_write(phydev, 14, 0x003C); + phy_write(phydev, 13, 0x0007); + phy_write(phydev, 14, 0x0000); + + val = phy_read_paged(phydev, RTL821X_PAGE_MAC, 25); + val &= ~BIT(4); + phy_write_paged(phydev, RTL821X_PAGE_MAC, 25, val); + } + + /* Restart AN if enabled */ + if (an_enabled) { + val = phy_read(phydev, 0); + val |= BIT(9); + phy_write(phydev, 0, val); + } + + /* GPHY page back to auto*/ + phy_write_paged(phydev, RTL821X_PAGE_GPHY, RTL821XEXT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_AUTO); + + pr_info("%s done\n", __func__); + resume_polling(poll_state); + + return 0; +} + +static int rtl8218d_set_eee(struct phy_device *phydev, struct ethtool_eee *e) +{ + int addr = phydev->mdio.addr; + u64 poll_state; + + pr_info("In %s, port %d, enabled %d\n", __func__, addr, e->eee_enabled); + + poll_state = disable_polling(addr); + + rtl8218d_eee_set(phydev, (bool) e->eee_enabled); + + resume_polling(poll_state); + + return 0; +} + +static int rtl8214c_match_phy_device(struct phy_device *phydev) +{ + return phydev->phy_id == PHY_ID_RTL8214C; +} + +static int rtl8380_configure_rtl8214c(struct phy_device *phydev) +{ + u32 phy_id, val; + int mac = phydev->mdio.addr; + + val = phy_read(phydev, 2); + phy_id = val << 16; + val = phy_read(phydev, 3); + phy_id |= val; + pr_debug("Phy on MAC %d: %x\n", mac, phy_id); + + phydev_info(phydev, "Detected external RTL8214C\n"); + + /* GPHY auto conf */ + phy_write_paged(phydev, RTL821X_PAGE_GPHY, RTL821XINT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_AUTO); + return 0; +} + +static int rtl8380_configure_rtl8214fc(struct phy_device *phydev) +{ + u32 phy_id, val, page = 0; + int i, l; + int mac = phydev->mdio.addr; + struct fw_header *h; + u32 *rtl8380_rtl8214fc_perchip; + u32 *rtl8380_rtl8214fc_perport; + + val = 
phy_read(phydev, 2); + phy_id = val << 16; + val = phy_read(phydev, 3); + phy_id |= val; + pr_debug("Phy on MAC %d: %x\n", mac, phy_id); + + /* Read internal PHY id */ + phy_write_paged(phydev, 0, RTL821XEXT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_COPPER); + phy_write_paged(phydev, 0x1f, 0x1b, 0x0002); + val = phy_read_paged(phydev, 0x1f, 0x1c); + if (val != 0x6276) { + phydev_err(phydev, "Expected external RTL8214FC, found PHY-ID %x\n", val); + return -1; + } + phydev_info(phydev, "Detected external RTL8214FC\n"); + + h = rtl838x_request_fw(phydev, &rtl838x_8214fc_fw, FIRMWARE_838X_8214FC_1); + if (!h) + return -1; + + if (h->phy != 0x8214fc00) { + phydev_err(phydev, "Wrong firmware file: PHY mismatch.\n"); + return -1; + } + + rtl8380_rtl8214fc_perchip = (void *)h + sizeof(struct fw_header) + + h->parts[0].start; + + rtl8380_rtl8214fc_perport = (void *)h + sizeof(struct fw_header) + + h->parts[1].start; + + /* detect phy version */ + phy_write_paged(phydev, RTL83XX_PAGE_RAW, 27, 0x0004); + val = phy_read_paged(phydev, RTL83XX_PAGE_RAW, 28); + + val = phy_read(phydev, 16); + if (val & (1 << 11)) + rtl8380_rtl8214fc_on_off(phydev, true); + else + rtl8380_phy_reset(phydev); + + msleep(100); + phy_write_paged(phydev, 0, RTL821XEXT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_COPPER); + + i = 0; + while (rtl8380_rtl8214fc_perchip[i * 3] + && rtl8380_rtl8214fc_perchip[i * 3 + 1]) { + if (rtl8380_rtl8214fc_perchip[i * 3 + 1] == 0x1f) + page = rtl8380_rtl8214fc_perchip[i * 3 + 2]; + if (rtl8380_rtl8214fc_perchip[i * 3 + 1] == 0x13 && page == 0x260) { + val = phy_read_paged(phydev, 0x260, 13); + val = (val & 0x1f00) | (rtl8380_rtl8214fc_perchip[i * 3 + 2] + & 0xe0ff); + phy_write_paged(phydev, RTL83XX_PAGE_RAW, + rtl8380_rtl8214fc_perchip[i * 3 + 1], val); + } else { + phy_write_paged(phydev, RTL83XX_PAGE_RAW, + rtl8380_rtl8214fc_perchip[i * 3 + 1], + rtl8380_rtl8214fc_perchip[i * 3 + 2]); + } + i++; + } + + /* Force copper medium */ + for (i = 0; i < 4; i++) { + phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL8XXX_PAGE_MAIN); + phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, RTL821XEXT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_COPPER); + } + + /* Enable PHY */ + for (i = 0; i < 4; i++) { + phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL8XXX_PAGE_MAIN); + phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, 0x00, 0x1140); + } + mdelay(100); + + /* Disable Autosensing */ + for (i = 0; i < 4; i++) { + for (l = 0; l < 100; l++) { + val = phy_package_port_read_paged(phydev, i, RTL821X_PAGE_GPHY, 0x10); + if ((val & 0x7) >= 3) + break; + } + if (l >= 100) { + phydev_err(phydev, "Could not disable autosensing\n"); + return -1; + } + } + + /* Request patch */ + for (i = 0; i < 4; i++) { + phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL821X_PAGE_PATCH); + phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, 0x10, 0x0010); + } + mdelay(300); + + /* Verify patch readiness */ + for (i = 0; i < 4; i++) { + for (l = 0; l < 100; l++) { + val = phy_package_port_read_paged(phydev, i, RTL821X_PAGE_STATE, 0x10); + if (val & 0x40) + break; + } + if (l >= 100) { + phydev_err(phydev, "Could not patch PHY\n"); + return -1; + } + } + /* Use Broadcast ID method for patching */ + rtl821x_phy_setup_package_broadcast(phydev, true); + + i = 0; + while (rtl8380_rtl8214fc_perport[i * 2]) { + phy_write_paged(phydev, RTL83XX_PAGE_RAW, rtl8380_rtl8214fc_perport[i * 2], + rtl8380_rtl8214fc_perport[i * 2 + 1]); + i++; + } 
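+	/* The patch sequence above is written once through the broadcast PHY ID enabled by rtl821x_phy_setup_package_broadcast(), so it reaches all four ports of the RTL8214FC package in a single pass */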
+ + /*Disable broadcast ID*/ + rtl821x_phy_setup_package_broadcast(phydev, false); + + /* Auto medium selection */ + for (i = 0; i < 4; i++) { + phy_write_paged(phydev, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL8XXX_PAGE_MAIN); + phy_write_paged(phydev, RTL83XX_PAGE_RAW, RTL821XEXT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_AUTO); + } + + return 0; +} + +static int rtl8214fc_match_phy_device(struct phy_device *phydev) +{ + int addr = phydev->mdio.addr; + + return phydev->phy_id == PHY_ID_RTL8214FC && addr >= 24; +} + +static int rtl8380_configure_serdes(struct phy_device *phydev) +{ + u32 v; + u32 sds_conf_value; + int i; + struct fw_header *h; + u32 *rtl8380_sds_take_reset; + u32 *rtl8380_sds_common; + u32 *rtl8380_sds01_qsgmii_6275b; + u32 *rtl8380_sds23_qsgmii_6275b; + u32 *rtl8380_sds4_fiber_6275b; + u32 *rtl8380_sds5_fiber_6275b; + u32 *rtl8380_sds_reset; + u32 *rtl8380_sds_release_reset; + + phydev_info(phydev, "Detected internal RTL8380 SERDES\n"); + + h = rtl838x_request_fw(phydev, &rtl838x_8218b_fw, FIRMWARE_838X_8380_1); + if (!h) + return -1; + + if (h->magic != 0x83808380) { + phydev_err(phydev, "Wrong firmware file: magic number mismatch.\n"); + return -1; + } + + rtl8380_sds_take_reset = (void *)h + sizeof(struct fw_header) + + h->parts[0].start; + + rtl8380_sds_common = (void *)h + sizeof(struct fw_header) + + h->parts[1].start; + + rtl8380_sds01_qsgmii_6275b = (void *)h + sizeof(struct fw_header) + + h->parts[2].start; + + rtl8380_sds23_qsgmii_6275b = (void *)h + sizeof(struct fw_header) + + h->parts[3].start; + + rtl8380_sds4_fiber_6275b = (void *)h + sizeof(struct fw_header) + + h->parts[4].start; + + rtl8380_sds5_fiber_6275b = (void *)h + sizeof(struct fw_header) + + h->parts[5].start; + + rtl8380_sds_reset = (void *)h + sizeof(struct fw_header) + + h->parts[6].start; + + rtl8380_sds_release_reset = (void *)h + sizeof(struct fw_header) + + h->parts[7].start; + + /* Back up serdes power off value */ + sds_conf_value = sw_r32(RTL838X_SDS_CFG_REG); + pr_info("SDS power down value: %x\n", sds_conf_value); + + /* take serdes into reset */ + i = 0; + while (rtl8380_sds_take_reset[2 * i]) { + sw_w32(rtl8380_sds_take_reset[2 * i + 1], rtl8380_sds_take_reset[2 * i]); + i++; + udelay(1000); + } + + /* apply common serdes patch */ + i = 0; + while (rtl8380_sds_common[2 * i]) { + sw_w32(rtl8380_sds_common[2 * i + 1], rtl8380_sds_common[2 * i]); + i++; + udelay(1000); + } + + /* internal R/W enable */ + sw_w32(3, RTL838X_INT_RW_CTRL); + + /* SerDes ports 4 and 5 are FIBRE ports */ + sw_w32_mask(0x7 | 0x38, 1 | (1 << 3), RTL838X_INT_MODE_CTRL); + + /* SerDes module settings, SerDes 0-3 are QSGMII */ + v = 0x6 << 25 | 0x6 << 20 | 0x6 << 15 | 0x6 << 10; + /* SerDes 4 and 5 are 1000BX FIBRE */ + v |= 0x4 << 5 | 0x4; + sw_w32(v, RTL838X_SDS_MODE_SEL); + + pr_info("PLL control register: %x\n", sw_r32(RTL838X_PLL_CML_CTRL)); + sw_w32_mask(0xfffffff0, 0xaaaaaaaf & 0xf, RTL838X_PLL_CML_CTRL); + i = 0; + while (rtl8380_sds01_qsgmii_6275b[2 * i]) { + sw_w32(rtl8380_sds01_qsgmii_6275b[2 * i + 1], + rtl8380_sds01_qsgmii_6275b[2 * i]); + i++; + } + + i = 0; + while (rtl8380_sds23_qsgmii_6275b[2 * i]) { + sw_w32(rtl8380_sds23_qsgmii_6275b[2 * i + 1], rtl8380_sds23_qsgmii_6275b[2 * i]); + i++; + } + + i = 0; + while (rtl8380_sds4_fiber_6275b[2 * i]) { + sw_w32(rtl8380_sds4_fiber_6275b[2 * i + 1], rtl8380_sds4_fiber_6275b[2 * i]); + i++; + } + + i = 0; + while (rtl8380_sds5_fiber_6275b[2 * i]) { + sw_w32(rtl8380_sds5_fiber_6275b[2 * i + 1], rtl8380_sds5_fiber_6275b[2 * i]); + i++; + } + + i = 0; + while 
(rtl8380_sds_reset[2 * i]) { + sw_w32(rtl8380_sds_reset[2 * i + 1], rtl8380_sds_reset[2 * i]); + i++; + } + + i = 0; + while (rtl8380_sds_release_reset[2 * i]) { + sw_w32(rtl8380_sds_release_reset[2 * i + 1], rtl8380_sds_release_reset[2 * i]); + i++; + } + + pr_info("SDS power down value now: %x\n", sw_r32(RTL838X_SDS_CFG_REG)); + sw_w32(sds_conf_value, RTL838X_SDS_CFG_REG); + + pr_info("Configuration of SERDES done\n"); + return 0; +} + +static int rtl8390_configure_serdes(struct phy_device *phydev) +{ + phydev_info(phydev, "Detected internal RTL8390 SERDES\n"); + + /* In autoneg state, force link, set SR4_CFG_EN_LINK_FIB1G */ + sw_w32_mask(0, 1 << 18, RTL839X_SDS12_13_XSG0 + 0x0a); + + /* Disable EEE: Clear FRE16_EEE_RSG_FIB1G, FRE16_EEE_STD_FIB1G, + * FRE16_C1_PWRSAV_EN_FIB1G, FRE16_C2_PWRSAV_EN_FIB1G + * and FRE16_EEE_QUIET_FIB1G + */ + sw_w32_mask(0x1f << 10, 0, RTL839X_SDS12_13_XSG0 + 0xe0); + + return 0; +} + +void rtl9300_sds_field_w(int sds, u32 page, u32 reg, int end_bit, int start_bit, u32 v) +{ + int l = end_bit - start_bit + 1; + u32 data = v; + + if (l < 32) { + u32 mask = BIT(l) - 1; + + data = rtl930x_read_sds_phy(sds, page, reg); + data &= ~(mask << start_bit); + data |= (v & mask) << start_bit; + } + + rtl930x_write_sds_phy(sds, page, reg, data); +} + +u32 rtl9300_sds_field_r(int sds, u32 page, u32 reg, int end_bit, int start_bit) +{ + int l = end_bit - start_bit + 1; + u32 v = rtl930x_read_sds_phy(sds, page, reg); + + if (l >= 32) + return v; + + return (v >> start_bit) & (BIT(l) - 1); +} + +/* Read the link and speed status of the internal SerDes of the RTL9300 + */ +static int rtl9300_read_status(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + int phy_addr = phydev->mdio.addr; + struct device_node *dn; + u32 sds_num = 0, status, latch_status, mode; + + if (dev->of_node) { + dn = dev->of_node; + + if (of_property_read_u32(dn, "sds", &sds_num)) + sds_num = -1; + pr_info("%s: Port %d, SerDes is %d\n", __func__, phy_addr, sds_num); + } else { + dev_err(dev, "No DT node.\n"); + return -EINVAL; + } + + if (sds_num < 0) + return 0; + + mode = rtl9300_sds_mode_get(sds_num); + pr_info("%s got SDS mode %02x\n", __func__, mode); + if (mode == 0x1a) { // 10GR mode + status = rtl9300_sds_field_r(sds_num, 0x5, 0, 12, 12); + latch_status = rtl9300_sds_field_r(sds_num, 0x4, 1, 2, 2); + status |= rtl9300_sds_field_r(sds_num, 0x5, 0, 12, 12); + latch_status |= rtl9300_sds_field_r(sds_num, 0x4, 1, 2, 2); + } else { + status = rtl9300_sds_field_r(sds_num, 0x1, 29, 8, 0); + latch_status = rtl9300_sds_field_r(sds_num, 0x1, 30, 8, 0); + status |= rtl9300_sds_field_r(sds_num, 0x1, 29, 8, 0); + latch_status |= rtl9300_sds_field_r(sds_num, 0x1, 30, 8, 0); + } + + pr_info("%s link status: status: %d, latch %d\n", __func__, status, latch_status); + + if (latch_status) { + phydev->link = true; + if (mode == 0x1a) + phydev->speed = SPEED_10000; + else + phydev->speed = SPEED_1000; + + phydev->duplex = DUPLEX_FULL; + } + + return 0; +} + +void rtl930x_sds_rx_rst(int sds_num, phy_interface_t phy_if) +{ + int page = 0x2e; // 10GR and USXGMII + + if (phy_if == PHY_INTERFACE_MODE_1000BASEX) + page = 0x24; + + rtl9300_sds_field_w(sds_num, page, 0x15, 4, 4, 0x1); + mdelay(5); + rtl9300_sds_field_w(sds_num, page, 0x15, 4, 4, 0x0); +} + +/* + * Force PHY modes on 10GBit Serdes + */ +void rtl9300_force_sds_mode(int sds, phy_interface_t phy_if) +{ + int sds_mode; + bool lc_on; + int i, lc_value; + int lane_0 = (sds % 2) ? 
sds - 1 : sds; + u32 v, cr_0, cr_1, cr_2; + u32 m_bit, l_bit; + + pr_info("%s: SDS: %d, mode %d\n", __func__, sds, phy_if); + switch (phy_if) { + case PHY_INTERFACE_MODE_SGMII: + sds_mode = 0x2; + lc_on = false; + lc_value = 0x1; + break; + + case PHY_INTERFACE_MODE_HSGMII: + sds_mode = 0x12; + lc_value = 0x3; + // Configure LC + break; + + case PHY_INTERFACE_MODE_1000BASEX: + sds_mode = 0x04; + lc_on = false; + break; + + case PHY_INTERFACE_MODE_2500BASEX: + sds_mode = 0x16; + lc_value = 0x3; + // Configure LC + break; + + case PHY_INTERFACE_MODE_10GBASER: + sds_mode = 0x1a; + lc_on = true; + lc_value = 0x5; + break; + + case PHY_INTERFACE_MODE_NA: + // This will disable SerDes + sds_mode = 0x1f; + break; + + default: + pr_err("%s: unknown serdes mode: %s\n", + __func__, phy_modes(phy_if)); + return; + } + + pr_info("%s --------------------- serdes %d forcing to %x ...\n", __func__, sds, sds_mode); + // Power down SerDes + rtl9300_sds_field_w(sds, 0x20, 0, 7, 6, 0x3); + if (sds == 5) pr_info("%s after %x\n", __func__, rtl930x_read_sds_phy(sds, 0x20, 0)); + + if (sds == 5) pr_info("%s a %x\n", __func__, rtl930x_read_sds_phy(sds, 0x1f, 9)); + // Force mode enable + rtl9300_sds_field_w(sds, 0x1f, 9, 6, 6, 0x1); + if (sds == 5) pr_info("%s b %x\n", __func__, rtl930x_read_sds_phy(sds, 0x1f, 9)); + + /* SerDes off */ + rtl9300_sds_field_w(sds, 0x1f, 9, 11, 7, 0x1f); + + if (phy_if == PHY_INTERFACE_MODE_NA) + return; + + if (sds == 5) pr_info("%s c %x\n", __func__, rtl930x_read_sds_phy(sds, 0x20, 18)); + // Enable LC and ring + rtl9300_sds_field_w(lane_0, 0x20, 18, 3, 0, 0xf); + + if (sds == lane_0) + rtl9300_sds_field_w(lane_0, 0x20, 18, 5, 4, 0x1); + else + rtl9300_sds_field_w(lane_0, 0x20, 18, 7, 6, 0x1); + + rtl9300_sds_field_w(sds, 0x20, 0, 5, 4, 0x3); + + if (lc_on) + rtl9300_sds_field_w(lane_0, 0x20, 18, 11, 8, lc_value); + else + rtl9300_sds_field_w(lane_0, 0x20, 18, 15, 12, lc_value); + + // Force analog LC & ring on + rtl9300_sds_field_w(lane_0, 0x21, 11, 3, 0, 0xf); + + v = lc_on ? 0x3 : 0x1; + + if (sds == lane_0) + rtl9300_sds_field_w(lane_0, 0x20, 18, 5, 4, v); + else + rtl9300_sds_field_w(lane_0, 0x20, 18, 7, 6, v); + + // Force SerDes mode + rtl9300_sds_field_w(sds, 0x1f, 9, 6, 6, 1); + rtl9300_sds_field_w(sds, 0x1f, 9, 11, 7, sds_mode); + + // Toggle LC or Ring + for (i = 0; i < 20; i++) { + mdelay(200); + + rtl930x_write_sds_phy(lane_0, 0x1f, 2, 53); + + m_bit = (lane_0 == sds) ? (4) : (5); + l_bit = (lane_0 == sds) ? (4) : (5); + + cr_0 = rtl9300_sds_field_r(lane_0, 0x1f, 20, m_bit, l_bit); + mdelay(10); + cr_1 = rtl9300_sds_field_r(lane_0, 0x1f, 20, m_bit, l_bit); + mdelay(10); + cr_2 = rtl9300_sds_field_r(lane_0, 0x1f, 20, m_bit, l_bit); + + if (cr_0 && cr_1 && cr_2) { + u32 t; + if (phy_if != PHY_INTERFACE_MODE_10GBASER) + break; + + t = rtl9300_sds_field_r(sds, 0x6, 0x1, 2, 2); + rtl9300_sds_field_w(sds, 0x6, 0x1, 2, 2, 0x1); + + // Reset FSM + rtl9300_sds_field_w(sds, 0x6, 0x2, 12, 12, 0x1); + mdelay(10); + rtl9300_sds_field_w(sds, 0x6, 0x2, 12, 12, 0x0); + mdelay(10); + + // Need to read this twice + v = rtl9300_sds_field_r(sds, 0x5, 0, 12, 12); + v = rtl9300_sds_field_r(sds, 0x5, 0, 12, 12); + + rtl9300_sds_field_w(sds, 0x6, 0x1, 2, 2, t); + + // Reset FSM again + rtl9300_sds_field_w(sds, 0x6, 0x2, 12, 12, 0x1); + mdelay(10); + rtl9300_sds_field_w(sds, 0x6, 0x2, 12, 12, 0x0); + mdelay(10); + + if (v == 1) + break; + } + + m_bit = (phy_if == PHY_INTERFACE_MODE_10GBASER) ? 3 : 1; + l_bit = (phy_if == PHY_INTERFACE_MODE_10GBASER) ? 
2 : 0; + + rtl9300_sds_field_w(lane_0, 0x21, 11, m_bit, l_bit, 0x2); + mdelay(10); + rtl9300_sds_field_w(lane_0, 0x21, 11, m_bit, l_bit, 0x3); + } + + rtl930x_sds_rx_rst(sds, phy_if); + + // Re-enable power + rtl9300_sds_field_w(sds, 0x20, 0, 7, 6, 0); + + pr_info("%s --------------------- serdes %d forced to %x DONE\n", __func__, sds, sds_mode); +} + +void rtl9300_sds_tx_config(int sds, phy_interface_t phy_if) +{ + // parameters: rtl9303_80G_txParam_s2 + int impedance = 0x8; + int pre_amp = 0x2; + int main_amp = 0x9; + int post_amp = 0x2; + int pre_en = 0x1; + int post_en = 0x1; + int page; + + switch(phy_if) { + case PHY_INTERFACE_MODE_1000BASEX: + page = 0x25; + break; + case PHY_INTERFACE_MODE_HSGMII: + case PHY_INTERFACE_MODE_2500BASEX: + page = 0x29; + break; + case PHY_INTERFACE_MODE_10GBASER: + page = 0x2f; + break; + default: + pr_err("%s: unsupported PHY mode\n", __func__); + return; + } + + rtl9300_sds_field_w(sds, page, 0x1, 15, 11, pre_amp); + rtl9300_sds_field_w(sds, page, 0x7, 0, 0, pre_en); + rtl9300_sds_field_w(sds, page, 0x7, 8, 4, main_amp); + rtl9300_sds_field_w(sds, page, 0x6, 4, 0, post_amp); + rtl9300_sds_field_w(sds, page, 0x7, 3, 3, post_en); + rtl9300_sds_field_w(sds, page, 0x18, 15, 12, impedance); +} + +/* + * Wait for clock ready, this assumes the SerDes is in XGMII mode + * timeout is in ms + */ +int rtl9300_sds_clock_wait(int timeout) +{ + u32 v; + unsigned long start = jiffies; + + do { + rtl9300_sds_field_w(2, 0x1f, 0x2, 15, 0, 53); + v = rtl9300_sds_field_r(2, 0x1f, 20, 5, 4); + if (v == 3) + return 0; + } while (time_before(jiffies, start + msecs_to_jiffies(timeout))); + + return 1; +} + +void rtl9300_serdes_mac_link_config(int sds, bool tx_normal, bool rx_normal) +{ + u32 v10, v1; + + v10 = rtl930x_read_sds_phy(sds, 6, 2); // 10GBit, page 6, reg 2 + v1 = rtl930x_read_sds_phy(sds, 0, 0); // 1GBit, page 0, reg 0 + pr_info("%s: registers before %08x %08x\n", __func__, v10, v1); + + v10 &= ~(BIT(13) | BIT(14)); + v1 &= ~(BIT(8) | BIT(9)); + + v10 |= rx_normal ? 0 : BIT(13); + v1 |= rx_normal ? 0 : BIT(9); + + v10 |= tx_normal ? 0 : BIT(14); + v1 |= tx_normal ? 
0 : BIT(8); + + rtl930x_write_sds_phy(sds, 6, 2, v10); + rtl930x_write_sds_phy(sds, 0, 0, v1); + + v10 = rtl930x_read_sds_phy(sds, 6, 2); + v1 = rtl930x_read_sds_phy(sds, 0, 0); + pr_info("%s: registers after %08x %08x\n", __func__, v10, v1); +} + +void rtl9300_sds_rxcal_dcvs_manual(u32 sds_num, u32 dcvs_id, bool manual, u32 dvcs_list[]) +{ + if (manual) { + switch(dcvs_id) { + case 0: + rtl9300_sds_field_w(sds_num, 0x2e, 0x1e, 14, 14, 0x1); + rtl9300_sds_field_w(sds_num, 0x2f, 0x03, 5, 5, dvcs_list[0]); + rtl9300_sds_field_w(sds_num, 0x2f, 0x03, 4, 0, dvcs_list[1]); + break; + case 1: + rtl9300_sds_field_w(sds_num, 0x2e, 0x1e, 13, 13, 0x1); + rtl9300_sds_field_w(sds_num, 0x2e, 0x1d, 15, 15, dvcs_list[0]); + rtl9300_sds_field_w(sds_num, 0x2e, 0x1d, 14, 11, dvcs_list[1]); + break; + case 2: + rtl9300_sds_field_w(sds_num, 0x2e, 0x1e, 12, 12, 0x1); + rtl9300_sds_field_w(sds_num, 0x2e, 0x1d, 10, 10, dvcs_list[0]); + rtl9300_sds_field_w(sds_num, 0x2e, 0x1d, 9, 6, dvcs_list[1]); + break; + case 3: + rtl9300_sds_field_w(sds_num, 0x2e, 0x1e, 11, 11, 0x1); + rtl9300_sds_field_w(sds_num, 0x2e, 0x1d, 5, 5, dvcs_list[0]); + rtl9300_sds_field_w(sds_num, 0x2e, 0x1d, 4, 1, dvcs_list[1]); + break; + case 4: + rtl9300_sds_field_w(sds_num, 0x2e, 0x01, 15, 15, 0x1); + rtl9300_sds_field_w(sds_num, 0x2e, 0x11, 10, 10, dvcs_list[0]); + rtl9300_sds_field_w(sds_num, 0x2e, 0x11, 9, 6, dvcs_list[1]); + break; + case 5: + rtl9300_sds_field_w(sds_num, 0x2e, 0x02, 11, 11, 0x1); + rtl9300_sds_field_w(sds_num, 0x2e, 0x11, 4, 4, dvcs_list[0]); + rtl9300_sds_field_w(sds_num, 0x2e, 0x11, 3, 0, dvcs_list[1]); + break; + default: + break; + } + } else { + switch(dcvs_id) { + case 0: + rtl9300_sds_field_w(sds_num, 0x2e, 0x1e, 14, 14, 0x0); + break; + case 1: + rtl9300_sds_field_w(sds_num, 0x2e, 0x1e, 13, 13, 0x0); + break; + case 2: + rtl9300_sds_field_w(sds_num, 0x2e, 0x1e, 12, 12, 0x0); + break; + case 3: + rtl9300_sds_field_w(sds_num, 0x2e, 0x1e, 11, 11, 0x0); + break; + case 4: + rtl9300_sds_field_w(sds_num, 0x2e, 0x01, 15, 15, 0x0); + break; + case 5: + rtl9300_sds_field_w(sds_num, 0x2e, 0x02, 11, 11, 0x0); + break; + default: + break; + } + mdelay(1); + } +} + +void rtl9300_sds_rxcal_dcvs_get(u32 sds_num, u32 dcvs_id, u32 dcvs_list[]) +{ + u32 dcvs_sign_out = 0, dcvs_coef_bin = 0; + bool dcvs_manual; + + if (!(sds_num % 2)) + rtl930x_write_sds_phy(sds_num, 0x1f, 0x2, 0x2f); + else + rtl930x_write_sds_phy(sds_num - 1, 0x1f, 0x2, 0x31); + + // ##Page0x2E, Reg0x15[9], REG0_RX_EN_TEST=[1] + rtl9300_sds_field_w(sds_num, 0x2e, 0x15, 9, 9, 0x1); + + // ##Page0x21, Reg0x06[11 6], REG0_RX_DEBUG_SEL=[1 0 x x x x] + rtl9300_sds_field_w(sds_num, 0x21, 0x06, 11, 6, 0x20); + + switch(dcvs_id) { + case 0: + rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0x22); + mdelay(1); + + // ##DCVS0 Read Out + dcvs_sign_out = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 4); + dcvs_coef_bin = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 3, 0); + dcvs_manual = !!rtl9300_sds_field_r(sds_num, 0x2e, 0x1e, 14, 14); + break; + + case 1: + rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0x23); + mdelay(1); + + // ##DCVS0 Read Out + dcvs_sign_out = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 4); + dcvs_coef_bin = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 3, 0); + dcvs_manual = !!rtl9300_sds_field_r(sds_num, 0x2e, 0x1e, 13, 13); + break; + + case 2: + rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0x24); + mdelay(1); + + // ##DCVS0 Read Out + dcvs_sign_out = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 4); + dcvs_coef_bin = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 
3, 0); + dcvs_manual = !!rtl9300_sds_field_r(sds_num, 0x2e, 0x1e, 12, 12); + break; + case 3: + rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0x25); + mdelay(1); + + // ##DCVS0 Read Out + dcvs_sign_out = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 4); + dcvs_coef_bin = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 3, 0); + dcvs_manual = rtl9300_sds_field_r(sds_num, 0x2e, 0x1e, 11, 11); + break; + + case 4: + rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0x2c); + mdelay(1); + + // ##DCVS0 Read Out + dcvs_sign_out = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 4); + dcvs_coef_bin = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 3, 0); + dcvs_manual = !!rtl9300_sds_field_r(sds_num, 0x2e, 0x01, 15, 15); + break; + + case 5: + rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0x2d); + mdelay(1); + + // ##DCVS0 Read Out + dcvs_sign_out = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 4); + dcvs_coef_bin = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 3, 0); + dcvs_manual = rtl9300_sds_field_r(sds_num, 0x2e, 0x02, 11, 11); + break; + + default: + break; + } + + if (dcvs_sign_out) + pr_info("%s DCVS %u Sign: -", __func__, dcvs_id); + else + pr_info("%s DCVS %u Sign: +", __func__, dcvs_id); + + pr_info("DCVS %u even coefficient = %u", dcvs_id, dcvs_coef_bin); + pr_info("DCVS %u manual = %u", dcvs_id, dcvs_manual); + + dcvs_list[0] = dcvs_sign_out; + dcvs_list[1] = dcvs_coef_bin; +} + +void rtl9300_sds_rxcal_leq_manual(u32 sds_num, bool manual, u32 leq_gray) +{ + if (manual) { + rtl9300_sds_field_w(sds_num, 0x2e, 0x18, 15, 15, 0x1); + rtl9300_sds_field_w(sds_num, 0x2e, 0x16, 14, 10, leq_gray); + } else { + rtl9300_sds_field_w(sds_num, 0x2e, 0x18, 15, 15, 0x0); + mdelay(100); + } +} + +void rtl9300_sds_rxcal_leq_offset_manual(u32 sds_num, bool manual, u32 offset) +{ + if (manual) { + rtl9300_sds_field_w(sds_num, 0x2e, 0x17, 6, 2, offset); + } else { + rtl9300_sds_field_w(sds_num, 0x2e, 0x17, 6, 2, offset); + mdelay(1); + } +} + +#define GRAY_BITS 5 +u32 rtl9300_sds_rxcal_gray_to_binary(u32 gray_code) +{ + int i, j, m; + u32 g[GRAY_BITS]; + u32 c[GRAY_BITS]; + u32 leq_binary = 0; + + for(i = 0; i < GRAY_BITS; i++) + g[i] = (gray_code & BIT(i)) >> i; + + m = GRAY_BITS - 1; + + c[m] = g[m]; + + for(i = 0; i < m; i++) { + c[i] = g[i]; + for(j = i + 1; j < GRAY_BITS; j++) + c[i] = c[i] ^ g[j]; + } + + for(i = 0; i < GRAY_BITS; i++) + leq_binary += c[i] << i; + + return leq_binary; +} + +u32 rtl9300_sds_rxcal_leq_read(int sds_num) +{ + u32 leq_gray, leq_bin; + bool leq_manual; + + if (!(sds_num % 2)) + rtl930x_write_sds_phy(sds_num, 0x1f, 0x2, 0x2f); + else + rtl930x_write_sds_phy(sds_num - 1, 0x1f, 0x2, 0x31); + + // ##Page0x2E, Reg0x15[9], REG0_RX_EN_TEST=[1] + rtl9300_sds_field_w(sds_num, 0x2e, 0x15, 9, 9, 0x1); + + // ##Page0x21, Reg0x06[11 6], REG0_RX_DEBUG_SEL=[0 1 x x x x] + rtl9300_sds_field_w(sds_num, 0x21, 0x06, 11, 6, 0x10); + mdelay(1); + + // ##LEQ Read Out + leq_gray = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 7, 3); + leq_manual = !!rtl9300_sds_field_r(sds_num, 0x2e, 0x18, 15, 15); + leq_bin = rtl9300_sds_rxcal_gray_to_binary(leq_gray); + + pr_info("LEQ_gray: %u, LEQ_bin: %u", leq_gray, leq_bin); + pr_info("LEQ manual: %u", leq_manual); + + return leq_bin; +} + +void rtl9300_sds_rxcal_vth_manual(u32 sds_num, bool manual, u32 vth_list[]) +{ + if (manual) { + rtl9300_sds_field_w(sds_num, 0x2e, 0x0f, 13, 13, 0x1); + rtl9300_sds_field_w(sds_num, 0x2e, 0x13, 5, 3, vth_list[0]); + rtl9300_sds_field_w(sds_num, 0x2e, 0x13, 2, 0, vth_list[1]); + } else { + rtl9300_sds_field_w(sds_num, 0x2e, 0x0f, 13, 13, 0x0); + 
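+	/* Manual Vth override cleared above; the delay below presumably gives the automatic Vth adaptation time to settle */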
mdelay(10); + } +} + +void rtl9300_sds_rxcal_vth_get(u32 sds_num, u32 vth_list[]) +{ + u32 vth_manual; + + //##Page0x1F, Reg0x02[15 0], REG_DBGO_SEL=[0x002F]; //Lane0 + //##Page0x1F, Reg0x02[15 0], REG_DBGO_SEL=[0x0031]; //Lane1 + if (!(sds_num % 2)) + rtl930x_write_sds_phy(sds_num, 0x1f, 0x2, 0x2f); + else + rtl930x_write_sds_phy(sds_num - 1, 0x1f, 0x2, 0x31); + + //##Page0x2E, Reg0x15[9], REG0_RX_EN_TEST=[1] + rtl9300_sds_field_w(sds_num, 0x2e, 0x15, 9, 9, 0x1); + //##Page0x21, Reg0x06[11 6], REG0_RX_DEBUG_SEL=[1 0 x x x x] + rtl9300_sds_field_w(sds_num, 0x21, 0x06, 11, 6, 0x20); + //##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 1 1 0 0] + rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0xc); + + mdelay(1); + + //##VthP & VthN Read Out + vth_list[0] = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 2, 0); // v_thp set bin + vth_list[1] = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 5, 3); // v_thn set bin + + pr_info("vth_set_bin = %d", vth_list[0]); + pr_info("vth_set_bin = %d", vth_list[1]); + + vth_manual = !!rtl9300_sds_field_r(sds_num, 0x2e, 0x0f, 13, 13); + pr_info("Vth Maunal = %d", vth_manual); +} + +void rtl9300_sds_rxcal_tap_manual(u32 sds_num, int tap_id, bool manual, u32 tap_list[]) +{ + if (manual) { + switch(tap_id) { + case 0: + //##REG0_LOAD_IN_INIT[0]=1; REG0_TAP0_INIT[5:0]=Tap0_Value + rtl9300_sds_field_w(sds_num, 0x2e, 0x0f, tap_id + 7, tap_id + 7, 0x1); + rtl9300_sds_field_w(sds_num, 0x2f, 0x03, 5, 5, tap_list[0]); + rtl9300_sds_field_w(sds_num, 0x2f, 0x03, 4, 0, tap_list[1]); + break; + case 1: + rtl9300_sds_field_w(sds_num, 0x2e, 0x0f, tap_id + 7, tap_id + 7, 0x1); + rtl9300_sds_field_w(sds_num, 0x21, 0x07, 6, 6, tap_list[0]); + rtl9300_sds_field_w(sds_num, 0x2e, 0x09, 11, 6, tap_list[1]); + rtl9300_sds_field_w(sds_num, 0x21, 0x07, 5, 5, tap_list[2]); + rtl9300_sds_field_w(sds_num, 0x2f, 0x12, 5, 0, tap_list[3]); + break; + case 2: + rtl9300_sds_field_w(sds_num, 0x2e, 0x0f, tap_id + 7, tap_id + 7, 0x1); + rtl9300_sds_field_w(sds_num, 0x2e, 0x09, 5, 5, tap_list[0]); + rtl9300_sds_field_w(sds_num, 0x2e, 0x09, 4, 0, tap_list[1]); + rtl9300_sds_field_w(sds_num, 0x2e, 0x0a, 11, 11, tap_list[2]); + rtl9300_sds_field_w(sds_num, 0x2e, 0x0a, 10, 6, tap_list[3]); + break; + case 3: + rtl9300_sds_field_w(sds_num, 0x2e, 0x0f, tap_id + 7, tap_id + 7, 0x1); + rtl9300_sds_field_w(sds_num, 0x2e, 0x0a, 5, 5, tap_list[0]); + rtl9300_sds_field_w(sds_num, 0x2e, 0x0a, 4, 0, tap_list[1]); + rtl9300_sds_field_w(sds_num, 0x2e, 0x06, 5, 5, tap_list[2]); + rtl9300_sds_field_w(sds_num, 0x2e, 0x06, 4, 0, tap_list[3]); + break; + case 4: + rtl9300_sds_field_w(sds_num, 0x2e, 0x0f, tap_id + 7, tap_id + 7, 0x1); + rtl9300_sds_field_w(sds_num, 0x2f, 0x01, 5, 5, tap_list[0]); + rtl9300_sds_field_w(sds_num, 0x2f, 0x01, 4, 0, tap_list[1]); + rtl9300_sds_field_w(sds_num, 0x2e, 0x06, 11, 11, tap_list[2]); + rtl9300_sds_field_w(sds_num, 0x2e, 0x06, 10, 6, tap_list[3]); + break; + default: + break; + } + } else { + rtl9300_sds_field_w(sds_num, 0x2e, 0x0f, tap_id + 7, tap_id + 7, 0x0); + mdelay(10); + } +} + +void rtl9300_sds_rxcal_tap_get(u32 sds_num, u32 tap_id, u32 tap_list[]) +{ + u32 tap0_sign_out; + u32 tap0_coef_bin; + u32 tap_sign_out_even; + u32 tap_coef_bin_even; + u32 tap_sign_out_odd; + u32 tap_coef_bin_odd; + bool tap_manual; + + if (!(sds_num % 2)) + rtl930x_write_sds_phy(sds_num, 0x1f, 0x2, 0x2f); + else + rtl930x_write_sds_phy(sds_num - 1, 0x1f, 0x2, 0x31); + + //##Page0x2E, Reg0x15[9], REG0_RX_EN_TEST=[1] + rtl9300_sds_field_w(sds_num, 0x2e, 0x15, 9, 9, 0x1); + //##Page0x21, Reg0x06[11 6], 
REG0_RX_DEBUG_SEL=[1 0 x x x x] + rtl9300_sds_field_w(sds_num, 0x21, 0x06, 11, 6, 0x20); + + if (!tap_id) { + //##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 0 0 0 1] + rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0); + //##Tap1 Even Read Out + mdelay(1); + tap0_sign_out = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 5, 5); + tap0_coef_bin = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 0); + + if (tap0_sign_out == 1) + pr_info("Tap0 Sign : -"); + else + pr_info("Tap0 Sign : +"); + + pr_info("tap0_coef_bin = %d", tap0_coef_bin); + + tap_list[0] = tap0_sign_out; + tap_list[1] = tap0_coef_bin; + + tap_manual = !!rtl9300_sds_field_r(sds_num, 0x2e, 0x0f, 7, 7); + pr_info("tap0 manual = %u",tap_manual); + } else { + //##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 0 0 0 1] + rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, tap_id); + mdelay(1); + //##Tap1 Even Read Out + tap_sign_out_even = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 5, 5); + tap_coef_bin_even = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 0); + + //##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 0 1 1 0] + rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, (tap_id + 5)); + //##Tap1 Odd Read Out + tap_sign_out_odd = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 5, 5); + tap_coef_bin_odd = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 0); + + if (tap_sign_out_even == 1) + pr_info("Tap %u even sign: -", tap_id); + else + pr_info("Tap %u even sign: +", tap_id); + + pr_info("Tap %u even coefficient = %u", tap_id, tap_coef_bin_even); + + if (tap_sign_out_odd == 1) + pr_info("Tap %u odd sign: -", tap_id); + else + pr_info("Tap %u odd sign: +", tap_id); + + pr_info("Tap %u odd coefficient = %u", tap_id,tap_coef_bin_odd); + + tap_list[0] = tap_sign_out_even; + tap_list[1] = tap_coef_bin_even; + tap_list[2] = tap_sign_out_odd; + tap_list[3] = tap_coef_bin_odd; + + tap_manual = rtl9300_sds_field_r(sds_num, 0x2e, 0x0f, tap_id + 7, tap_id + 7); + pr_info("tap %u manual = %d",tap_id, tap_manual); + } +} + +void rtl9300_do_rx_calibration_1(int sds, phy_interface_t phy_mode) +{ + // From both rtl9300_rxCaliConf_serdes_myParam and rtl9300_rxCaliConf_phy_myParam + int tap0_init_val = 0x1f; // Initial Decision Fed Equalizer 0 tap + int vth_min = 0x0; + + pr_info("start_1.1.1 initial value for sds %d\n", sds); + rtl930x_write_sds_phy(sds, 6, 0, 0); + + // FGCAL + rtl9300_sds_field_w(sds, 0x2e, 0x01, 14, 14, 0x0); + rtl9300_sds_field_w(sds, 0x2e, 0x1c, 10, 5, 0x20); + rtl9300_sds_field_w(sds, 0x2f, 0x02, 0, 0, 0x1); + + // DCVS + rtl9300_sds_field_w(sds, 0x2e, 0x1e, 14, 11, 0x0); + rtl9300_sds_field_w(sds, 0x2e, 0x01, 15, 15, 0x0); + rtl9300_sds_field_w(sds, 0x2e, 0x02, 11, 11, 0x0); + rtl9300_sds_field_w(sds, 0x2e, 0x1c, 4, 0, 0x0); + rtl9300_sds_field_w(sds, 0x2e, 0x1d, 15, 11, 0x0); + rtl9300_sds_field_w(sds, 0x2e, 0x1d, 10, 6, 0x0); + rtl9300_sds_field_w(sds, 0x2e, 0x1d, 5, 1, 0x0); + rtl9300_sds_field_w(sds, 0x2e, 0x02, 10, 6, 0x0); + rtl9300_sds_field_w(sds, 0x2e, 0x11, 4, 0, 0x0); + rtl9300_sds_field_w(sds, 0x2f, 0x00, 3, 0, 0xf); + rtl9300_sds_field_w(sds, 0x2e, 0x04, 6, 6, 0x1); + rtl9300_sds_field_w(sds, 0x2e, 0x04, 7, 7, 0x1); + + // LEQ (Long Term Equivalent signal level) + rtl9300_sds_field_w(sds, 0x2e, 0x16, 14, 8, 0x0); + + // DFE (Decision Fed Equalizer) + rtl9300_sds_field_w(sds, 0x2f, 0x03, 5, 0, tap0_init_val); + rtl9300_sds_field_w(sds, 0x2e, 0x09, 11, 6, 0x0); + rtl9300_sds_field_w(sds, 0x2e, 0x09, 5, 0, 0x0); + rtl9300_sds_field_w(sds, 0x2e, 0x0a, 5, 0, 0x0); + rtl9300_sds_field_w(sds, 0x2f, 0x01, 5, 0, 0x0); + rtl9300_sds_field_w(sds, 0x2f, 0x12, 5, 
0, 0x0); + rtl9300_sds_field_w(sds, 0x2e, 0x0a, 11, 6, 0x0); + rtl9300_sds_field_w(sds, 0x2e, 0x06, 5, 0, 0x0); + rtl9300_sds_field_w(sds, 0x2f, 0x01, 5, 0, 0x0); + + // Vth + rtl9300_sds_field_w(sds, 0x2e, 0x13, 5, 3, 0x7); + rtl9300_sds_field_w(sds, 0x2e, 0x13, 2, 0, 0x7); + rtl9300_sds_field_w(sds, 0x2f, 0x0b, 5, 3, vth_min); + + pr_info("end_1.1.1 --\n"); + + pr_info("start_1.1.2 Load DFE init. value\n"); + + rtl9300_sds_field_w(sds, 0x2e, 0x0f, 13, 7, 0x7f); + + pr_info("end_1.1.2\n"); + + pr_info("start_1.1.3 disable LEQ training, enable DFE clock\n"); + + rtl9300_sds_field_w(sds, 0x2e, 0x17, 7, 7, 0x0); + rtl9300_sds_field_w(sds, 0x2e, 0x17, 6, 2, 0x0); + rtl9300_sds_field_w(sds, 0x2e, 0x0c, 8, 8, 0x0); + rtl9300_sds_field_w(sds, 0x2e, 0x0b, 4, 4, 0x1); + rtl9300_sds_field_w(sds, 0x2e, 0x12, 14, 14, 0x0); + rtl9300_sds_field_w(sds, 0x2f, 0x02, 15, 15, 0x0); + + pr_info("end_1.1.3 --\n"); + + pr_info("start_1.1.4 offset cali setting\n"); + + rtl9300_sds_field_w(sds, 0x2e, 0x0f, 15, 14, 0x3); + + pr_info("end_1.1.4\n"); + + pr_info("start_1.1.5 LEQ and DFE setting\n"); + + // TODO: make this work for DAC cables of different lengths + // For a 10GBit serdes with Fibre, SDS 8 or 9 + if (phy_mode == PHY_INTERFACE_MODE_10GBASER || phy_mode == PHY_INTERFACE_MODE_1000BASEX) + rtl9300_sds_field_w(sds, 0x2e, 0x16, 3, 2, 0x2); + else + pr_err("%s not PHY-based or SerDes, implement DAC!\n", __func__); + + // No serdes, check for Aquantia PHYs + rtl9300_sds_field_w(sds, 0x2e, 0x16, 3, 2, 0x2); + + rtl9300_sds_field_w(sds, 0x2e, 0x0f, 6, 0, 0x5f); + rtl9300_sds_field_w(sds, 0x2f, 0x05, 7, 2, 0x1f); + rtl9300_sds_field_w(sds, 0x2e, 0x19, 9, 5, 0x1f); + rtl9300_sds_field_w(sds, 0x2f, 0x0b, 15, 9, 0x3c); + rtl9300_sds_field_w(sds, 0x2e, 0x0b, 1, 0, 0x3); + + pr_info("end_1.1.5\n"); +} + +void rtl9300_do_rx_calibration_2_1(u32 sds_num) +{ + pr_info("start_1.2.1 ForegroundOffsetCal_Manual\n"); + + // Gray config endis to 1 + rtl9300_sds_field_w(sds_num, 0x2f, 0x02, 2, 2, 0x1); + + // ForegroundOffsetCal_Manual(auto mode) + rtl9300_sds_field_w(sds_num, 0x2e, 0x01, 14, 14, 0x0); + + pr_info("end_1.2.1"); +} + +void rtl9300_do_rx_calibration_2_2(int sds_num) +{ + //Force Rx-Run = 0 + rtl9300_sds_field_w(sds_num, 0x2e, 0x15, 8, 8, 0x0); + + rtl930x_sds_rx_rst(sds_num, PHY_INTERFACE_MODE_10GBASER); +} + +void rtl9300_do_rx_calibration_2_3(int sds_num) +{ + u32 fgcal_binary, fgcal_gray; + u32 offset_range; + + pr_info("start_1.2.3 Foreground Calibration\n"); + + while(1) { + if (!(sds_num % 2)) + rtl930x_write_sds_phy(sds_num, 0x1f, 0x2, 0x2f); + else + rtl930x_write_sds_phy(sds_num - 1, 0x1f, 0x2, 0x31); + + // ##Page0x2E, Reg0x15[9], REG0_RX_EN_TEST=[1] + rtl9300_sds_field_w(sds_num, 0x2e, 0x15, 9, 9, 0x1); + // ##Page0x21, Reg0x06[11 6], REG0_RX_DEBUG_SEL=[1 0 x x x x] + rtl9300_sds_field_w(sds_num, 0x21, 0x06, 11, 6, 0x20); + // ##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 1 1 1 1] + rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0xf); + // ##FGCAL read gray + fgcal_gray = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 5, 0); + // ##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 1 1 1 0] + rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0xe); + // ##FGCAL read binary + fgcal_binary = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 5, 0); + + pr_info("%s: fgcal_gray: %d, fgcal_binary %d\n", + __func__, fgcal_gray, fgcal_binary); + + offset_range = rtl9300_sds_field_r(sds_num, 0x2e, 0x15, 15, 14); + + if (fgcal_binary > 60 || fgcal_binary < 3) { + if (offset_range == 3) { + pr_info("%s: Foreground Calibration result marginal!", 
__func__); + break; + } else { + offset_range++; + rtl9300_sds_field_w(sds_num, 0x2e, 0x15, 15, 14, offset_range); + rtl9300_do_rx_calibration_2_2(sds_num); + } + } else { + break; + } + } + pr_info("%s: end_1.2.3\n", __func__); +} + +void rtl9300_do_rx_calibration_2(int sds) +{ + rtl930x_sds_rx_rst(sds, PHY_INTERFACE_MODE_10GBASER); + rtl9300_do_rx_calibration_2_1(sds); + rtl9300_do_rx_calibration_2_2(sds); + rtl9300_do_rx_calibration_2_3(sds); +} + +void rtl9300_sds_rxcal_3_1(int sds_num, phy_interface_t phy_mode) +{ + pr_info("start_1.3.1"); + + // ##1.3.1 + if (phy_mode != PHY_INTERFACE_MODE_10GBASER && phy_mode != PHY_INTERFACE_MODE_1000BASEX) + rtl9300_sds_field_w(sds_num, 0x2e, 0xc, 8, 8, 0); + + rtl9300_sds_field_w(sds_num, 0x2e, 0x17, 7, 7, 0x0); + rtl9300_sds_rxcal_leq_manual(sds_num, false, 0); + + pr_info("end_1.3.1"); +} + +void rtl9300_sds_rxcal_3_2(int sds_num, phy_interface_t phy_mode) +{ + u32 sum10 = 0, avg10, int10; + int dac_long_cable_offset; + bool eq_hold_enabled; + int i; + + if (phy_mode == PHY_INTERFACE_MODE_10GBASER || phy_mode == PHY_INTERFACE_MODE_1000BASEX) { + // rtl9300_rxCaliConf_serdes_myParam + dac_long_cable_offset = 3; + eq_hold_enabled = true; + } else { + // rtl9300_rxCaliConf_phy_myParam + dac_long_cable_offset = 0; + eq_hold_enabled = false; + } + + if (phy_mode == PHY_INTERFACE_MODE_1000BASEX) + pr_warn("%s: LEQ only valid for 10GR!\n", __func__); + + pr_info("start_1.3.2"); + + for(i = 0; i < 10; i++) { + sum10 += rtl9300_sds_rxcal_leq_read(sds_num); + mdelay(10); + } + + avg10 = (sum10 / 10) + (((sum10 % 10) >= 5) ? 1 : 0); + int10 = sum10 / 10; + + pr_info("sum10:%u, avg10:%u, int10:%u", sum10, avg10, int10); + + if (phy_mode == PHY_INTERFACE_MODE_10GBASER || phy_mode == PHY_INTERFACE_MODE_1000BASEX) { + if (dac_long_cable_offset) { + rtl9300_sds_rxcal_leq_offset_manual(sds_num, 1, dac_long_cable_offset); + rtl9300_sds_field_w(sds_num, 0x2e, 0x17, 7, 7, eq_hold_enabled); + if (phy_mode == PHY_INTERFACE_MODE_10GBASER) + rtl9300_sds_rxcal_leq_manual(sds_num, true, avg10); + } else { + if (sum10 >= 5) { + rtl9300_sds_rxcal_leq_offset_manual(sds_num, 1, 3); + rtl9300_sds_field_w(sds_num, 0x2e, 0x17, 7, 7, 0x1); + if (phy_mode == PHY_INTERFACE_MODE_10GBASER) + rtl9300_sds_rxcal_leq_manual(sds_num, true, avg10); + } else { + rtl9300_sds_rxcal_leq_offset_manual(sds_num, 1, 0); + rtl9300_sds_field_w(sds_num, 0x2e, 0x17, 7, 7, 0x1); + if (phy_mode == PHY_INTERFACE_MODE_10GBASER) + rtl9300_sds_rxcal_leq_manual(sds_num, true, avg10); + } + } + } + + pr_info("Sds:%u LEQ = %u",sds_num, rtl9300_sds_rxcal_leq_read(sds_num)); + + pr_info("end_1.3.2"); +} + +void rtl9300_do_rx_calibration_3(int sds_num, phy_interface_t phy_mode) +{ + rtl9300_sds_rxcal_3_1(sds_num, phy_mode); + + if (phy_mode == PHY_INTERFACE_MODE_10GBASER || phy_mode == PHY_INTERFACE_MODE_1000BASEX) + rtl9300_sds_rxcal_3_2(sds_num, phy_mode); +} + +void rtl9300_do_rx_calibration_4_1(int sds_num) +{ + u32 vth_list[2] = {0, 0}; + u32 tap0_list[4] = {0, 0, 0, 0}; + + pr_info("start_1.4.1"); + + // ##1.4.1 + rtl9300_sds_rxcal_vth_manual(sds_num, false, vth_list); + rtl9300_sds_rxcal_tap_manual(sds_num, 0, false, tap0_list); + mdelay(200); + + pr_info("end_1.4.1"); +} + +void rtl9300_do_rx_calibration_4_2(u32 sds_num) +{ + u32 vth_list[2]; + u32 tap_list[4]; + + pr_info("start_1.4.2"); + + rtl9300_sds_rxcal_vth_get(sds_num, vth_list); + rtl9300_sds_rxcal_vth_manual(sds_num, true, vth_list); + + mdelay(100); + + rtl9300_sds_rxcal_tap_get(sds_num, 0, tap_list); + rtl9300_sds_rxcal_tap_manual(sds_num, 
0, true, tap_list); + + pr_info("end_1.4.2"); +} + +void rtl9300_do_rx_calibration_4(u32 sds_num) +{ + rtl9300_do_rx_calibration_4_1(sds_num); + rtl9300_do_rx_calibration_4_2(sds_num); +} + +void rtl9300_do_rx_calibration_5_2(u32 sds_num) +{ + u32 tap1_list[4] = {0}; + u32 tap2_list[4] = {0}; + u32 tap3_list[4] = {0}; + u32 tap4_list[4] = {0}; + + pr_info("start_1.5.2"); + + rtl9300_sds_rxcal_tap_manual(sds_num, 1, false, tap1_list); + rtl9300_sds_rxcal_tap_manual(sds_num, 2, false, tap2_list); + rtl9300_sds_rxcal_tap_manual(sds_num, 3, false, tap3_list); + rtl9300_sds_rxcal_tap_manual(sds_num, 4, false, tap4_list); + + mdelay(30); + + pr_info("end_1.5.2"); +} + +void rtl9300_do_rx_calibration_5(u32 sds_num, phy_interface_t phy_mode) +{ + if (phy_mode == PHY_INTERFACE_MODE_10GBASER) // dfeTap1_4Enable true + rtl9300_do_rx_calibration_5_2(sds_num); +} + + +void rtl9300_do_rx_calibration_dfe_disable(u32 sds_num) +{ + u32 tap1_list[4] = {0}; + u32 tap2_list[4] = {0}; + u32 tap3_list[4] = {0}; + u32 tap4_list[4] = {0}; + + rtl9300_sds_rxcal_tap_manual(sds_num, 1, true, tap1_list); + rtl9300_sds_rxcal_tap_manual(sds_num, 2, true, tap2_list); + rtl9300_sds_rxcal_tap_manual(sds_num, 3, true, tap3_list); + rtl9300_sds_rxcal_tap_manual(sds_num, 4, true, tap4_list); + + mdelay(10); +} + +void rtl9300_do_rx_calibration(int sds, phy_interface_t phy_mode) +{ + u32 latch_sts; + + rtl9300_do_rx_calibration_1(sds, phy_mode); + rtl9300_do_rx_calibration_2(sds); + rtl9300_do_rx_calibration_4(sds); + rtl9300_do_rx_calibration_5(sds, phy_mode); + mdelay(20); + + // Do this only for 10GR mode, SDS active in mode 0x1a + if (rtl9300_sds_field_r(sds, 0x1f, 9, 11, 7) == 0x1a) { + pr_info("%s: SDS enabled\n", __func__); + latch_sts = rtl9300_sds_field_r(sds, 0x4, 1, 2, 2); + mdelay(1); + latch_sts = rtl9300_sds_field_r(sds, 0x4, 1, 2, 2); + if (latch_sts) { + rtl9300_do_rx_calibration_dfe_disable(sds); + rtl9300_do_rx_calibration_4(sds); + rtl9300_do_rx_calibration_5(sds, phy_mode); + } + } +} + +int rtl9300_sds_sym_err_reset(int sds_num, phy_interface_t phy_mode) +{ + switch (phy_mode) { + case PHY_INTERFACE_MODE_XGMII: + break; + + case PHY_INTERFACE_MODE_10GBASER: + // Read twice to clear + rtl930x_read_sds_phy(sds_num, 5, 1); + rtl930x_read_sds_phy(sds_num, 5, 1); + break; + + case PHY_INTERFACE_MODE_1000BASEX: + rtl9300_sds_field_w(sds_num, 0x1, 24, 2, 0, 0); + rtl9300_sds_field_w(sds_num, 0x1, 3, 15, 8, 0); + rtl9300_sds_field_w(sds_num, 0x1, 2, 15, 0, 0); + break; + + default: + pr_info("%s unsupported phy mode\n", __func__); + return -1; + } + + return 0; +} + +u32 rtl9300_sds_sym_err_get(int sds_num, phy_interface_t phy_mode) +{ + u32 v = 0; + + switch (phy_mode) { + case PHY_INTERFACE_MODE_XGMII: + break; + + case PHY_INTERFACE_MODE_10GBASER: + v = rtl930x_read_sds_phy(sds_num, 5, 1); + return v & 0xff; + + default: + pr_info("%s unsupported PHY-mode\n", __func__); + } + + return v; +} + +int rtl9300_sds_check_calibration(int sds_num, phy_interface_t phy_mode) +{ + u32 errors1, errors2; + + rtl9300_sds_sym_err_reset(sds_num, phy_mode); + rtl9300_sds_sym_err_reset(sds_num, phy_mode); + + // Count errors during 1ms + errors1 = rtl9300_sds_sym_err_get(sds_num, phy_mode); + mdelay(1); + errors2 = rtl9300_sds_sym_err_get(sds_num, phy_mode); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_XGMII: + + if ((errors2 - errors1 > 100) + || (errors1 >= 0xffff00) || (errors2 >= 0xffff00)) { + pr_info("%s XSGMII error rate too high\n", __func__); + return 1; + } + break; + case PHY_INTERFACE_MODE_10GBASER: + if 
(errors2 > 0) { + pr_info("%s 10GBASER error rate too high\n", __func__); + return 1; + } + break; + default: + return 1; + } + return 0; +} + +void rtl9300_phy_enable_10g_1g(int sds_num) +{ + u32 v; + + // Enable 1GBit PHY + v = rtl930x_read_sds_phy(sds_num, PHY_PAGE_2, PHY_CTRL_REG); + pr_info("%s 1gbit phy: %08x\n", __func__, v); + v &= ~BIT(PHY_POWER_BIT); + rtl930x_write_sds_phy(sds_num, PHY_PAGE_2, PHY_CTRL_REG, v); + pr_info("%s 1gbit phy enabled: %08x\n", __func__, v); + + // Enable 10GBit PHY + v = rtl930x_read_sds_phy(sds_num, PHY_PAGE_4, PHY_CTRL_REG); + pr_info("%s 10gbit phy: %08x\n", __func__, v); + v &= ~BIT(PHY_POWER_BIT); + rtl930x_write_sds_phy(sds_num, PHY_PAGE_4, PHY_CTRL_REG, v); + pr_info("%s 10gbit phy after: %08x\n", __func__, v); + + // dal_longan_construct_mac_default_10gmedia_fiber + v = rtl930x_read_sds_phy(sds_num, 0x1f, 11); + pr_info("%s set medium: %08x\n", __func__, v); + v |= BIT(1); + rtl930x_write_sds_phy(sds_num, 0x1f, 11, v); + pr_info("%s set medium after: %08x\n", __func__, v); +} + +#define RTL930X_MAC_FORCE_MODE_CTRL (0xCA1C) +// phy_mode = PHY_INTERFACE_MODE_10GBASER, sds_mode = 0x1a +int rtl9300_serdes_setup(int sds_num, phy_interface_t phy_mode) +{ + int sds_mode; + int calib_tries = 0; + + switch (phy_mode) { + case PHY_INTERFACE_MODE_HSGMII: + sds_mode = 0x12; + break; + case PHY_INTERFACE_MODE_1000BASEX: + sds_mode = 0x04; + break; + case PHY_INTERFACE_MODE_XGMII: + sds_mode = 0x10; + break; + case PHY_INTERFACE_MODE_10GBASER: + sds_mode = 0x1a; + break; + case PHY_INTERFACE_MODE_USXGMII: + sds_mode = 0x0d; + break; + default: + pr_err("%s: unknown serdes mode: %s\n", __func__, phy_modes(phy_mode)); + return -EINVAL; + } + + // Maybe use dal_longan_sds_init + + // dal_longan_construct_serdesConfig_init // Serdes Construct + rtl9300_phy_enable_10g_1g(sds_num); + + // Set Serdes Mode + rtl9300_sds_set(sds_num, 0x1a); // 0x1b: RTK_MII_10GR1000BX_AUTO + + // Do RX calibration + do { + rtl9300_do_rx_calibration(sds_num, phy_mode); + calib_tries++; + mdelay(50); + } while (rtl9300_sds_check_calibration(sds_num, phy_mode) && calib_tries < 3); + + + return 0; +} + +typedef struct { + u8 page; + u8 reg; + u16 data; +} sds_config; + +sds_config rtl9300_a_sds_10gr_lane0[] = +{ + /*1G*/ + {0x00, 0x0E, 0x3053}, {0x01, 0x14, 0x0100}, {0x21, 0x03, 0x8206}, + {0x21, 0x05, 0x40B0}, {0x21, 0x06, 0x0010}, {0x21, 0x07, 0xF09F}, + {0x21, 0x0C, 0x0007}, {0x21, 0x0D, 0x6009}, {0x21, 0x0E, 0x0000}, + {0x21, 0x0F, 0x0008}, {0x24, 0x00, 0x0668}, {0x24, 0x02, 0xD020}, + {0x24, 0x06, 0xC000}, {0x24, 0x0B, 0x1892}, {0x24, 0x0F, 0xFFDF}, + {0x24, 0x12, 0x03C4}, {0x24, 0x13, 0x027F}, {0x24, 0x14, 0x1311}, + {0x24, 0x16, 0x00C9}, {0x24, 0x17, 0xA100}, {0x24, 0x1A, 0x0001}, + {0x24, 0x1C, 0x0400}, {0x25, 0x01, 0x0300}, {0x25, 0x02, 0x1017}, + {0x25, 0x03, 0xFFDF}, {0x25, 0x05, 0x7F7C}, {0x25, 0x07, 0x8100}, + {0x25, 0x08, 0x0001}, {0x25, 0x09, 0xFFD4}, {0x25, 0x0A, 0x7C2F}, + {0x25, 0x0E, 0x003F}, {0x25, 0x0F, 0x0121}, {0x25, 0x10, 0x0020}, + {0x25, 0x11, 0x8840}, {0x2B, 0x13, 0x0050}, {0x2B, 0x18, 0x8E88}, + {0x2B, 0x19, 0x4902}, {0x2B, 0x1D, 0x2501}, {0x2D, 0x13, 0x0050}, + {0x2D, 0x18, 0x8E88}, {0x2D, 0x19, 0x4902}, {0x2D, 0x1D, 0x2641}, + {0x2F, 0x13, 0x0050}, {0x2F, 0x18, 0x8E88}, {0x2F, 0x19, 0x4902}, + {0x2F, 0x1D, 0x66E1}, + /*3.125G*/ + {0x28, 0x00, 0x0668}, {0x28, 0x02, 0xD020}, {0x28, 0x06, 0xC000}, + {0x28, 0x0B, 0x1892}, {0x28, 0x0F, 0xFFDF}, {0x28, 0x12, 0x01C4}, + {0x28, 0x13, 0x027F}, {0x28, 0x14, 0x1311}, {0x28, 0x16, 0x00C9}, + {0x28, 0x17, 0xA100}, {0x28, 
0x1A, 0x0001}, {0x28, 0x1C, 0x0400}, + {0x29, 0x01, 0x0300}, {0x29, 0x02, 0x1017}, {0x29, 0x03, 0xFFDF}, + {0x29, 0x05, 0x7F7C}, {0x29, 0x07, 0x8100}, {0x29, 0x08, 0x0001}, + {0x29, 0x09, 0xFFD4}, {0x29, 0x0A, 0x7C2F}, {0x29, 0x0E, 0x003F}, + {0x29, 0x0F, 0x0121}, {0x29, 0x10, 0x0020}, {0x29, 0x11, 0x8840}, + /*10G*/ + {0x06, 0x0D, 0x0F00}, {0x06, 0x00, 0x0000}, {0x06, 0x01, 0xC800}, + {0x21, 0x03, 0x8206}, {0x21, 0x05, 0x40B0}, {0x21, 0x06, 0x0010}, + {0x21, 0x07, 0xF09F}, {0x21, 0x0C, 0x0007}, {0x21, 0x0D, 0x6009}, + {0x21, 0x0E, 0x0000}, {0x21, 0x0F, 0x0008}, {0x2E, 0x00, 0xA668}, + {0x2E, 0x02, 0xD020}, {0x2E, 0x06, 0xC000}, {0x2E, 0x0B, 0x1892}, + {0x2E, 0x0F, 0xFFDF}, {0x2E, 0x11, 0x8280}, {0x2E, 0x12, 0x0044}, + {0x2E, 0x13, 0x027F}, {0x2E, 0x14, 0x1311}, {0x2E, 0x17, 0xA100}, + {0x2E, 0x1A, 0x0001}, {0x2E, 0x1C, 0x0400}, {0x2F, 0x01, 0x0300}, + {0x2F, 0x02, 0x1217}, {0x2F, 0x03, 0xFFDF}, {0x2F, 0x05, 0x7F7C}, + {0x2F, 0x07, 0x80C4}, {0x2F, 0x08, 0x0001}, {0x2F, 0x09, 0xFFD4}, + {0x2F, 0x0A, 0x7C2F}, {0x2F, 0x0E, 0x003F}, {0x2F, 0x0F, 0x0121}, + {0x2F, 0x10, 0x0020}, {0x2F, 0x11, 0x8840}, {0x2F, 0x14, 0xE008}, + {0x2B, 0x13, 0x0050}, {0x2B, 0x18, 0x8E88}, {0x2B, 0x19, 0x4902}, + {0x2B, 0x1D, 0x2501}, {0x2D, 0x13, 0x0050}, {0x2D, 0x17, 0x4109}, + {0x2D, 0x18, 0x8E88}, {0x2D, 0x19, 0x4902}, {0x2D, 0x1C, 0x1109}, + {0x2D, 0x1D, 0x2641}, {0x2F, 0x13, 0x0050}, {0x2F, 0x18, 0x8E88}, + {0x2F, 0x19, 0x4902}, {0x2F, 0x1D, 0x76E1}, +}; + +sds_config rtl9300_a_sds_10gr_lane1[] = +{ + /*1G*/ + {0x00, 0x0E, 0x3053}, {0x01, 0x14, 0x0100}, {0x21, 0x03, 0x8206}, + {0x21, 0x06, 0x0010}, {0x21, 0x07, 0xF09F}, {0x21, 0x0A, 0x0003}, + {0x21, 0x0B, 0x0005}, {0x21, 0x0C, 0x0007}, {0x21, 0x0D, 0x6009}, + {0x21, 0x0E, 0x0000}, {0x21, 0x0F, 0x0008}, {0x24, 0x00, 0x0668}, + {0x24, 0x02, 0xD020}, {0x24, 0x06, 0xC000}, {0x24, 0x0B, 0x1892}, + {0x24, 0x0F, 0xFFDF}, {0x24, 0x12, 0x03C4}, {0x24, 0x13, 0x027F}, + {0x24, 0x14, 0x1311}, {0x24, 0x16, 0x00C9}, {0x24, 0x17, 0xA100}, + {0x24, 0x1A, 0x0001}, {0x24, 0x1C, 0x0400}, {0x25, 0x00, 0x820F}, + {0x25, 0x01, 0x0300}, {0x25, 0x02, 0x1017}, {0x25, 0x03, 0xFFDF}, + {0x25, 0x05, 0x7F7C}, {0x25, 0x07, 0x8100}, {0x25, 0x08, 0x0001}, + {0x25, 0x09, 0xFFD4}, {0x25, 0x0A, 0x7C2F}, {0x25, 0x0E, 0x003F}, + {0x25, 0x0F, 0x0121}, {0x25, 0x10, 0x0020}, {0x25, 0x11, 0x8840}, + {0x2B, 0x13, 0x3D87}, {0x2B, 0x14, 0x3108}, {0x2D, 0x13, 0x3C87}, + {0x2D, 0x14, 0x1808}, + /*3.125G*/ + {0x28, 0x00, 0x0668}, {0x28, 0x02, 0xD020}, {0x28, 0x06, 0xC000}, + {0x28, 0x0B, 0x1892}, {0x28, 0x0F, 0xFFDF}, {0x28, 0x12, 0x01C4}, + {0x28, 0x13, 0x027F}, {0x28, 0x14, 0x1311}, {0x28, 0x16, 0x00C9}, + {0x28, 0x17, 0xA100}, {0x28, 0x1A, 0x0001}, {0x28, 0x1C, 0x0400}, + {0x29, 0x00, 0x820F}, {0x29, 0x01, 0x0300}, {0x29, 0x02, 0x1017}, + {0x29, 0x03, 0xFFDF}, {0x29, 0x05, 0x7F7C}, {0x29, 0x07, 0x8100}, + {0x29, 0x08, 0x0001}, {0x29, 0x0A, 0x7C2F}, {0x29, 0x0E, 0x003F}, + {0x29, 0x0F, 0x0121}, {0x29, 0x10, 0x0020}, {0x29, 0x11, 0x8840}, + /*10G*/ + {0x06, 0x0D, 0x0F00}, {0x06, 0x00, 0x0000}, {0x06, 0x01, 0xC800}, + {0x21, 0x03, 0x8206}, {0x21, 0x05, 0x40B0}, {0x21, 0x06, 0x0010}, + {0x21, 0x07, 0xF09F}, {0x21, 0x0A, 0x0003}, {0x21, 0x0B, 0x0005}, + {0x21, 0x0C, 0x0007}, {0x21, 0x0D, 0x6009}, {0x21, 0x0E, 0x0000}, + {0x21, 0x0F, 0x0008}, {0x2E, 0x00, 0xA668}, {0x2E, 0x02, 0xD020}, + {0x2E, 0x06, 0xC000}, {0x2E, 0x0B, 0x1892}, {0x2E, 0x0F, 0xFFDF}, + {0x2E, 0x11, 0x8280}, {0x2E, 0x12, 0x0044}, {0x2E, 0x13, 0x027F}, + {0x2E, 0x14, 0x1311}, {0x2E, 0x17, 0xA100}, {0x2E, 0x1A, 0x0001}, + {0x2E, 0x1C, 0x0400}, 
{0x2F, 0x00, 0x820F}, {0x2F, 0x01, 0x0300}, + {0x2F, 0x02, 0x1217}, {0x2F, 0x03, 0xFFDF}, {0x2F, 0x05, 0x7F7C}, + {0x2F, 0x07, 0x80C4}, {0x2F, 0x08, 0x0001}, {0x2F, 0x09, 0xFFD4}, + {0x2F, 0x0A, 0x7C2F}, {0x2F, 0x0E, 0x003F}, {0x2F, 0x0F, 0x0121}, + {0x2F, 0x10, 0x0020}, {0x2F, 0x11, 0x8840}, {0x2B, 0x13, 0x3D87}, + {0x2B, 0x14, 0x3108}, {0x2D, 0x13, 0x3C87}, {0x2D, 0x14, 0x1808}, +}; + +int rtl9300_sds_cmu_band_get(int sds) +{ + u32 page; + u32 en; + u32 cmu_band; + +// page = rtl9300_sds_cmu_page_get(sds); + page = 0x25; // 10GR and 1000BX + sds = (sds % 2) ? (sds - 1) : (sds); + + rtl9300_sds_field_w(sds, page, 0x1c, 15, 15, 1); + rtl9300_sds_field_w(sds + 1, page, 0x1c, 15, 15, 1); + + en = rtl9300_sds_field_r(sds, page, 27, 1, 1); + if(!en) { // Auto mode + rtl930x_write_sds_phy(sds, 0x1f, 0x02, 31); + + cmu_band = rtl9300_sds_field_r(sds, 0x1f, 0x15, 5, 1); + } else { + cmu_band = rtl9300_sds_field_r(sds, page, 30, 4, 0); + } + + return cmu_band; +} + +int rtl9300_configure_serdes(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + int phy_addr = phydev->mdio.addr; + struct device_node *dn; + u32 sds_num = 0; + int sds_mode, calib_tries = 0, phy_mode = PHY_INTERFACE_MODE_10GBASER, i; + + if (dev->of_node) { + dn = dev->of_node; + + if (of_property_read_u32(dn, "sds", &sds_num)) + sds_num = -1; + pr_info("%s: Port %d, SerDes is %d\n", __func__, phy_addr, sds_num); + } else { + dev_err(dev, "No DT node.\n"); + return -EINVAL; + } + + if (sds_num < 0) + return 0; + + if (phy_mode != PHY_INTERFACE_MODE_10GBASER) // TODO: for now we only patch 10GR SerDes + return 0; + + switch (phy_mode) { + case PHY_INTERFACE_MODE_HSGMII: + sds_mode = 0x12; + break; + case PHY_INTERFACE_MODE_1000BASEX: + sds_mode = 0x04; + break; + case PHY_INTERFACE_MODE_XGMII: + sds_mode = 0x10; + break; + case PHY_INTERFACE_MODE_10GBASER: + sds_mode = 0x1a; + break; + case PHY_INTERFACE_MODE_USXGMII: + sds_mode = 0x0d; + break; + default: + pr_err("%s: unknown serdes mode: %s\n", __func__, phy_modes(phy_mode)); + return -EINVAL; + } + + pr_info("%s CMU BAND is %d\n", __func__, rtl9300_sds_cmu_band_get(sds_num)); + + // Turn Off Serdes + rtl9300_sds_rst(sds_num, 0x1f); + + pr_info("%s PATCHING SerDes %d\n", __func__, sds_num); + if (sds_num % 2) { + for (i = 0; i < sizeof(rtl9300_a_sds_10gr_lane1) / sizeof(sds_config); ++i) { + rtl930x_write_sds_phy(sds_num, rtl9300_a_sds_10gr_lane1[i].page, + rtl9300_a_sds_10gr_lane1[i].reg, + rtl9300_a_sds_10gr_lane1[i].data); + } + } else { + for (i = 0; i < sizeof(rtl9300_a_sds_10gr_lane0) / sizeof(sds_config); ++i) { + rtl930x_write_sds_phy(sds_num, rtl9300_a_sds_10gr_lane0[i].page, + rtl9300_a_sds_10gr_lane0[i].reg, + rtl9300_a_sds_10gr_lane0[i].data); + } + } + + rtl9300_phy_enable_10g_1g(sds_num); + + // Disable MAC + sw_w32_mask(0, 1, RTL930X_MAC_FORCE_MODE_CTRL); + mdelay(20); + + // ----> dal_longan_sds_mode_set + pr_info("%s: Configuring RTL9300 SERDES %d, mode %02x\n", __func__, sds_num, sds_mode); + + // Configure link to MAC + rtl9300_serdes_mac_link_config(sds_num, true, true); // MAC Construct + + // Disable MAC + sw_w32_mask(0, 1, RTL930X_MAC_FORCE_MODE_CTRL); + mdelay(20); + + rtl9300_force_sds_mode(sds_num, PHY_INTERFACE_MODE_NA); + + // Re-Enable MAC + sw_w32_mask(1, 0, RTL930X_MAC_FORCE_MODE_CTRL); + + rtl9300_force_sds_mode(sds_num, phy_mode); + + // Do RX calibration + do { + rtl9300_do_rx_calibration(sds_num, phy_mode); + calib_tries++; + mdelay(50); + } while (rtl9300_sds_check_calibration(sds_num, phy_mode) && calib_tries < 3); + + if 
(calib_tries >= 3) + pr_err("%s CALIBRATION FAILED\n", __func__); + + rtl9300_sds_tx_config(sds_num, phy_mode); + + // The clock needs only to be configured on the FPGA implementation + + return 0; +} + +void rtl9310_sds_field_w(int sds, u32 page, u32 reg, int end_bit, int start_bit, u32 v) +{ + int l = end_bit - start_bit + 1; + u32 data = v; + + if (l < 32) { + u32 mask = BIT(l) - 1; + + data = rtl931x_read_sds_phy(sds, page, reg); + data &= ~(mask << start_bit); + data |= (v & mask) << start_bit; + } + + rtl931x_write_sds_phy(sds, page, reg, data); +} + + +u32 rtl9310_sds_field_r(int sds, u32 page, u32 reg, int end_bit, int start_bit) +{ + int l = end_bit - start_bit + 1; + u32 v = rtl931x_read_sds_phy(sds, page, reg); + + if (l >= 32) + return v; + + return (v >> start_bit) & (BIT(l) - 1); +} + +static void rtl931x_sds_rst(u32 sds) +{ + u32 o, v, o_mode; + int shift = ((sds & 0x3) << 3); + + // TODO: We need to lock this! + + o = sw_r32(RTL931X_PS_SERDES_OFF_MODE_CTRL_ADDR); + v = o | BIT(sds); + sw_w32(v, RTL931X_PS_SERDES_OFF_MODE_CTRL_ADDR); + + o_mode = sw_r32(RTL931X_SERDES_MODE_CTRL + 4 * (sds >> 2)); + v = BIT(7) | 0x1F; + sw_w32_mask(0xff << shift, v << shift, RTL931X_SERDES_MODE_CTRL + 4 * (sds >> 2)); + sw_w32(o_mode, RTL931X_SERDES_MODE_CTRL + 4 * (sds >> 2)); + + sw_w32(o, RTL931X_PS_SERDES_OFF_MODE_CTRL_ADDR); +} + +static void rtl931x_symerr_clear(u32 sds, phy_interface_t mode) +{ + u32 i; + u32 xsg_sdsid_0, xsg_sdsid_1; + + switch (mode) { + case PHY_INTERFACE_MODE_NA: + break; + case PHY_INTERFACE_MODE_XGMII: + if (sds < 2) + xsg_sdsid_0 = sds; + else + xsg_sdsid_0 = (sds - 1) * 2; + xsg_sdsid_1 = xsg_sdsid_0 + 1; + + for (i = 0; i < 4; ++i) { + rtl9310_sds_field_w(xsg_sdsid_0, 0x1, 24, 2, 0, i); + rtl9310_sds_field_w(xsg_sdsid_0, 0x1, 3, 15, 8, 0x0); + rtl9310_sds_field_w(xsg_sdsid_0, 0x1, 2, 15, 0, 0x0); + } + + for (i = 0; i < 4; ++i) { + rtl9310_sds_field_w(xsg_sdsid_1, 0x1, 24, 2, 0, i); + rtl9310_sds_field_w(xsg_sdsid_1, 0x1, 3, 15, 8, 0x0); + rtl9310_sds_field_w(xsg_sdsid_1, 0x1, 2, 15, 0, 0x0); + } + + rtl9310_sds_field_w(xsg_sdsid_0, 0x1, 0, 15, 0, 0x0); + rtl9310_sds_field_w(xsg_sdsid_0, 0x1, 1, 15, 8, 0x0); + rtl9310_sds_field_w(xsg_sdsid_1, 0x1, 0, 15, 0, 0x0); + rtl9310_sds_field_w(xsg_sdsid_1, 0x1, 1, 15, 8, 0x0); + break; + default: + break; + } + + return; +} + +static u32 rtl931x_get_analog_sds(u32 sds) +{ + u32 sds_map[] = { 0, 1, 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23 }; + + if (sds < 14) + return sds_map[sds]; + return sds; +} + +void rtl931x_sds_fiber_disable(u32 sds) +{ + u32 v = 0x3F; + u32 asds = rtl931x_get_analog_sds(sds); + + rtl9310_sds_field_w(asds, 0x1F, 0x9, 11, 6, v); +} + +static void rtl931x_sds_fiber_mode_set(u32 sds, phy_interface_t mode) +{ + u32 val, asds = rtl931x_get_analog_sds(sds); + + /* clear symbol error count before changing mode */ + rtl931x_symerr_clear(sds, mode); + + val = 0x9F; + sw_w32(val, RTL931X_SERDES_MODE_CTRL + 4 * (sds >> 2)); + + switch (mode) { + case PHY_INTERFACE_MODE_SGMII: + val = 0x5; + break; + + case PHY_INTERFACE_MODE_1000BASEX: + /* serdes mode FIBER1G */ + val = 0x9; + break; + + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_10GKR: + val = 0x35; + break; +/* case MII_10GR1000BX_AUTO: + val = 0x39; + break; */ + + + case PHY_INTERFACE_MODE_USXGMII: + val = 0x1B; + break; + default: + val = 0x25; + } + + pr_info("%s writing analog SerDes Mode value %02x\n", __func__, val); + rtl9310_sds_field_w(asds, 0x1F, 0x9, 11, 6, val); + + return; +} + +static int 
rtl931x_sds_cmu_page_get(phy_interface_t mode) +{ + switch (mode) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: // MII_1000BX_FIBER / 100BX_FIBER / 1000BX100BX_AUTO + return 0x24; + case PHY_INTERFACE_MODE_HSGMII: + case PHY_INTERFACE_MODE_2500BASEX: // MII_2500Base_X: + return 0x28; +// case MII_HISGMII_5G: +// return 0x2a; + case PHY_INTERFACE_MODE_QSGMII: + return 0x2a; // Code also has 0x34 + case PHY_INTERFACE_MODE_XAUI: // MII_RXAUI_LITE: + return 0x2c; + case PHY_INTERFACE_MODE_XGMII: // MII_XSGMII + case PHY_INTERFACE_MODE_10GKR: + case PHY_INTERFACE_MODE_10GBASER: // MII_10GR + return 0x2e; + default: + return -1; + } + return -1; +} + +static void rtl931x_cmu_type_set(u32 asds, phy_interface_t mode, int chiptype) +{ + int cmu_type = 0; // Clock Management Unit + u32 cmu_page = 0; + u32 frc_cmu_spd; + u32 evenSds; + u32 lane, frc_lc_mode_bitnum, frc_lc_mode_val_bitnum; + + switch (mode) { + case PHY_INTERFACE_MODE_NA: + case PHY_INTERFACE_MODE_10GKR: + case PHY_INTERFACE_MODE_XGMII: + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_USXGMII: + return; + +/* case MII_10GR1000BX_AUTO: + if (chiptype) + rtl9310_sds_field_w(asds, 0x24, 0xd, 14, 14, 0); + return; */ + + case PHY_INTERFACE_MODE_QSGMII: + cmu_type = 1; + frc_cmu_spd = 0; + break; + + case PHY_INTERFACE_MODE_HSGMII: + cmu_type = 1; + frc_cmu_spd = 1; + break; + + case PHY_INTERFACE_MODE_1000BASEX: + cmu_type = 1; + frc_cmu_spd = 0; + break; + +/* case MII_1000BX100BX_AUTO: + cmu_type = 1; + frc_cmu_spd = 0; + break; */ + + case PHY_INTERFACE_MODE_SGMII: + cmu_type = 1; + frc_cmu_spd = 0; + break; + + case PHY_INTERFACE_MODE_2500BASEX: + cmu_type = 1; + frc_cmu_spd = 1; + break; + + default: + pr_info("SerDes %d mode is invalid\n", asds); + return; + } + + if (cmu_type == 1) + cmu_page = rtl931x_sds_cmu_page_get(mode); + + lane = asds % 2; + + if (!lane) { + frc_lc_mode_bitnum = 4; + frc_lc_mode_val_bitnum = 5; + } else { + frc_lc_mode_bitnum = 6; + frc_lc_mode_val_bitnum = 7; + } + + evenSds = asds - lane; + + pr_info("%s: cmu_type %0d cmu_page %x frc_cmu_spd %d lane %d asds %d\n", + __func__, cmu_type, cmu_page, frc_cmu_spd, lane, asds); + + if (cmu_type == 1) { + pr_info("%s A CMU page 0x28 0x7 %08x\n", __func__, rtl931x_read_sds_phy(asds, 0x28, 0x7)); + rtl9310_sds_field_w(asds, cmu_page, 0x7, 15, 15, 0); + pr_info("%s B CMU page 0x28 0x7 %08x\n", __func__, rtl931x_read_sds_phy(asds, 0x28, 0x7)); + if (chiptype) { + rtl9310_sds_field_w(asds, cmu_page, 0xd, 14, 14, 0); + } + + rtl9310_sds_field_w(evenSds, 0x20, 0x12, 3, 2, 0x3); + rtl9310_sds_field_w(evenSds, 0x20, 0x12, frc_lc_mode_bitnum, frc_lc_mode_bitnum, 1); + rtl9310_sds_field_w(evenSds, 0x20, 0x12, frc_lc_mode_val_bitnum, frc_lc_mode_val_bitnum, 0); + rtl9310_sds_field_w(evenSds, 0x20, 0x12, 12, 12, 1); + rtl9310_sds_field_w(evenSds, 0x20, 0x12, 15, 13, frc_cmu_spd); + } + + pr_info("%s CMU page 0x28 0x7 %08x\n", __func__, rtl931x_read_sds_phy(asds, 0x28, 0x7)); + return; +} + +static void rtl931x_sds_rx_rst(u32 sds) +{ + u32 asds = rtl931x_get_analog_sds(sds); + + if (sds < 2) + return; + + rtl931x_write_sds_phy(asds, 0x2e, 0x12, 0x2740); + rtl931x_write_sds_phy(asds, 0x2f, 0x0, 0x0); + rtl931x_write_sds_phy(asds, 0x2f, 0x2, 0x2010); + rtl931x_write_sds_phy(asds, 0x20, 0x0, 0xc10); + + rtl931x_write_sds_phy(asds, 0x2e, 0x12, 0x27c0); + rtl931x_write_sds_phy(asds, 0x2f, 0x0, 0xc000); + rtl931x_write_sds_phy(asds, 0x2f, 0x2, 0x6010); + rtl931x_write_sds_phy(asds, 0x20, 0x0, 0xc30); + + mdelay(50); +} + +static void 
rtl931x_sds_disable(u32 sds) +{ + u32 v = 0x1f; + + v |= BIT(7); + sw_w32(v, RTL931X_SERDES_MODE_CTRL + (sds >> 2) * 4); +} + +static void rtl931x_sds_mii_mode_set(u32 sds, phy_interface_t mode) +{ + u32 val; + + switch (mode) { + case PHY_INTERFACE_MODE_QSGMII: + val = 0x6; + break; + case PHY_INTERFACE_MODE_XGMII: + val = 0x10; // serdes mode XSGMII + break; + case PHY_INTERFACE_MODE_USXGMII: + case PHY_INTERFACE_MODE_2500BASEX: + val = 0xD; + break; + case PHY_INTERFACE_MODE_HSGMII: + val = 0x12; + break; + case PHY_INTERFACE_MODE_SGMII: + val = 0x2; + break; + default: + return; + } + + val |= (1 << 7); + + sw_w32(val, RTL931X_SERDES_MODE_CTRL + 4 * (sds >> 2)); +} + +static sds_config sds_config_10p3125g_type1[] = { + { 0x2E, 0x00, 0x0107 }, { 0x2E, 0x01, 0x01A3 }, { 0x2E, 0x02, 0x6A24 }, + { 0x2E, 0x03, 0xD10D }, { 0x2E, 0x04, 0x8000 }, { 0x2E, 0x05, 0xA17E }, + { 0x2E, 0x06, 0xE31D }, { 0x2E, 0x07, 0x800E }, { 0x2E, 0x08, 0x0294 }, + { 0x2E, 0x09, 0x0CE4 }, { 0x2E, 0x0A, 0x7FC8 }, { 0x2E, 0x0B, 0xE0E7 }, + { 0x2E, 0x0C, 0x0200 }, { 0x2E, 0x0D, 0xDF80 }, { 0x2E, 0x0E, 0x0000 }, + { 0x2E, 0x0F, 0x1FC2 }, { 0x2E, 0x10, 0x0C3F }, { 0x2E, 0x11, 0x0000 }, + { 0x2E, 0x12, 0x27C0 }, { 0x2E, 0x13, 0x7E1D }, { 0x2E, 0x14, 0x1300 }, + { 0x2E, 0x15, 0x003F }, { 0x2E, 0x16, 0xBE7F }, { 0x2E, 0x17, 0x0090 }, + { 0x2E, 0x18, 0x0000 }, { 0x2E, 0x19, 0x4000 }, { 0x2E, 0x1A, 0x0000 }, + { 0x2E, 0x1B, 0x8000 }, { 0x2E, 0x1C, 0x011F }, { 0x2E, 0x1D, 0x0000 }, + { 0x2E, 0x1E, 0xC8FF }, { 0x2E, 0x1F, 0x0000 }, { 0x2F, 0x00, 0xC000 }, + { 0x2F, 0x01, 0xF000 }, { 0x2F, 0x02, 0x6010 }, { 0x2F, 0x12, 0x0EE7 }, + { 0x2F, 0x13, 0x0000 } +}; + +static sds_config sds_config_10p3125g_cmu_type1[] = { + { 0x2F, 0x03, 0x4210 }, { 0x2F, 0x04, 0x0000 }, { 0x2F, 0x05, 0x0019 }, + { 0x2F, 0x06, 0x18A6 }, { 0x2F, 0x07, 0x2990 }, { 0x2F, 0x08, 0xFFF4 }, + { 0x2F, 0x09, 0x1F08 }, { 0x2F, 0x0A, 0x0000 }, { 0x2F, 0x0B, 0x8000 }, + { 0x2F, 0x0C, 0x4224 }, { 0x2F, 0x0D, 0x0000 }, { 0x2F, 0x0E, 0x0000 }, + { 0x2F, 0x0F, 0xA470 }, { 0x2F, 0x10, 0x8000 }, { 0x2F, 0x11, 0x037B } +}; + +void rtl931x_sds_init(u32 sds, phy_interface_t mode) +{ + + u32 board_sds_tx_type1[] = { 0x1C3, 0x1C3, 0x1C3, 0x1A3, 0x1A3, + 0x1A3, 0x143, 0x143, 0x143, 0x143, 0x163, 0x163 + }; + + u32 board_sds_tx[] = { 0x1A00, 0x1A00, 0x200, 0x200, 0x200, + 0x200, 0x1A3, 0x1A3, 0x1A3, 0x1A3, 0x1E3, 0x1E3 + }; + + u32 board_sds_tx2[] = { 0xDC0, 0x1C0, 0x200, 0x180, 0x160, + 0x123, 0x123, 0x163, 0x1A3, 0x1A0, 0x1C3, 0x9C3 + }; + + u32 asds, dSds, ori, model_info, val; + int chiptype = 0; + + asds = rtl931x_get_analog_sds(sds); + + if (sds > 13) + return; + + pr_info("%s: set sds %d to mode %d\n", __func__, sds, mode); + val = rtl9310_sds_field_r(asds, 0x1F, 0x9, 11, 6); + + pr_info("%s: fibermode %08X stored mode 0x%x analog SDS %d", __func__, + rtl931x_read_sds_phy(asds, 0x1f, 0x9), val, asds); + pr_info("%s: SGMII mode %08X in 0x24 0x9 analog SDS %d", __func__, + rtl931x_read_sds_phy(asds, 0x24, 0x9), asds); + pr_info("%s: CMU mode %08X stored even SDS %d", __func__, + rtl931x_read_sds_phy(asds & ~1, 0x20, 0x12), asds & ~1); + pr_info("%s: serdes_mode_ctrl %08X", __func__, RTL931X_SERDES_MODE_CTRL + 4 * (sds >> 2)); + pr_info("%s CMU page 0x24 0x7 %08x\n", __func__, rtl931x_read_sds_phy(asds, 0x24, 0x7)); + pr_info("%s CMU page 0x26 0x7 %08x\n", __func__, rtl931x_read_sds_phy(asds, 0x26, 0x7)); + pr_info("%s CMU page 0x28 0x7 %08x\n", __func__, rtl931x_read_sds_phy(asds, 0x28, 0x7)); + pr_info("%s XSG page 0x0 0xe %08x\n", __func__, rtl931x_read_sds_phy(dSds, 
0x0, 0xe)); + pr_info("%s XSG2 page 0x0 0xe %08x\n", __func__, rtl931x_read_sds_phy(dSds + 1, 0x0, 0xe)); + + model_info = sw_r32(RTL93XX_MODEL_NAME_INFO); + if ((model_info >> 4) & 0x1) { + pr_info("detected chiptype 1\n"); + chiptype = 1; + } else { + pr_info("detected chiptype 0\n"); + } + + if (sds < 2) + dSds = sds; + else + dSds = (sds - 1) * 2; + + pr_info("%s: 2.5gbit %08X dsds %d", __func__, + rtl931x_read_sds_phy(dSds, 0x1, 0x14), dSds); + + pr_info("%s: RTL931X_PS_SERDES_OFF_MODE_CTRL_ADDR 0x%08X\n", __func__, sw_r32(RTL931X_PS_SERDES_OFF_MODE_CTRL_ADDR)); + ori = sw_r32(RTL931X_PS_SERDES_OFF_MODE_CTRL_ADDR); + val = ori | (1 << sds); + sw_w32(val, RTL931X_PS_SERDES_OFF_MODE_CTRL_ADDR); + + switch (mode) { + case PHY_INTERFACE_MODE_NA: + break; + + case PHY_INTERFACE_MODE_XGMII: // MII_XSGMII + + if (chiptype) { + u32 xsg_sdsid_1; + xsg_sdsid_1 = dSds + 1; + //fifo inv clk + rtl9310_sds_field_w(dSds, 0x1, 0x1, 7, 4, 0xf); + rtl9310_sds_field_w(dSds, 0x1, 0x1, 3, 0, 0xf); + + rtl9310_sds_field_w(xsg_sdsid_1, 0x1, 0x1, 7, 4, 0xf); + rtl9310_sds_field_w(xsg_sdsid_1, 0x1, 0x1, 3, 0, 0xf); + + } + + rtl9310_sds_field_w(dSds, 0x0, 0xE, 12, 12, 1); + rtl9310_sds_field_w(dSds + 1, 0x0, 0xE, 12, 12, 1); + break; + + case PHY_INTERFACE_MODE_USXGMII: // MII_USXGMII_10GSXGMII/10GDXGMII/10GQXGMII: + u32 i, evenSds; + u32 op_code = 0x6003; + + if (chiptype) { + rtl9310_sds_field_w(asds, 0x6, 0x2, 12, 12, 1); + + for (i = 0; i < sizeof(sds_config_10p3125g_type1) / sizeof(sds_config); ++i) { + rtl931x_write_sds_phy(asds, sds_config_10p3125g_type1[i].page - 0x4, sds_config_10p3125g_type1[i].reg, sds_config_10p3125g_type1[i].data); + } + + evenSds = asds - (asds % 2); + + for (i = 0; i < sizeof(sds_config_10p3125g_cmu_type1) / sizeof(sds_config); ++i) { + rtl931x_write_sds_phy(evenSds, + sds_config_10p3125g_cmu_type1[i].page - 0x4, sds_config_10p3125g_cmu_type1[i].reg, sds_config_10p3125g_cmu_type1[i].data); + } + + rtl9310_sds_field_w(asds, 0x6, 0x2, 12, 12, 0); + } else { + + rtl9310_sds_field_w(asds, 0x2e, 0xd, 6, 0, 0x0); + rtl9310_sds_field_w(asds, 0x2e, 0xd, 7, 7, 0x1); + + rtl9310_sds_field_w(asds, 0x2e, 0x1c, 5, 0, 0x1E); + rtl9310_sds_field_w(asds, 0x2e, 0x1d, 11, 0, 0x00); + rtl9310_sds_field_w(asds, 0x2e, 0x1f, 11, 0, 0x00); + rtl9310_sds_field_w(asds, 0x2f, 0x0, 11, 0, 0x00); + rtl9310_sds_field_w(asds, 0x2f, 0x1, 11, 0, 0x00); + + rtl9310_sds_field_w(asds, 0x2e, 0xf, 12, 6, 0x7F); + rtl931x_write_sds_phy(asds, 0x2f, 0x12, 0xaaa); + + rtl931x_sds_rx_rst(sds); + + rtl931x_write_sds_phy(asds, 0x7, 0x10, op_code); + rtl931x_write_sds_phy(asds, 0x6, 0x1d, 0x0480); + rtl931x_write_sds_phy(asds, 0x6, 0xe, 0x0400); + } + break; + + case PHY_INTERFACE_MODE_10GBASER: // MII_10GR / MII_10GR1000BX_AUTO: + // configure 10GR fiber mode=1 + rtl9310_sds_field_w(asds, 0x1f, 0xb, 1, 1, 1); + + // init fiber_1g + rtl9310_sds_field_w(dSds, 0x3, 0x13, 15, 14, 0); + + rtl9310_sds_field_w(dSds, 0x2, 0x0, 12, 12, 1); + rtl9310_sds_field_w(dSds, 0x2, 0x0, 6, 6, 1); + rtl9310_sds_field_w(dSds, 0x2, 0x0, 13, 13, 0); + + // init auto + rtl9310_sds_field_w(asds, 0x1f, 13, 15, 0, 0x109e); + rtl9310_sds_field_w(asds, 0x1f, 0x6, 14, 10, 0x8); + rtl9310_sds_field_w(asds, 0x1f, 0x7, 10, 4, 0x7f); + break; + + case PHY_INTERFACE_MODE_HSGMII: + rtl9310_sds_field_w(dSds, 0x1, 0x14, 8, 8, 1); + break; + + case PHY_INTERFACE_MODE_1000BASEX: // MII_1000BX_FIBER + rtl9310_sds_field_w(dSds, 0x3, 0x13, 15, 14, 0); + + rtl9310_sds_field_w(dSds, 0x2, 0x0, 12, 12, 1); + rtl9310_sds_field_w(dSds, 0x2, 0x0, 6, 6, 1); + 
rtl9310_sds_field_w(dSds, 0x2, 0x0, 13, 13, 0); + break; + + case PHY_INTERFACE_MODE_SGMII: + rtl9310_sds_field_w(asds, 0x24, 0x9, 15, 15, 0); + break; + + case PHY_INTERFACE_MODE_2500BASEX: + rtl9310_sds_field_w(dSds, 0x1, 0x14, 8, 8, 1); + break; + + case PHY_INTERFACE_MODE_QSGMII: + default: + pr_info("%s: PHY mode %s not supported by SerDes %d\n", + __func__, phy_modes(mode), sds); + return; + } + + rtl931x_cmu_type_set(asds, mode, chiptype); + + if (sds >= 2 && sds <= 13) { + if (chiptype) + rtl931x_write_sds_phy(asds, 0x2E, 0x1, board_sds_tx_type1[sds - 2]); + else { + val = 0xa0000; + sw_w32(val, RTL931X_CHIP_INFO_ADDR); + val = sw_r32(RTL931X_CHIP_INFO_ADDR); + if (val & BIT(28)) // consider 9311 etc. RTL9313_CHIP_ID == HWP_CHIP_ID(unit)) + { + rtl931x_write_sds_phy(asds, 0x2E, 0x1, board_sds_tx2[sds - 2]); + } else { + rtl931x_write_sds_phy(asds, 0x2E, 0x1, board_sds_tx[sds - 2]); + } + val = 0; + sw_w32(val, RTL931X_CHIP_INFO_ADDR); + } + } + + val = ori & ~BIT(sds); + sw_w32(val, RTL931X_PS_SERDES_OFF_MODE_CTRL_ADDR); + pr_debug("%s: RTL931X_PS_SERDES_OFF_MODE_CTRL_ADDR 0x%08X\n", __func__, sw_r32(RTL931X_PS_SERDES_OFF_MODE_CTRL_ADDR)); + + if (mode == PHY_INTERFACE_MODE_XGMII || mode == PHY_INTERFACE_MODE_QSGMII + || mode == PHY_INTERFACE_MODE_HSGMII || mode == PHY_INTERFACE_MODE_SGMII + || mode == PHY_INTERFACE_MODE_USXGMII) { + if (mode == PHY_INTERFACE_MODE_XGMII) + rtl931x_sds_mii_mode_set(sds, mode); + else + rtl931x_sds_fiber_mode_set(sds, mode); + } +} + +int rtl931x_sds_cmu_band_set(int sds, bool enable, u32 band, phy_interface_t mode) +{ + u32 asds; + int page = rtl931x_sds_cmu_page_get(mode); + + sds -= (sds % 2); + sds = sds & ~1; + asds = rtl931x_get_analog_sds(sds); + page += 1; + + if (enable) { + rtl9310_sds_field_w(asds, page, 0x7, 13, 13, 0); + rtl9310_sds_field_w(asds, page, 0x7, 11, 11, 0); + } else { + rtl9310_sds_field_w(asds, page, 0x7, 13, 13, 0); + rtl9310_sds_field_w(asds, page, 0x7, 11, 11, 0); + } + + rtl9310_sds_field_w(asds, page, 0x7, 4, 0, band); + + rtl931x_sds_rst(sds); + + return 0; +} + +int rtl931x_sds_cmu_band_get(int sds, phy_interface_t mode) +{ + int page = rtl931x_sds_cmu_page_get(mode); + u32 asds, band; + + sds -= (sds % 2); + asds = rtl931x_get_analog_sds(sds); + page += 1; + rtl931x_write_sds_phy(asds, 0x1f, 0x02, 73); + + rtl9310_sds_field_w(asds, page, 0x5, 15, 15, 1); + band = rtl9310_sds_field_r(asds, 0x1f, 0x15, 8, 3); + pr_info("%s band is: %d\n", __func__, band); + + return band; +} + + +int rtl931x_link_sts_get(u32 sds) +{ + u32 sts, sts1, latch_sts, latch_sts1; + if (0){ + u32 xsg_sdsid_0, xsg_sdsid_1; + + xsg_sdsid_0 = sds < 2 ? sds : (sds - 1) * 2; + xsg_sdsid_1 = xsg_sdsid_0 + 1; + + sts = rtl9310_sds_field_r(xsg_sdsid_0, 0x1, 29, 8, 0); + sts1 = rtl9310_sds_field_r(xsg_sdsid_1, 0x1, 29, 8, 0); + latch_sts = rtl9310_sds_field_r(xsg_sdsid_0, 0x1, 30, 8, 0); + latch_sts1 = rtl9310_sds_field_r(xsg_sdsid_1, 0x1, 30, 8, 0); + } else { + u32 asds, dsds; + + asds = rtl931x_get_analog_sds(sds); + sts = rtl9310_sds_field_r(asds, 0x5, 0, 12, 12); + latch_sts = rtl9310_sds_field_r(asds, 0x4, 1, 2, 2); + + dsds = sds < 2 ? 
sds : (sds - 1) * 2; + latch_sts1 = rtl9310_sds_field_r(dsds, 0x2, 1, 2, 2); + sts1 = rtl9310_sds_field_r(dsds, 0x2, 1, 2, 2); + } + + pr_info("%s: serdes %d sts %d, sts1 %d, latch_sts %d, latch_sts1 %d\n", __func__, + sds, sts, sts1, latch_sts, latch_sts1); + return sts1; +} + +static int rtl8214fc_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) +{ + struct phy_device *phydev = upstream; + + rtl8214fc_media_set(phydev, true); + + return 0; +} + +static void rtl8214fc_sfp_remove(void *upstream) +{ + struct phy_device *phydev = upstream; + + rtl8214fc_media_set(phydev, false); +} + +static const struct sfp_upstream_ops rtl8214fc_sfp_ops = { + .attach = phy_sfp_attach, + .detach = phy_sfp_detach, + .module_insert = rtl8214fc_sfp_insert, + .module_remove = rtl8214fc_sfp_remove, +}; + +static int rtl8214fc_phy_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + int addr = phydev->mdio.addr; + int ret = 0; + + /* 839x has internal SerDes */ + if (soc_info.id == 0x8393) + return -ENODEV; + + /* All base addresses of the PHYs start at multiples of 8 */ + devm_phy_package_join(dev, phydev, addr & (~7), + sizeof(struct rtl83xx_shared_private)); + + if (!(addr % 8)) { + struct rtl83xx_shared_private *shared = phydev->shared->priv; + shared->name = "RTL8214FC"; + /* Configuration must be done while patching still possible */ + ret = rtl8380_configure_rtl8214fc(phydev); + if (ret) + return ret; + } + + return phy_sfp_probe(phydev, &rtl8214fc_sfp_ops); +} + +static int rtl8214c_phy_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + int addr = phydev->mdio.addr; + + /* All base addresses of the PHYs start at multiples of 8 */ + devm_phy_package_join(dev, phydev, addr & (~7), + sizeof(struct rtl83xx_shared_private)); + + if (!(addr % 8)) { + struct rtl83xx_shared_private *shared = phydev->shared->priv; + shared->name = "RTL8214C"; + /* Configuration must be done while patching still possible */ + return rtl8380_configure_rtl8214c(phydev); + } + return 0; +} + +static int rtl8218b_ext_phy_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + int addr = phydev->mdio.addr; + + /* All base addresses of the PHYs start at multiples of 8 */ + devm_phy_package_join(dev, phydev, addr & (~7), + sizeof(struct rtl83xx_shared_private)); + + if (!(addr % 8)) { + struct rtl83xx_shared_private *shared = phydev->shared->priv; + shared->name = "RTL8218B (external)"; + if (soc_info.family == RTL8380_FAMILY_ID) { + /* Configuration must be done while patching still possible */ + return rtl8380_configure_ext_rtl8218b(phydev); + } + } + + return 0; +} + +static int rtl8218b_int_phy_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + int addr = phydev->mdio.addr; + + if (soc_info.family != RTL8380_FAMILY_ID) + return -ENODEV; + if (addr >= 24) + return -ENODEV; + + pr_debug("%s: id: %d\n", __func__, addr); + /* All base addresses of the PHYs start at multiples of 8 */ + devm_phy_package_join(dev, phydev, addr & (~7), + sizeof(struct rtl83xx_shared_private)); + + if (!(addr % 8)) { + struct rtl83xx_shared_private *shared = phydev->shared->priv; + shared->name = "RTL8218B (internal)"; + /* Configuration must be done while patching still possible */ + return rtl8380_configure_int_rtl8218b(phydev); + } + + return 0; +} + +static int rtl8218d_phy_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + int addr = phydev->mdio.addr; + + pr_debug("%s: id: %d\n", __func__, addr); + /* All base 
addresses of the PHYs start at multiples of 8 */ + devm_phy_package_join(dev, phydev, addr & (~7), + sizeof(struct rtl83xx_shared_private)); + + /* All base addresses of the PHYs start at multiples of 8 */ + if (!(addr % 8)) { + struct rtl83xx_shared_private *shared = phydev->shared->priv; + shared->name = "RTL8218D"; + /* Configuration must be done while patching still possible */ +// TODO: return configure_rtl8218d(phydev); + } + return 0; +} + +static int rtl838x_serdes_probe(struct phy_device *phydev) +{ + int addr = phydev->mdio.addr; + + if (soc_info.family != RTL8380_FAMILY_ID) + return -ENODEV; + if (addr < 24) + return -ENODEV; + + /* On the RTL8380M, PHYs 24-27 connect to the internal SerDes */ + if (soc_info.id == 0x8380) { + if (addr == 24) + return rtl8380_configure_serdes(phydev); + return 0; + } + return -ENODEV; +} + +static int rtl8393_serdes_probe(struct phy_device *phydev) +{ + int addr = phydev->mdio.addr; + + pr_info("%s: id: %d\n", __func__, addr); + if (soc_info.family != RTL8390_FAMILY_ID) + return -ENODEV; + + if (addr < 24) + return -ENODEV; + + return rtl8390_configure_serdes(phydev); +} + +static int rtl8390_serdes_probe(struct phy_device *phydev) +{ + int addr = phydev->mdio.addr; + + if (soc_info.family != RTL8390_FAMILY_ID) + return -ENODEV; + + if (addr < 24) + return -ENODEV; + + return rtl8390_configure_generic(phydev); +} + +static int rtl9300_serdes_probe(struct phy_device *phydev) +{ + if (soc_info.family != RTL9300_FAMILY_ID) + return -ENODEV; + + phydev_info(phydev, "Detected internal RTL9300 Serdes\n"); + + return rtl9300_configure_serdes(phydev); +} + +static struct phy_driver rtl83xx_phy_driver[] = { + { + PHY_ID_MATCH_MODEL(PHY_ID_RTL8214C), + .name = "Realtek RTL8214C", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_REALTEK_PAGES, + .match_phy_device = rtl8214c_match_phy_device, + .probe = rtl8214c_phy_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .set_loopback = genphy_loopback, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_RTL8214FC), + .name = "Realtek RTL8214FC", + .features = PHY_GBIT_FIBRE_FEATURES, + .flags = PHY_HAS_REALTEK_PAGES, + .match_phy_device = rtl8214fc_match_phy_device, + .probe = rtl8214fc_phy_probe, + .suspend = rtl8214fc_suspend, + .resume = rtl8214fc_resume, + .set_loopback = genphy_loopback, + .set_port = rtl8214fc_set_port, + .get_port = rtl8214fc_get_port, + .set_eee = rtl8214fc_set_eee, + .get_eee = rtl8214fc_get_eee, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_RTL8218B_E), + .name = "Realtek RTL8218B (external)", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_REALTEK_PAGES, + .match_phy_device = rtl8218b_ext_match_phy_device, + .probe = rtl8218b_ext_phy_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .set_loopback = genphy_loopback, + .set_eee = rtl8218b_set_eee, + .get_eee = rtl8218b_get_eee, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_RTL8218D), + .name = "REALTEK RTL8218D", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_REALTEK_PAGES, + .probe = rtl8218d_phy_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .set_loopback = genphy_loopback, + .set_eee = rtl8218d_set_eee, + .get_eee = rtl8218d_get_eee, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_RTL8221B), + .name = "REALTEK RTL8221B", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_REALTEK_PAGES, + .suspend = genphy_suspend, + .resume = genphy_resume, + .set_loopback = genphy_loopback, + .read_page = rtl8226_read_page, + .write_page = rtl8226_write_page, + .read_status = rtl8226_read_status, + .config_aneg = rtl8226_config_aneg, 
+ .set_eee = rtl8226_set_eee, + .get_eee = rtl8226_get_eee, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_RTL8226), + .name = "REALTEK RTL8226", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_REALTEK_PAGES, + .suspend = genphy_suspend, + .resume = genphy_resume, + .set_loopback = genphy_loopback, + .read_page = rtl8226_read_page, + .write_page = rtl8226_write_page, + .read_status = rtl8226_read_status, + .config_aneg = rtl8226_config_aneg, + .set_eee = rtl8226_set_eee, + .get_eee = rtl8226_get_eee, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_RTL8218B_I), + .name = "Realtek RTL8218B (internal)", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_REALTEK_PAGES, + .probe = rtl8218b_int_phy_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .set_loopback = genphy_loopback, + .set_eee = rtl8218b_set_eee, + .get_eee = rtl8218b_get_eee, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_RTL8218B_I), + .name = "Realtek RTL8380 SERDES", + .features = PHY_GBIT_FIBRE_FEATURES, + .flags = PHY_HAS_REALTEK_PAGES, + .probe = rtl838x_serdes_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .set_loopback = genphy_loopback, + .read_status = rtl8380_read_status, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_RTL8393_I), + .name = "Realtek RTL8393 SERDES", + .features = PHY_GBIT_FIBRE_FEATURES, + .flags = PHY_HAS_REALTEK_PAGES, + .probe = rtl8393_serdes_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .set_loopback = genphy_loopback, + .read_status = rtl8393_read_status, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_RTL8390_GENERIC), + .name = "Realtek RTL8390 Generic", + .features = PHY_GBIT_FIBRE_FEATURES, + .flags = PHY_HAS_REALTEK_PAGES, + .probe = rtl8390_serdes_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .set_loopback = genphy_loopback, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_RTL9300_I), + .name = "REALTEK RTL9300 SERDES", + .features = PHY_GBIT_FIBRE_FEATURES, + .flags = PHY_HAS_REALTEK_PAGES, + .probe = rtl9300_serdes_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .set_loopback = genphy_loopback, + .read_status = rtl9300_read_status, + }, +}; + +module_phy_driver(rtl83xx_phy_driver); + +static struct mdio_device_id __maybe_unused rtl83xx_tbl[] = { + { PHY_ID_MATCH_MODEL(PHY_ID_RTL8214FC) }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, rtl83xx_tbl); + +MODULE_AUTHOR("B. Koblitz"); +MODULE_DESCRIPTION("RTL83xx PHY driver"); +MODULE_LICENSE("GPL"); diff --git a/target/linux/realtek/files-5.15/drivers/net/phy/rtl83xx-phy.h b/target/linux/realtek/files-5.15/drivers/net/phy/rtl83xx-phy.h new file mode 100644 index 0000000000..553d9a1575 --- /dev/null +++ b/target/linux/realtek/files-5.15/drivers/net/phy/rtl83xx-phy.h @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0-only + +struct rtl83xx_shared_private { + char *name; +}; + +struct __attribute__ ((__packed__)) part { + uint16_t start; + uint8_t wordsize; + uint8_t words; +}; + +struct __attribute__ ((__packed__)) fw_header { + uint32_t magic; + uint32_t phy; + uint32_t checksum; + uint32_t version; + struct part parts[10]; +}; + +// TODO: fixed path? 
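+/* The *.fw images referenced below carry PHY patch code; their on-disk layout appears to be described by the fw_header/part structures above (magic, target PHY id, checksum, version, and up to ten parts, each giving a start offset, word size and word count). They are presumably requested by the PHY driver at probe time, e.g. via request_firmware(). */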
+#define FIRMWARE_838X_8380_1 "rtl838x_phy/rtl838x_8380.fw" +#define FIRMWARE_838X_8214FC_1 "rtl838x_phy/rtl838x_8214fc.fw" +#define FIRMWARE_838X_8218b_1 "rtl838x_phy/rtl838x_8218b.fw" + +/* External RTL8218B and RTL8214FC IDs are identical */ +#define PHY_ID_RTL8214C 0x001cc942 +#define PHY_ID_RTL8214FC 0x001cc981 +#define PHY_ID_RTL8218B_E 0x001cc981 +#define PHY_ID_RTL8218D 0x001cc983 +#define PHY_ID_RTL8218B_I 0x001cca40 +#define PHY_ID_RTL8221B 0x001cc849 +#define PHY_ID_RTL8226 0x001cc838 +#define PHY_ID_RTL8390_GENERIC 0x001ccab0 +#define PHY_ID_RTL8393_I 0x001c8393 +#define PHY_ID_RTL9300_I 0x70d03106 + +// PHY MMD devices +#define MMD_AN 7 +#define MMD_VEND2 31 + +/* Registers of the internal Serdes of the 8380 */ +#define RTL838X_SDS_MODE_SEL (0x0028) +#define RTL838X_SDS_CFG_REG (0x0034) +#define RTL838X_INT_MODE_CTRL (0x005c) +#define RTL838X_DMY_REG31 (0x3b28) + +#define RTL8380_SDS4_FIB_REG0 (0xF800) +#define RTL838X_SDS4_REG28 (0xef80) +#define RTL838X_SDS4_DUMMY0 (0xef8c) +#define RTL838X_SDS5_EXT_REG6 (0xf18c) +#define RTL838X_SDS4_FIB_REG0 (RTL838X_SDS4_REG28 + 0x880) +#define RTL838X_SDS5_FIB_REG0 (RTL838X_SDS4_REG28 + 0x980) + +/* Registers of the internal SerDes of the RTL8390 */ +#define RTL839X_SDS12_13_XSG0 (0xB800) + +/* Registers of the internal Serdes of the 9300 */ +#define RTL930X_SDS_INDACS_CMD (0x03B0) +#define RTL930X_SDS_INDACS_DATA (0x03B4) +#define RTL930X_MAC_FORCE_MODE_CTRL (0xCA1C) + +/* Registers of the internal SerDes of the 9310 */ +#define RTL931X_SERDES_INDRT_ACCESS_CTRL (0x5638) +#define RTL931X_SERDES_INDRT_DATA_CTRL (0x563C) +#define RTL931X_SERDES_MODE_CTRL (0x13cc) +#define RTL931X_PS_SERDES_OFF_MODE_CTRL_ADDR (0x13F4) +#define RTL931X_MAC_SERDES_MODE_CTRL(sds) (0x136C + (((sds) << 2)))
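For reference, and not part of the patch above: a minimal, self-contained sketch of the read-modify-write bit-field access pattern implemented by rtl9310_sds_field_w()/rtl9310_sds_field_r() in rtl83xx-phy.c. The indirect SerDes PHY access is replaced here by a plain in-memory register (fake_sds_reg, sds_field_r and sds_field_w are illustrative names, not driver symbols), so the mask and shift arithmetic can be compiled and exercised on its own with a hosted C compiler.

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Stand-in for one SerDes register normally reached through
 * rtl931x_read_sds_phy()/rtl931x_write_sds_phy(). */
static uint32_t fake_sds_reg = 0xffff;

static uint32_t sds_field_r(int end_bit, int start_bit)
{
	int l = end_bit - start_bit + 1;

	if (l >= 32)
		return fake_sds_reg;

	return (fake_sds_reg >> start_bit) & (BIT(l) - 1);
}

static void sds_field_w(int end_bit, int start_bit, uint32_t v)
{
	int l = end_bit - start_bit + 1;
	uint32_t data = v;

	if (l < 32) {
		uint32_t mask = BIT(l) - 1;

		data = fake_sds_reg;             /* read the current value  */
		data &= ~(mask << start_bit);    /* clear the target field  */
		data |= (v & mask) << start_bit; /* merge in the new value  */
	}

	fake_sds_reg = data;                     /* write the result back   */
}

int main(void)
{
	/* Mirrors a call such as rtl9310_sds_field_w(asds, 0x1F, 0x9, 11, 6, 0x35):
	 * a 6-bit mode value lands in bits 11..6 without touching other bits. */
	sds_field_w(11, 6, 0x35);
	printf("reg = 0x%04x, field[11:6] = 0x%02x\n",
	       (unsigned int)fake_sds_reg, (unsigned int)sds_field_r(11, 6));

	return 0;
}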