aboutsummaryrefslogtreecommitdiffstats
path: root/target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch
diff options
context:
space:
mode:
Diffstat (limited to 'target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch')
-rw-r--r--target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch10255
1 files changed, 4858 insertions, 5397 deletions
diff --git a/target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch b/target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch
index 5363f0a356..23e4ff9202 100644
--- a/target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch
+++ b/target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch
@@ -1,6 +1,6 @@
-From e729e648e4259940473e256dd4f9c8df99e774b0 Mon Sep 17 00:00:00 2001
+From 77cc39e936f87463f92f7fddaaf0de51eec3972f Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 15:12:58 +0800
+Date: Fri, 6 Jul 2018 15:30:21 +0800
Subject: [PATCH] dpaa2: support layerscape
This is an integrated patch for layerscape dpaa2 support.
@@ -13,51 +13,61 @@ Signed-off-by: Catalin Horghidan <catalin.horghidan@nxp.com>
Signed-off-by: Mathew McBride <matt@traverse.com.au>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
- drivers/soc/fsl/ls2-console/Kconfig | 4 +
- drivers/soc/fsl/ls2-console/Makefile | 1 +
- drivers/soc/fsl/ls2-console/ls2-console.c | 284 ++
- drivers/staging/fsl-dpaa2/ethernet/Makefile | 11 +
- drivers/staging/fsl-dpaa2/ethernet/README | 186 ++
- .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 352 ++
- .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 +
- .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 184 +
- drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 3516 ++++++++++++++++++++
- drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 499 +++
- drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 864 +++++
- drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 176 +
- drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 658 ++++
- drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1903 +++++++++++
- drivers/staging/fsl-dpaa2/ethernet/dpni.h | 1053 ++++++
- drivers/staging/fsl-dpaa2/ethernet/net.h | 480 +++
- drivers/staging/fsl-dpaa2/ethsw/Kconfig | 6 +
- drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 +
- drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 851 +++++
- drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 2762 +++++++++++++++
- drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 1269 +++++++
- drivers/staging/fsl-dpaa2/ethsw/switch.c | 1857 +++++++++++
- drivers/staging/fsl-dpaa2/evb/Kconfig | 7 +
- drivers/staging/fsl-dpaa2/evb/Makefile | 10 +
- drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 279 ++
- drivers/staging/fsl-dpaa2/evb/dpdmux.c | 1112 +++++++
- drivers/staging/fsl-dpaa2/evb/dpdmux.h | 453 +++
- drivers/staging/fsl-dpaa2/evb/evb.c | 1350 ++++++++
- drivers/staging/fsl-dpaa2/mac/Kconfig | 23 +
- drivers/staging/fsl-dpaa2/mac/Makefile | 10 +
- drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 172 +
- drivers/staging/fsl-dpaa2/mac/dpmac.c | 620 ++++
- drivers/staging/fsl-dpaa2/mac/dpmac.h | 342 ++
- drivers/staging/fsl-dpaa2/mac/mac.c | 670 ++++
- drivers/staging/fsl-dpaa2/rtc/Makefile | 10 +
- drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 160 +
- drivers/staging/fsl-dpaa2/rtc/dprtc.c | 746 +++++
- drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 +
- drivers/staging/fsl-dpaa2/rtc/rtc.c | 243 ++
- 39 files changed, 23365 insertions(+)
+ drivers/soc/fsl/ls2-console/Kconfig | 4 +
+ drivers/soc/fsl/ls2-console/Makefile | 1 +
+ drivers/soc/fsl/ls2-console/ls2-console.c | 284 ++
+ drivers/staging/fsl-dpaa2/ethernet/Makefile | 12 +
+ drivers/staging/fsl-dpaa2/ethernet/README | 186 +
+ drivers/staging/fsl-dpaa2/ethernet/TODO | 18 +
+ .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c | 1253 ++++++
+ .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h | 182 +
+ .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 357 ++
+ .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 +
+ .../fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 185 +
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 3734 +++++++++++++++++
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 601 +++
+ .../fsl-dpaa2/ethernet/dpaa2-ethtool.c | 878 ++++
+ drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 176 +
+ drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 719 ++++
+ drivers/staging/fsl-dpaa2/ethernet/dpni.c | 2112 ++++++++++
+ drivers/staging/fsl-dpaa2/ethernet/dpni.h | 1172 ++++++
+ drivers/staging/fsl-dpaa2/ethernet/net.h | 480 +++
+ drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/ethsw/README | 106 +
+ drivers/staging/fsl-dpaa2/ethsw/TODO | 14 +
+ drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 359 ++
+ drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 1165 +++++
+ drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 592 +++
+ .../staging/fsl-dpaa2/ethsw/ethsw-ethtool.c | 206 +
+ drivers/staging/fsl-dpaa2/ethsw/ethsw.c | 1438 +++++++
+ drivers/staging/fsl-dpaa2/ethsw/ethsw.h | 90 +
+ drivers/staging/fsl-dpaa2/evb/Kconfig | 7 +
+ drivers/staging/fsl-dpaa2/evb/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 279 ++
+ drivers/staging/fsl-dpaa2/evb/dpdmux.c | 1111 +++++
+ drivers/staging/fsl-dpaa2/evb/dpdmux.h | 453 ++
+ drivers/staging/fsl-dpaa2/evb/evb.c | 1354 ++++++
+ drivers/staging/fsl-dpaa2/mac/Kconfig | 23 +
+ drivers/staging/fsl-dpaa2/mac/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 172 +
+ drivers/staging/fsl-dpaa2/mac/dpmac.c | 619 +++
+ drivers/staging/fsl-dpaa2/mac/dpmac.h | 342 ++
+ drivers/staging/fsl-dpaa2/mac/mac.c | 673 +++
+ drivers/staging/fsl-dpaa2/rtc/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 160 +
+ drivers/staging/fsl-dpaa2/rtc/dprtc.c | 746 ++++
+ drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 +
+ drivers/staging/fsl-dpaa2/rtc/rtc.c | 242 ++
+ include/linux/filter.h | 3 +
+ 46 files changed, 22780 insertions(+)
create mode 100644 drivers/soc/fsl/ls2-console/Kconfig
create mode 100644 drivers/soc/fsl/ls2-console/Makefile
create mode 100644 drivers/soc/fsl/ls2-console/ls2-console.c
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/README
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/TODO
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
@@ -69,12 +79,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/net.h
- create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Kconfig
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Makefile
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/README
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/TODO
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.c
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.h
- create mode 100644 drivers/staging/fsl-dpaa2/ethsw/switch.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.h
create mode 100644 drivers/staging/fsl-dpaa2/evb/Kconfig
create mode 100644 drivers/staging/fsl-dpaa2/evb/Makefile
create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
@@ -393,7 +406,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+MODULE_DESCRIPTION("Freescale LS2 console driver");
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
-@@ -0,0 +1,11 @@
+@@ -0,0 +1,12 @@
+#
+# Makefile for the Freescale DPAA2 Ethernet controller
+#
@@ -402,6 +415,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o
++fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_CEETM} += dpaa2-eth-ceetm.o
+
+# Needed by the tracing framework
+CFLAGS_dpaa2-eth.o := -I$(src)
@@ -595,8 +609,1470 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+Hardware specific statistics for the network interface as well as some
+non-standard driver stats can be consulted through ethtool -S option.
--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/TODO
+@@ -0,0 +1,18 @@
++* Add a DPAA2 MAC kernel driver in order to allow PHY management; currently
++ the DPMAC objects and their link to DPNIs are handled by MC internally
++ and all PHYs are seen as fixed-link
++* add more debug support: decide how to expose detailed debug statistics,
++ add ingress error queue support
++* MC firmware uprev; the DPAA2 objects used by the Ethernet driver need to
++ be kept in sync with binary interface changes in MC
++* refine README file
++* cleanup
++
++NOTE: None of the above is must-have before getting the DPAA2 Ethernet driver
++out of staging. The main requirement for that is to have the drivers it
++depends on, fsl-mc bus and DPIO driver, moved to drivers/bus and drivers/soc
++respectively.
++
++ Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
++ ruxandra.radulescu@nxp.com, devel@driverdev.osuosl.org,
++ linux-kernel@vger.kernel.org
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
+@@ -0,0 +1,1253 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * Copyright 2017 NXP
++ *
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++
++#include "dpaa2-eth-ceetm.h"
++#include "dpaa2-eth.h"
++
++#define DPAA2_CEETM_DESCRIPTION "FSL DPAA2 CEETM qdisc"
++/* Conversion formula from userspace passed Bps to expected Mbit */
++#define dpaa2_eth_bps_to_mbit(rate) (rate >> 17)
++
++static const struct nla_policy dpaa2_ceetm_policy[DPAA2_CEETM_TCA_MAX] = {
++ [DPAA2_CEETM_TCA_COPT] = { .len = sizeof(struct dpaa2_ceetm_tc_copt) },
++ [DPAA2_CEETM_TCA_QOPS] = { .len = sizeof(struct dpaa2_ceetm_tc_qopt) },
++};
++
++struct Qdisc_ops dpaa2_ceetm_qdisc_ops;
++
++static inline int dpaa2_eth_set_ch_shaping(struct dpaa2_eth_priv *priv,
++ struct dpni_tx_shaping_cfg *scfg,
++ struct dpni_tx_shaping_cfg *ecfg,
++ int coupled, int ch_id)
++{
++ int err = 0;
++
++ netdev_dbg(priv->net_dev, "%s: ch_id %d rate %d mbps\n", __func__,
++ ch_id, scfg->rate_limit);
++ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, scfg,
++ ecfg, coupled);
++ if (err)
++ netdev_err(priv->net_dev, "dpni_set_tx_shaping err\n");
++
++ return err;
++}
++
++static inline int dpaa2_eth_reset_ch_shaping(struct dpaa2_eth_priv *priv,
++ int ch_id)
++{
++ struct dpni_tx_shaping_cfg cfg = { 0 };
++
++ return dpaa2_eth_set_ch_shaping(priv, &cfg, &cfg, 0, ch_id);
++}
++
++static inline int
++dpaa2_eth_update_shaping_cfg(struct net_device *dev,
++ struct dpaa2_ceetm_shaping_cfg cfg,
++ struct dpni_tx_shaping_cfg *scfg,
++ struct dpni_tx_shaping_cfg *ecfg)
++{
++ scfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.cir);
++ ecfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.eir);
++
++ if (cfg.cbs > DPAA2_ETH_MAX_BURST_SIZE) {
++ netdev_err(dev, "Committed burst size must be under %d\n",
++ DPAA2_ETH_MAX_BURST_SIZE);
++ return -EINVAL;
++ }
++
++ scfg->max_burst_size = cfg.cbs;
++
++ if (cfg.ebs > DPAA2_ETH_MAX_BURST_SIZE) {
++ netdev_err(dev, "Excess burst size must be under %d\n",
++ DPAA2_ETH_MAX_BURST_SIZE);
++ return -EINVAL;
++ }
++
++ ecfg->max_burst_size = cfg.ebs;
++
++ if ((!cfg.cir || !cfg.eir) && cfg.coupled) {
++ netdev_err(dev, "Coupling can be set when both CIR and EIR are finite\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++enum update_tx_prio {
++ DPAA2_ETH_ADD_CQ,
++ DPAA2_ETH_DEL_CQ,
++};
++
++/* Normalize weights based on max passed value */
++static inline int dpaa2_eth_normalize_tx_prio(struct dpaa2_ceetm_qdisc *priv)
++{
++ struct dpni_tx_schedule_cfg *sched_cfg;
++ struct dpaa2_ceetm_class *cl;
++ u32 qpri;
++ u16 weight_max = 0, increment;
++ int i;
++
++ /* Check the boundaries of the provided values */
++ for (i = 0; i < priv->clhash.hashsize; i++)
++ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
++ weight_max = (weight_max == 0 ? cl->prio.weight :
++ (weight_max < cl->prio.weight ?
++ cl->prio.weight : weight_max));
++
++ /* If there are no elements, there's nothing to do */
++ if (weight_max == 0)
++ return 0;
++
++ increment = (DPAA2_CEETM_MAX_WEIGHT - DPAA2_CEETM_MIN_WEIGHT) /
++ weight_max;
++
++ for (i = 0; i < priv->clhash.hashsize; i++) {
++ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
++ if (cl->prio.mode == STRICT_PRIORITY)
++ continue;
++
++ qpri = cl->prio.qpri;
++ sched_cfg = &priv->prio.tx_prio_cfg.tc_sched[qpri];
++
++ sched_cfg->delta_bandwidth =
++ DPAA2_CEETM_MIN_WEIGHT +
++ (cl->prio.weight * increment);
++
++ pr_debug("%s: Normalized CQ qpri %d weight to %d\n",
++ __func__, qpri, sched_cfg->delta_bandwidth);
++ }
++ }
++
++ return 0;
++}
++
++static inline int dpaa2_eth_update_tx_prio(struct dpaa2_eth_priv *priv,
++ struct dpaa2_ceetm_class *cl,
++ enum update_tx_prio type)
++{
++ struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
++ struct dpni_congestion_notification_cfg notif_cfg = {0};
++ struct dpni_tx_schedule_cfg *sched_cfg;
++ struct dpni_taildrop td = {0};
++ u8 ch_id = 0, tc_id = 0;
++ u32 qpri = 0;
++ int err = 0;
++
++ qpri = cl->prio.qpri;
++ tc_id = DPNI_BUILD_CH_TC(ch_id, qpri);
++
++ switch (type) {
++ case DPAA2_ETH_ADD_CQ:
++ /* Disable congestion notifications */
++ notif_cfg.threshold_entry = 0;
++ notif_cfg.threshold_exit = 0;
++ err = dpni_set_congestion_notification(priv->mc_io, 0,
++ priv->mc_token,
++ DPNI_QUEUE_TX, tc_id,
++ &notif_cfg);
++ if (err) {
++ netdev_err(priv->net_dev, "Error disabling congestion notifications %d\n",
++ err);
++ return err;
++ }
++ /* Enable taildrop */
++ td.enable = 1;
++ td.units = DPNI_CONGESTION_UNIT_FRAMES;
++ td.threshold = DPAA2_CEETM_TD_THRESHOLD;
++ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
++ DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
++ 0, &td);
++ if (err) {
++ netdev_err(priv->net_dev, "Error enabling Tx taildrop %d\n",
++ err);
++ return err;
++ }
++ break;
++ case DPAA2_ETH_DEL_CQ:
++ /* Disable taildrop */
++ td.enable = 0;
++ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
++ DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
++ 0, &td);
++ if (err) {
++ netdev_err(priv->net_dev, "Error disabling Tx taildrop %d\n",
++ err);
++ return err;
++ }
++ /* Enable congestion notifications */
++ notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
++ notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
++ notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
++ notif_cfg.message_ctx = (u64)priv;
++ notif_cfg.message_iova = priv->cscn_dma;
++ notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
++ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
++ DPNI_CONG_OPT_COHERENT_WRITE;
++ err = dpni_set_congestion_notification(priv->mc_io, 0,
++ priv->mc_token,
++ DPNI_QUEUE_TX, tc_id,
++ &notif_cfg);
++ if (err) {
++ netdev_err(priv->net_dev, "Error enabling congestion notifications %d\n",
++ err);
++ return err;
++ }
++ break;
++ }
++
++ /* We can zero out the structure in the tx_prio_conf array */
++ if (type == DPAA2_ETH_DEL_CQ) {
++ sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[qpri];
++ memset(sched_cfg, 0, sizeof(*sched_cfg));
++ }
++
++ /* Normalize priorities */
++ err = dpaa2_eth_normalize_tx_prio(sch);
++
++ /* Debug print goes here */
++ print_hex_dump_debug("tx_prio: ", DUMP_PREFIX_OFFSET, 16, 1,
++ &sch->prio.tx_prio_cfg,
++ sizeof(sch->prio.tx_prio_cfg), 0);
++
++ /* Call dpni_set_tx_priorities for the entire prio qdisc */
++ err = dpni_set_tx_priorities(priv->mc_io, 0, priv->mc_token,
++ &sch->prio.tx_prio_cfg);
++ if (err)
++ netdev_err(priv->net_dev, "dpni_set_tx_priorities err %d\n",
++ err);
++
++ return err;
++}
++
++static void dpaa2_eth_ceetm_enable(struct dpaa2_eth_priv *priv)
++{
++ priv->ceetm_en = true;
++}
++
++static void dpaa2_eth_ceetm_disable(struct dpaa2_eth_priv *priv)
++{
++ priv->ceetm_en = false;
++}
++
++/* Find class in qdisc hash table using given handle */
++static inline struct dpaa2_ceetm_class *dpaa2_ceetm_find(u32 handle,
++ struct Qdisc *sch)
++{
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct Qdisc_class_common *clc;
++
++ pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
++ __func__, handle, sch->handle);
++
++ clc = qdisc_class_find(&priv->clhash, handle);
++ return clc ? container_of(clc, struct dpaa2_ceetm_class, common) : NULL;
++}
++
++/* Insert a class in the qdisc's class hash */
++static void dpaa2_ceetm_link_class(struct Qdisc *sch,
++ struct Qdisc_class_hash *clhash,
++ struct Qdisc_class_common *common)
++{
++ sch_tree_lock(sch);
++ qdisc_class_hash_insert(clhash, common);
++ sch_tree_unlock(sch);
++ qdisc_class_hash_grow(sch, clhash);
++}
++
++/* Destroy a ceetm class */
++static void dpaa2_ceetm_cls_destroy(struct Qdisc *sch,
++ struct dpaa2_ceetm_class *cl)
++{
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_eth_priv *priv = netdev_priv(dev);
++
++ if (!cl)
++ return;
++
++ pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
++ __func__, cl->common.classid, sch->handle);
++
++ /* Recurse into child first */
++ if (cl->child) {
++ qdisc_destroy(cl->child);
++ cl->child = NULL;
++ }
++
++ switch (cl->type) {
++ case CEETM_ROOT:
++ if (dpaa2_eth_reset_ch_shaping(priv, cl->root.ch_id))
++ netdev_err(dev, "Error resetting channel shaping\n");
++
++ break;
++
++ case CEETM_PRIO:
++ if (dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_DEL_CQ))
++ netdev_err(dev, "Error resetting tx_priorities\n");
++
++ if (cl->prio.cstats)
++ free_percpu(cl->prio.cstats);
++
++ break;
++ }
++
++ tcf_destroy_chain(&cl->filter_list);
++ kfree(cl);
++}
++
++/* Destroy a ceetm qdisc */
++static void dpaa2_ceetm_destroy(struct Qdisc *sch)
++{
++ unsigned int i;
++ struct hlist_node *next;
++ struct dpaa2_ceetm_class *cl;
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
++
++ pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
++ __func__, sch->handle);
++
++ /* All filters need to be removed before destroying the classes */
++ tcf_destroy_chain(&priv->filter_list);
++
++ for (i = 0; i < priv->clhash.hashsize; i++) {
++ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
++ tcf_destroy_chain(&cl->filter_list);
++ }
++
++ for (i = 0; i < priv->clhash.hashsize; i++) {
++ hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
++ common.hnode)
++ dpaa2_ceetm_cls_destroy(sch, cl);
++ }
++
++ qdisc_class_hash_destroy(&priv->clhash);
++
++ switch (priv->type) {
++ case CEETM_ROOT:
++ dpaa2_eth_ceetm_disable(priv_eth);
++
++ if (priv->root.qstats)
++ free_percpu(priv->root.qstats);
++
++ if (!priv->root.qdiscs)
++ break;
++
++ /* Destroy the pfifo qdiscs in case they haven't been attached
++ * to the netdev queues yet.
++ */
++ for (i = 0; i < dev->num_tx_queues; i++)
++ if (priv->root.qdiscs[i])
++ qdisc_destroy(priv->root.qdiscs[i]);
++
++ kfree(priv->root.qdiscs);
++ break;
++
++ case CEETM_PRIO:
++ if (priv->prio.parent)
++ priv->prio.parent->child = NULL;
++ break;
++ }
++}
++
++static int dpaa2_ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
++{
++ struct Qdisc *qdisc;
++ unsigned int ntx, i;
++ struct nlattr *nest;
++ struct dpaa2_ceetm_tc_qopt qopt;
++ struct dpaa2_ceetm_qdisc_stats *qstats;
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ sch_tree_lock(sch);
++ memset(&qopt, 0, sizeof(qopt));
++ qopt.type = priv->type;
++ qopt.shaped = priv->shaped;
++
++ switch (priv->type) {
++ case CEETM_ROOT:
++ /* Gather statistics from the underlying pfifo qdiscs */
++ sch->q.qlen = 0;
++ memset(&sch->bstats, 0, sizeof(sch->bstats));
++ memset(&sch->qstats, 0, sizeof(sch->qstats));
++
++ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
++ qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
++ sch->q.qlen += qdisc->q.qlen;
++ sch->bstats.bytes += qdisc->bstats.bytes;
++ sch->bstats.packets += qdisc->bstats.packets;
++ sch->qstats.qlen += qdisc->qstats.qlen;
++ sch->qstats.backlog += qdisc->qstats.backlog;
++ sch->qstats.drops += qdisc->qstats.drops;
++ sch->qstats.requeues += qdisc->qstats.requeues;
++ sch->qstats.overlimits += qdisc->qstats.overlimits;
++ }
++
++ for_each_online_cpu(i) {
++ qstats = per_cpu_ptr(priv->root.qstats, i);
++ sch->qstats.drops += qstats->drops;
++ }
++
++ break;
++
++ case CEETM_PRIO:
++ qopt.prio_group_A = priv->prio.tx_prio_cfg.prio_group_A;
++ qopt.prio_group_B = priv->prio.tx_prio_cfg.prio_group_B;
++ qopt.separate_groups = priv->prio.tx_prio_cfg.separate_groups;
++ break;
++
++ default:
++ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
++ sch_tree_unlock(sch);
++ return -EINVAL;
++ }
++
++ nest = nla_nest_start(skb, TCA_OPTIONS);
++ if (!nest)
++ goto nla_put_failure;
++ if (nla_put(skb, DPAA2_CEETM_TCA_QOPS, sizeof(qopt), &qopt))
++ goto nla_put_failure;
++ nla_nest_end(skb, nest);
++
++ sch_tree_unlock(sch);
++ return skb->len;
++
++nla_put_failure:
++ sch_tree_unlock(sch);
++ nla_nest_cancel(skb, nest);
++ return -EMSGSIZE;
++}
++
++static int dpaa2_ceetm_change_prio(struct Qdisc *sch,
++ struct dpaa2_ceetm_qdisc *priv,
++ struct dpaa2_ceetm_tc_qopt *qopt)
++{
++ /* TODO: Once LX2 support is added */
++ /* priv->shaped = parent_cl->shaped; */
++ priv->prio.tx_prio_cfg.prio_group_A = qopt->prio_group_A;
++ priv->prio.tx_prio_cfg.prio_group_B = qopt->prio_group_B;
++ priv->prio.tx_prio_cfg.separate_groups = qopt->separate_groups;
++
++ return 0;
++}
++
++/* Edit a ceetm qdisc */
++static int dpaa2_ceetm_change(struct Qdisc *sch, struct nlattr *opt)
++{
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
++ struct dpaa2_ceetm_tc_qopt *qopt;
++ int err;
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
++ dpaa2_ceetm_policy);
++ if (err < 0) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "nla_parse_nested");
++ return err;
++ }
++
++ if (!tb[DPAA2_CEETM_TCA_QOPS]) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "tb");
++ return -EINVAL;
++ }
++
++ if (TC_H_MIN(sch->handle)) {
++ pr_err("CEETM: a qdisc should not have a minor\n");
++ return -EINVAL;
++ }
++
++ qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
++
++ if (priv->type != qopt->type) {
++ pr_err("CEETM: qdisc %X is not of the provided type\n",
++ sch->handle);
++ return -EINVAL;
++ }
++
++ switch (priv->type) {
++ case CEETM_PRIO:
++ err = dpaa2_ceetm_change_prio(sch, priv, qopt);
++ break;
++ default:
++ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
++ err = -EINVAL;
++ }
++
++ return err;
++}
++
++/* Configure a root ceetm qdisc */
++static int dpaa2_ceetm_init_root(struct Qdisc *sch,
++ struct dpaa2_ceetm_qdisc *priv,
++ struct dpaa2_ceetm_tc_qopt *qopt)
++{
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
++ struct netdev_queue *dev_queue;
++ unsigned int i, parent_id;
++ struct Qdisc *qdisc;
++ int err;
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ /* Validate inputs */
++ if (sch->parent != TC_H_ROOT) {
++ pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n");
++ tcf_destroy_chain(&priv->filter_list);
++ qdisc_class_hash_destroy(&priv->clhash);
++ return -EINVAL;
++ }
++
++ /* Pre-allocate underlying pfifo qdiscs.
++ *
++ * We want to offload shaping and scheduling decisions to the hardware.
++ * The pfifo qdiscs will be attached to the netdev queues and will
++ * guide the traffic from the IP stack down to the driver with minimum
++ * interference.
++ *
++ * The CEETM qdiscs and classes will be crossed when the traffic
++ * reaches the driver.
++ */
++ priv->root.qdiscs = kcalloc(dev->num_tx_queues,
++ sizeof(priv->root.qdiscs[0]),
++ GFP_KERNEL);
++ if (!priv->root.qdiscs) {
++ err = -ENOMEM;
++ goto err_init_root;
++ }
++
++ for (i = 0; i < dev->num_tx_queues; i++) {
++ dev_queue = netdev_get_tx_queue(dev, i);
++ parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle),
++ TC_H_MIN(i + PFIFO_MIN_OFFSET));
++
++ qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
++ parent_id);
++ if (!qdisc) {
++ err = -ENOMEM;
++ goto err_init_root;
++ }
++
++ priv->root.qdiscs[i] = qdisc;
++ qdisc->flags |= TCQ_F_ONETXQUEUE;
++ }
++
++ sch->flags |= TCQ_F_MQROOT;
++
++ priv->root.qstats = alloc_percpu(struct dpaa2_ceetm_qdisc_stats);
++ if (!priv->root.qstats) {
++ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
++ __func__);
++ err = -ENOMEM;
++ goto err_init_root;
++ }
++
++ dpaa2_eth_ceetm_enable(priv_eth);
++ return 0;
++
++err_init_root:
++ dpaa2_ceetm_destroy(sch);
++ return err;
++}
++
++/* Configure a prio ceetm qdisc */
++static int dpaa2_ceetm_init_prio(struct Qdisc *sch,
++ struct dpaa2_ceetm_qdisc *priv,
++ struct dpaa2_ceetm_tc_qopt *qopt)
++{
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_ceetm_class *parent_cl;
++ struct Qdisc *parent_qdisc;
++ int err;
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ if (sch->parent == TC_H_ROOT) {
++ pr_err("CEETM: a prio ceetm qdisc can not be root\n");
++ err = -EINVAL;
++ goto err_init_prio;
++ }
++
++ parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
++ if (strcmp(parent_qdisc->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
++ pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
++ err = -EINVAL;
++ goto err_init_prio;
++ }
++
++ /* Obtain the parent root ceetm_class */
++ parent_cl = dpaa2_ceetm_find(sch->parent, parent_qdisc);
++
++ if (!parent_cl || parent_cl->type != CEETM_ROOT) {
++ pr_err("CEETM: a prio ceetm qdiscs can be added only under a root ceetm class\n");
++ err = -EINVAL;
++ goto err_init_prio;
++ }
++
++ priv->prio.parent = parent_cl;
++ parent_cl->child = sch;
++
++ err = dpaa2_ceetm_change_prio(sch, priv, qopt);
++
++ return 0;
++
++err_init_prio:
++ dpaa2_ceetm_destroy(sch);
++ return err;
++}
++
++/* Configure a generic ceetm qdisc */
++static int dpaa2_ceetm_init(struct Qdisc *sch, struct nlattr *opt)
++{
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct net_device *dev = qdisc_dev(sch);
++ struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
++ struct dpaa2_ceetm_tc_qopt *qopt;
++ int err;
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ if (!netif_is_multiqueue(dev))
++ return -EOPNOTSUPP;
++
++ RCU_INIT_POINTER(priv->filter_list, NULL);
++
++ if (!opt) {
++ pr_err(KBUILD_BASENAME " : %s : tc error - opt = NULL\n",
++ __func__);
++ return -EINVAL;
++ }
++
++ err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
++ dpaa2_ceetm_policy);
++ if (err < 0) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "nla_parse_nested");
++ return err;
++ }
++
++ if (!tb[DPAA2_CEETM_TCA_QOPS]) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "tb");
++ return -EINVAL;
++ }
++
++ if (TC_H_MIN(sch->handle)) {
++ pr_err("CEETM: a qdisc should not have a minor\n");
++ return -EINVAL;
++ }
++
++ qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
++
++ /* Initialize the class hash list. Each qdisc has its own class hash */
++ err = qdisc_class_hash_init(&priv->clhash);
++ if (err < 0) {
++ pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n",
++ __func__);
++ return err;
++ }
++
++ priv->type = qopt->type;
++ priv->shaped = qopt->shaped;
++
++ switch (priv->type) {
++ case CEETM_ROOT:
++ err = dpaa2_ceetm_init_root(sch, priv, qopt);
++ break;
++ case CEETM_PRIO:
++ err = dpaa2_ceetm_init_prio(sch, priv, qopt);
++ break;
++ default:
++ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
++ dpaa2_ceetm_destroy(sch);
++ err = -EINVAL;
++ }
++
++ return err;
++}
++
++/* Attach the underlying pfifo qdiscs */
++static void dpaa2_ceetm_attach(struct Qdisc *sch)
++{
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct Qdisc *qdisc, *old_qdisc;
++ unsigned int i;
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ for (i = 0; i < dev->num_tx_queues; i++) {
++ qdisc = priv->root.qdiscs[i];
++ old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
++ if (old_qdisc)
++ qdisc_destroy(old_qdisc);
++ }
++
++ /* Remove the references to the pfifo qdiscs since the kernel will
++ * destroy them when needed. No cleanup from our part is required from
++ * this point on.
++ */
++ kfree(priv->root.qdiscs);
++ priv->root.qdiscs = NULL;
++}
++
++static unsigned long dpaa2_ceetm_cls_get(struct Qdisc *sch, u32 classid)
++{
++ struct dpaa2_ceetm_class *cl;
++
++ pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
++ __func__, classid, sch->handle);
++ cl = dpaa2_ceetm_find(classid, sch);
++
++ if (cl)
++ cl->refcnt++;
++
++ return (unsigned long)cl;
++}
++
++static void dpaa2_ceetm_cls_put(struct Qdisc *sch, unsigned long arg)
++{
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
++ pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
++ __func__, cl->common.classid, sch->handle);
++ cl->refcnt--;
++
++ if (cl->refcnt == 0)
++ dpaa2_ceetm_cls_destroy(sch, cl);
++}
++
++static int dpaa2_ceetm_cls_change_root(struct dpaa2_ceetm_class *cl,
++ struct dpaa2_ceetm_tc_copt *copt,
++ struct net_device *dev)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(dev);
++ struct dpni_tx_shaping_cfg scfg = { 0 }, ecfg = { 0 };
++ int err = 0;
++
++ pr_debug(KBUILD_BASENAME " : %s : class %X\n", __func__,
++ cl->common.classid);
++
++ if (!cl->shaped)
++ return 0;
++
++ if (dpaa2_eth_update_shaping_cfg(dev, copt->shaping_cfg,
++ &scfg, &ecfg))
++ return -EINVAL;
++
++ err = dpaa2_eth_set_ch_shaping(priv, &scfg, &ecfg,
++ copt->shaping_cfg.coupled,
++ cl->root.ch_id);
++ if (err)
++ return err;
++
++ memcpy(&cl->root.shaping_cfg, &copt->shaping_cfg,
++ sizeof(struct dpaa2_ceetm_shaping_cfg));
++
++ return err;
++}
++
++static int dpaa2_ceetm_cls_change_prio(struct dpaa2_ceetm_class *cl,
++ struct dpaa2_ceetm_tc_copt *copt,
++ struct net_device *dev)
++{
++ struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
++ struct dpni_tx_schedule_cfg *sched_cfg;
++ struct dpaa2_eth_priv *priv = netdev_priv(dev);
++ int err;
++
++ pr_debug(KBUILD_BASENAME " : %s : class %X mode %d weight %d\n",
++ __func__, cl->common.classid, copt->mode, copt->weight);
++
++ if (!cl->prio.cstats) {
++ cl->prio.cstats = alloc_percpu(struct dpaa2_ceetm_class_stats);
++ if (!cl->prio.cstats) {
++ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
++ __func__);
++ return -ENOMEM;
++ }
++ }
++
++ cl->prio.mode = copt->mode;
++ cl->prio.weight = copt->weight;
++
++ sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[cl->prio.qpri];
++
++ switch (copt->mode) {
++ case STRICT_PRIORITY:
++ sched_cfg->mode = DPNI_TX_SCHED_STRICT_PRIORITY;
++ break;
++ case WEIGHTED_A:
++ sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_A;
++ break;
++ case WEIGHTED_B:
++ sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_B;
++ break;
++ }
++
++ err = dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_ADD_CQ);
++
++ return err;
++}
++
++/* Add a new ceetm class */
++static int dpaa2_ceetm_cls_add(struct Qdisc *sch, u32 classid,
++ struct dpaa2_ceetm_tc_copt *copt,
++ unsigned long *arg)
++{
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
++ struct dpaa2_ceetm_class *cl;
++ int err;
++
++ if (copt->type == CEETM_ROOT &&
++ priv->clhash.hashelems == dpaa2_eth_ch_count(priv_eth)) {
++ pr_err("CEETM: only %d channel%s per DPNI allowed, sorry\n",
++ dpaa2_eth_ch_count(priv_eth),
++ dpaa2_eth_ch_count(priv_eth) == 1 ? "" : "s");
++ return -EINVAL;
++ }
++
++ if (copt->type == CEETM_PRIO &&
++ priv->clhash.hashelems == dpaa2_eth_tc_count(priv_eth)) {
++ pr_err("CEETM: only %d queue%s per channel allowed, sorry\n",
++ dpaa2_eth_tc_count(priv_eth),
++ dpaa2_eth_tc_count(priv_eth) == 1 ? "" : "s");
++ return -EINVAL;
++ }
++
++ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
++ if (!cl)
++ return -ENOMEM;
++
++ RCU_INIT_POINTER(cl->filter_list, NULL);
++
++ cl->common.classid = classid;
++ cl->refcnt = 1;
++ cl->parent = sch;
++ cl->child = NULL;
++
++ /* Add class handle in Qdisc */
++ dpaa2_ceetm_link_class(sch, &priv->clhash, &cl->common);
++
++ cl->shaped = copt->shaped;
++ cl->type = copt->type;
++
++ /* Claim a CEETM channel / tc - DPAA2. will assume transition from
++ * classid to qdid/qpri, starting from qdid / qpri 0
++ */
++ switch (copt->type) {
++ case CEETM_ROOT:
++ cl->root.ch_id = classid - sch->handle - 1;
++ err = dpaa2_ceetm_cls_change_root(cl, copt, dev);
++ break;
++ case CEETM_PRIO:
++ cl->prio.qpri = classid - sch->handle - 1;
++ err = dpaa2_ceetm_cls_change_prio(cl, copt, dev);
++ break;
++ default:
++ err = -EINVAL;
++ break;
++ }
++
++ if (err) {
++ pr_err("%s: Unable to set new %s class\n", __func__,
++ (copt->type == CEETM_ROOT ? "root" : "prio"));
++ goto out_free;
++ }
++
++ switch (copt->type) {
++ case CEETM_ROOT:
++ pr_debug(KBUILD_BASENAME " : %s : configured root class %X associated with channel qdid %d\n",
++ __func__, classid, cl->root.ch_id);
++ break;
++ case CEETM_PRIO:
++ pr_debug(KBUILD_BASENAME " : %s : configured prio class %X associated with queue qpri %d\n",
++ __func__, classid, cl->prio.qpri);
++ break;
++ }
++
++ *arg = (unsigned long)cl;
++ return 0;
++
++out_free:
++ kfree(cl);
++ return err;
++}
++
++/* Add or configure a ceetm class */
++static int dpaa2_ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid,
++ struct nlattr **tca, unsigned long *arg)
++{
++ struct dpaa2_ceetm_qdisc *priv;
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)*arg;
++ struct nlattr *opt = tca[TCA_OPTIONS];
++ struct nlattr *tb[DPAA2_CEETM_TCA_MAX];
++ struct dpaa2_ceetm_tc_copt *copt;
++ struct net_device *dev = qdisc_dev(sch);
++ int err;
++
++ pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
++ __func__, classid, sch->handle);
++
++ if (strcmp(sch->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
++ pr_err("CEETM: a ceetm class can not be attached to other qdisc/class types\n");
++ return -EINVAL;
++ }
++
++ priv = qdisc_priv(sch);
++
++ if (!opt) {
++ pr_err(KBUILD_BASENAME " : %s : tc error NULL opt\n", __func__);
++ return -EINVAL;
++ }
++
++ err = nla_parse_nested(tb, DPAA2_CEETM_TCA_COPT, opt,
++ dpaa2_ceetm_policy);
++ if (err < 0) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "nla_parse_nested");
++ return -EINVAL;
++ }
++
++ if (!tb[DPAA2_CEETM_TCA_COPT]) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "tb");
++ return -EINVAL;
++ }
++
++ copt = nla_data(tb[DPAA2_CEETM_TCA_COPT]);
++
++ /* Configure an existing ceetm class */
++ if (cl) {
++ if (copt->type != cl->type) {
++ pr_err("CEETM: class %X is not of the provided type\n",
++ cl->common.classid);
++ return -EINVAL;
++ }
++
++ switch (copt->type) {
++ case CEETM_ROOT:
++ return dpaa2_ceetm_cls_change_root(cl, copt, dev);
++ case CEETM_PRIO:
++ return dpaa2_ceetm_cls_change_prio(cl, copt, dev);
++
++ default:
++ pr_err(KBUILD_BASENAME " : %s : invalid class\n",
++ __func__);
++ return -EINVAL;
++ }
++ }
++
++ return dpaa2_ceetm_cls_add(sch, classid, copt, arg);
++}
++
++static void dpaa2_ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
++{
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct dpaa2_ceetm_class *cl;
++ unsigned int i;
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ if (arg->stop)
++ return;
++
++ for (i = 0; i < priv->clhash.hashsize; i++) {
++ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
++ if (arg->count < arg->skip) {
++ arg->count++;
++ continue;
++ }
++ if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
++ arg->stop = 1;
++ return;
++ }
++ arg->count++;
++ }
++ }
++}
++
++static int dpaa2_ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
++ struct sk_buff *skb, struct tcmsg *tcm)
++{
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
++ struct nlattr *nest;
++ struct dpaa2_ceetm_tc_copt copt;
++
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
++ __func__, cl->common.classid, sch->handle);
++
++ sch_tree_lock(sch);
++
++ tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
++ tcm->tcm_handle = cl->common.classid;
++
++ memset(&copt, 0, sizeof(copt));
++
++ copt.shaped = cl->shaped;
++ copt.type = cl->type;
++
++ switch (cl->type) {
++ case CEETM_ROOT:
++ if (cl->child)
++ tcm->tcm_info = cl->child->handle;
++
++ memcpy(&copt.shaping_cfg, &cl->root.shaping_cfg,
++ sizeof(struct dpaa2_ceetm_shaping_cfg));
++
++ break;
++
++ case CEETM_PRIO:
++ if (cl->child)
++ tcm->tcm_info = cl->child->handle;
++
++ copt.mode = cl->prio.mode;
++ copt.weight = cl->prio.weight;
++
++ break;
++ }
++
++ nest = nla_nest_start(skb, TCA_OPTIONS);
++ if (!nest)
++ goto nla_put_failure;
++ if (nla_put(skb, DPAA2_CEETM_TCA_COPT, sizeof(copt), &copt))
++ goto nla_put_failure;
++ nla_nest_end(skb, nest);
++ sch_tree_unlock(sch);
++ return skb->len;
++
++nla_put_failure:
++ sch_tree_unlock(sch);
++ nla_nest_cancel(skb, nest);
++ return -EMSGSIZE;
++}
++
++static int dpaa2_ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
++{
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
++
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
++ __func__, cl->common.classid, sch->handle);
++
++ sch_tree_lock(sch);
++ qdisc_class_hash_remove(&priv->clhash, &cl->common);
++ cl->refcnt--;
++ WARN_ON(cl->refcnt == 0);
++ sch_tree_unlock(sch);
++ return 0;
++}
++
++/* Get the class' child qdisc, if any */
++static struct Qdisc *dpaa2_ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
++{
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
++
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
++ __func__, cl->common.classid, sch->handle);
++
++ switch (cl->type) {
++ case CEETM_ROOT:
++ case CEETM_PRIO:
++ return cl->child;
++ }
++
++ return NULL;
++}
++
++static int dpaa2_ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
++ struct Qdisc *new, struct Qdisc **old)
++{
++ if (new && strcmp(new->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
++ pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n");
++ return -EOPNOTSUPP;
++ }
++
++ return 0;
++}
++
++static int dpaa2_ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
++ struct gnet_dump *d)
++{
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
++ struct gnet_stats_basic_packed tmp_bstats;
++ struct dpaa2_ceetm_tc_xstats xstats;
++ union dpni_statistics dpni_stats;
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
++ u8 ch_id = 0;
++ int err;
++
++ memset(&xstats, 0, sizeof(xstats));
++ memset(&tmp_bstats, 0, sizeof(tmp_bstats));
++
++ if (cl->type == CEETM_ROOT)
++ return 0;
++
++ err = dpni_get_statistics(priv_eth->mc_io, 0, priv_eth->mc_token, 3,
++ DPNI_BUILD_CH_TC(ch_id, cl->prio.qpri),
++ &dpni_stats);
++ if (err)
++ netdev_warn(dev, "dpni_get_stats(%d) failed - %d\n", 3, err);
++
++ xstats.ceetm_dequeue_bytes = dpni_stats.page_3.ceetm_dequeue_bytes;
++ xstats.ceetm_dequeue_frames = dpni_stats.page_3.ceetm_dequeue_frames;
++ xstats.ceetm_reject_bytes = dpni_stats.page_3.ceetm_reject_bytes;
++ xstats.ceetm_reject_frames = dpni_stats.page_3.ceetm_reject_frames;
++
++ return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
++}
++
++static struct tcf_proto __rcu **dpaa2_ceetm_tcf_chain(struct Qdisc *sch,
++ unsigned long arg)
++{
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
++
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
++ cl ? cl->common.classid : 0, sch->handle);
++ return cl ? &cl->filter_list : &priv->filter_list;
++}
++
++static unsigned long dpaa2_ceetm_tcf_bind(struct Qdisc *sch,
++ unsigned long parent,
++ u32 classid)
++{
++ struct dpaa2_ceetm_class *cl = dpaa2_ceetm_find(classid, sch);
++
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
++ cl ? cl->common.classid : 0, sch->handle);
++ return (unsigned long)cl;
++}
++
++static void dpaa2_ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
++{
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
++
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
++ cl ? cl->common.classid : 0, sch->handle);
++}
++
++const struct Qdisc_class_ops dpaa2_ceetm_cls_ops = {
++ .graft = dpaa2_ceetm_cls_graft,
++ .leaf = dpaa2_ceetm_cls_leaf,
++ .get = dpaa2_ceetm_cls_get,
++ .put = dpaa2_ceetm_cls_put,
++ .change = dpaa2_ceetm_cls_change,
++ .delete = dpaa2_ceetm_cls_delete,
++ .walk = dpaa2_ceetm_cls_walk,
++ .tcf_chain = dpaa2_ceetm_tcf_chain,
++ .bind_tcf = dpaa2_ceetm_tcf_bind,
++ .unbind_tcf = dpaa2_ceetm_tcf_unbind,
++ .dump = dpaa2_ceetm_cls_dump,
++ .dump_stats = dpaa2_ceetm_cls_dump_stats,
++};
++
++struct Qdisc_ops dpaa2_ceetm_qdisc_ops __read_mostly = {
++ .id = "ceetm",
++ .priv_size = sizeof(struct dpaa2_ceetm_qdisc),
++ .cl_ops = &dpaa2_ceetm_cls_ops,
++ .init = dpaa2_ceetm_init,
++ .destroy = dpaa2_ceetm_destroy,
++ .change = dpaa2_ceetm_change,
++ .dump = dpaa2_ceetm_dump,
++ .attach = dpaa2_ceetm_attach,
++ .owner = THIS_MODULE,
++};
++
++/* Run the filters and classifiers attached to the qdisc on the provided skb */
++int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
++ int *qdid, int *qpri)
++{
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct dpaa2_ceetm_class *cl = NULL;
++ struct tcf_result res;
++ struct tcf_proto *tcf;
++ int result;
++
++ tcf = rcu_dereference_bh(priv->filter_list);
++ while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
++#ifdef CONFIG_NET_CLS_ACT
++ switch (result) {
++ case TC_ACT_QUEUED:
++ case TC_ACT_STOLEN:
++ case TC_ACT_SHOT:
++ /* No valid class found due to action */
++ return -1;
++ }
++#endif
++ cl = (void *)res.class;
++ if (!cl) {
++ /* The filter leads to the qdisc */
++ if (res.classid == sch->handle)
++ return 0;
++
++ cl = dpaa2_ceetm_find(res.classid, sch);
++ /* The filter leads to an invalid class */
++ if (!cl)
++ break;
++ }
++
++ /* The class might have its own filters attached */
++ tcf = rcu_dereference_bh(cl->filter_list);
++ }
++
++ /* No valid class found */
++ if (!cl)
++ return 0;
++
++ switch (cl->type) {
++ case CEETM_ROOT:
++ *qdid = cl->root.ch_id;
++
++ /* The root class does not have a child prio qdisc */
++ if (!cl->child)
++ return 0;
++
++ /* Run the prio qdisc classifiers */
++ return dpaa2_ceetm_classify(skb, cl->child, qdid, qpri);
++
++ case CEETM_PRIO:
++ *qpri = cl->prio.qpri;
++ break;
++ }
++
++ return 0;
++}
++
++int __init dpaa2_ceetm_register(void)
++{
++ int err = 0;
++
++ pr_debug(KBUILD_MODNAME ": " DPAA2_CEETM_DESCRIPTION "\n");
++
++ err = register_qdisc(&dpaa2_ceetm_qdisc_ops);
++ if (unlikely(err))
++ pr_err(KBUILD_MODNAME
++ ": %s:%hu:%s(): register_qdisc() = %d\n",
++ KBUILD_BASENAME ".c", __LINE__, __func__, err);
++
++ return err;
++}
++
++void __exit dpaa2_ceetm_unregister(void)
++{
++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
++ KBUILD_BASENAME ".c", __func__);
++
++ unregister_qdisc(&dpaa2_ceetm_qdisc_ops);
++}
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
+@@ -0,0 +1,182 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * Copyright 2017 NXP
++ *
++ */
++
++#ifndef __DPAA2_ETH_CEETM_H
++#define __DPAA2_ETH_CEETM_H
++
++#include <net/pkt_sched.h>
++#include <net/pkt_cls.h>
++#include <net/netlink.h>
++
++#include "dpaa2-eth.h"
++
++/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
++ * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
++ * are reserved for the maximum 32 CEETM channels (majors and minors are in
++ * hex).
++ */
++#define PFIFO_MIN_OFFSET 0x21
++
++#define DPAA2_CEETM_MIN_WEIGHT 100
++#define DPAA2_CEETM_MAX_WEIGHT 24800
++
++#define DPAA2_CEETM_TD_THRESHOLD 1000
++
++enum wbfs_group_type {
++ WBFS_GRP_A,
++ WBFS_GRP_B,
++ WBFS_GRP_LARGE
++};
++
++enum {
++ DPAA2_CEETM_TCA_UNSPEC,
++ DPAA2_CEETM_TCA_COPT,
++ DPAA2_CEETM_TCA_QOPS,
++ DPAA2_CEETM_TCA_MAX,
++};
++
++/* CEETM configuration types */
++enum dpaa2_ceetm_type {
++ CEETM_ROOT = 1,
++ CEETM_PRIO,
++};
++
++enum {
++ STRICT_PRIORITY = 0,
++ WEIGHTED_A,
++ WEIGHTED_B,
++};
++
++struct dpaa2_ceetm_shaping_cfg {
++ __u64 cir; /* committed information rate */
++ __u64 eir; /* excess information rate */
++ __u16 cbs; /* committed burst size */
++ __u16 ebs; /* excess burst size */
++ __u8 coupled; /* shaper coupling */
++};
++
++extern const struct nla_policy ceetm_policy[DPAA2_CEETM_TCA_MAX];
++
++struct dpaa2_ceetm_class;
++struct dpaa2_ceetm_qdisc_stats;
++struct dpaa2_ceetm_class_stats;
++
++/* corresponds to CEETM shaping at LNI level */
++struct dpaa2_root_q {
++ struct Qdisc **qdiscs;
++ struct dpaa2_ceetm_qdisc_stats __percpu *qstats;
++};
++
++/* corresponds to the number of priorities a channel serves */
++struct dpaa2_prio_q {
++ struct dpaa2_ceetm_class *parent;
++ struct dpni_tx_priorities_cfg tx_prio_cfg;
++};
++
++struct dpaa2_ceetm_qdisc {
++ struct Qdisc_class_hash clhash;
++ struct tcf_proto *filter_list; /* qdisc attached filters */
++
++ enum dpaa2_ceetm_type type; /* ROOT/PRIO */
++ bool shaped;
++ union {
++ struct dpaa2_root_q root;
++ struct dpaa2_prio_q prio;
++ };
++};
++
++/* CEETM Qdisc configuration parameters */
++struct dpaa2_ceetm_tc_qopt {
++ enum dpaa2_ceetm_type type;
++ __u16 shaped;
++ __u8 prio_group_A;
++ __u8 prio_group_B;
++ __u8 separate_groups;
++};
++
++/* root class - corresponds to a channel */
++struct dpaa2_root_c {
++ struct dpaa2_ceetm_shaping_cfg shaping_cfg;
++ u32 ch_id;
++};
++
++/* prio class - corresponds to a strict priority queue (group) */
++struct dpaa2_prio_c {
++ struct dpaa2_ceetm_class_stats __percpu *cstats;
++ u32 qpri;
++ u8 mode;
++ u16 weight;
++};
++
++struct dpaa2_ceetm_class {
++ struct Qdisc_class_common common;
++ int refcnt;
++ struct tcf_proto *filter_list; /* class attached filters */
++ struct Qdisc *parent;
++ struct Qdisc *child;
++
++ enum dpaa2_ceetm_type type; /* ROOT/PRIO */
++ bool shaped;
++ union {
++ struct dpaa2_root_c root;
++ struct dpaa2_prio_c prio;
++ };
++};
++
++/* CEETM Class configuration parameters */
++struct dpaa2_ceetm_tc_copt {
++ enum dpaa2_ceetm_type type;
++ struct dpaa2_ceetm_shaping_cfg shaping_cfg;
++ __u16 shaped;
++ __u8 mode;
++ __u16 weight;
++};
++
++/* CEETM stats */
++struct dpaa2_ceetm_qdisc_stats {
++ __u32 drops;
++};
++
++struct dpaa2_ceetm_class_stats {
++ /* Software counters */
++ struct gnet_stats_basic_packed bstats;
++ __u32 ern_drop_count;
++ __u32 congested_count;
++};
++
++struct dpaa2_ceetm_tc_xstats {
++ __u64 ceetm_dequeue_bytes;
++ __u64 ceetm_dequeue_frames;
++ __u64 ceetm_reject_bytes;
++ __u64 ceetm_reject_frames;
++};
++
++#ifdef CONFIG_FSL_DPAA2_ETH_CEETM
++int __init dpaa2_ceetm_register(void);
++void __exit dpaa2_ceetm_unregister(void);
++int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
++ int *qdid, int *qpri);
++#else
++static inline int dpaa2_ceetm_register(void)
++{
++ return 0;
++}
++
++static inline void dpaa2_ceetm_unregister(void) {}
++
++static inline int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
++ int *qdid, int *qpri)
++{
++ return 0;
++}
++#endif
++
++static inline bool dpaa2_eth_ceetm_is_enabled(struct dpaa2_eth_priv *priv)
++{
++ return priv->ceetm_en;
++}
++
++#endif
+--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
-@@ -0,0 +1,352 @@
+@@ -0,0 +1,357 @@
+
+/* Copyright 2015 Freescale Semiconductor Inc.
+ *
@@ -646,14 +2122,14 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ int i;
+
+ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
-+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n",
++ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
+ "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
-+ "Tx SG", "Enq busy");
++ "Tx SG", "Tx realloc", "Enq busy");
+
+ for_each_online_cpu(i) {
+ stats = per_cpu_ptr(priv->percpu_stats, i);
+ extras = per_cpu_ptr(priv->percpu_extras, i);
-+ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
++ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
+ i,
+ stats->rx_packets,
+ stats->rx_errors,
@@ -662,6 +2138,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ stats->tx_errors,
+ extras->tx_conf_frames,
+ extras->tx_sg_frames,
++ extras->tx_reallocs,
+ extras->tx_portal_busy);
+ }
+
@@ -708,7 +2185,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 fcnt, bcnt;
+ int i, err;
+
-+ seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
++ seq_printf(file, "non-zero FQ stats for %s:\n", priv->net_dev->name);
+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
+ "VFQID", "CPU", "Traffic Class", "Type", "Frames",
+ "Pending frames", "Congestion");
@@ -719,6 +2196,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ if (err)
+ fcnt = 0;
+
++ /* A lot of queues, no use displaying zero traffic ones */
++ if (!fq->stats.frames && !fcnt)
++ continue;
++
+ seq_printf(file, "%5d%16d%16d%16s%16llu%16u%16llu\n",
+ fq->fqid,
+ fq->target_cpu,
@@ -1014,7 +2495,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#endif /* DPAA2_ETH_DEBUGFS_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
-@@ -0,0 +1,184 @@
+@@ -0,0 +1,185 @@
+/* Copyright 2014-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
@@ -1054,6 +2535,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
++#include "dpaa2-eth.h"
+#include <linux/tracepoint.h>
+
+#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
@@ -1201,8 +2683,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#include <trace/define_trace.h>
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
-@@ -0,0 +1,3516 @@
-+/* Copyright 2014-2015 Freescale Semiconductor Inc.
+@@ -0,0 +1,3734 @@
++/* Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016-2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
@@ -1238,18 +2721,17 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#include <linux/etherdevice.h>
+#include <linux/of_net.h>
+#include <linux/interrupt.h>
-+#include <linux/debugfs.h>
-+#include <linux/kthread.h>
+#include <linux/msi.h>
-+#include <linux/net_tstamp.h>
++#include <linux/kthread.h>
+#include <linux/iommu.h>
-+
-+#include "../../fsl-mc/include/dpbp.h"
-+#include "../../fsl-mc/include/dpcon.h"
-+#include "../../fsl-mc/include/mc.h"
-+#include "../../fsl-mc/include/mc-sys.h"
++#include <linux/net_tstamp.h>
++#include <linux/bpf.h>
++#include <linux/filter.h>
++#include <linux/atomic.h>
++#include <net/sock.h>
++#include <linux/fsl/mc.h>
+#include "dpaa2-eth.h"
-+#include "dpkg.h"
++#include "dpaa2-eth-ceetm.h"
+
+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
+ * using trace events only need to #include <trace/events/sched.h>
@@ -1263,7 +2745,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+const char dpaa2_eth_drv_version[] = "0.1";
+
-+void *dpaa2_eth_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr)
++static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
++ dma_addr_t iova_addr)
+{
+ phys_addr_t phys_addr;
+
@@ -1309,26 +2792,26 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ if (fd_format == dpaa2_fd_single)
+ goto free_buf;
+ else if (fd_format != dpaa2_fd_sg)
-+ /* we don't support any other format */
++ /* We don't support any other format */
+ return;
+
-+ /* For S/G frames, we first need to free all SG entries */
++ /* For S/G frames, we first need to free all SG entries
++ * except the first one, which was taken care of already
++ */
+ sgt = vaddr + dpaa2_fd_get_offset(fd);
-+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
++ for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+ addr = dpaa2_sg_get_addr(&sgt[i]);
-+ sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
-+
++ sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_FROM_DEVICE);
-+
-+ put_page(virt_to_head_page(sg_vaddr));
++ DMA_BIDIRECTIONAL);
+
++ skb_free_frag(sg_vaddr);
+ if (dpaa2_sg_is_final(&sgt[i]))
+ break;
+ }
+
+free_buf:
-+ put_page(virt_to_head_page(vaddr));
++ skb_free_frag(vaddr);
+}
+
+/* Build a linear skb based on a single-buffer frame descriptor */
@@ -1377,17 +2860,29 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ /* Get the address and length from the S/G entry */
+ sg_addr = dpaa2_sg_get_addr(sge);
-+ sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, sg_addr);
++ sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
+ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_FROM_DEVICE);
++ DMA_BIDIRECTIONAL);
+
+ sg_length = dpaa2_sg_get_len(sge);
+
+ if (i == 0) {
+ /* We build the skb around the first data buffer */
+ skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
-+ if (unlikely(!skb))
-+ goto err_build;
++ if (unlikely(!skb)) {
++ /* Free the first SG entry now, since we already
++ * unmapped it and obtained the virtual address
++ */
++ skb_free_frag(sg_vaddr);
++
++ /* We still need to subtract the buffers used
++ * by this FD from our software counter
++ */
++ while (!dpaa2_sg_is_final(&sgt[i]) &&
++ i < DPAA2_ETH_MAX_SG_ENTRIES)
++ i++;
++ break;
++ }
+
+ sg_offset = dpaa2_sg_get_offset(sge);
+ skb_reserve(skb, sg_offset);
@@ -1414,21 +2909,57 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ break;
+ }
+
++ WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
++
+ /* Count all data buffers + SG table buffer */
+ ch->buf_count -= i + 2;
+
+ return skb;
++}
+
-+err_build:
-+ /* We still need to subtract the buffers used by this FD from our
-+ * software counter
-+ */
-+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++)
-+ if (dpaa2_sg_is_final(&sgt[i]))
++static int dpaa2_eth_xdp_tx(struct dpaa2_eth_priv *priv,
++ struct dpaa2_fd *fd,
++ void *buf_start,
++ u16 queue_id)
++{
++ struct dpaa2_eth_fq *fq;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_drv_stats *percpu_extras;
++ struct dpaa2_faead *faead;
++ u32 ctrl, frc;
++ int i, err;
++
++ /* Mark the egress frame annotation area as valid */
++ frc = dpaa2_fd_get_frc(fd);
++ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
++ dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
++
++ ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
++ faead = dpaa2_get_faead(buf_start, false);
++ faead->ctrl = cpu_to_le32(ctrl);
++ faead->conf_fqid = 0;
++
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ percpu_extras = this_cpu_ptr(priv->percpu_extras);
++
++ fq = &priv->fq[queue_id];
++ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
++ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
++ priv->tx_qdid, 0,
++ fq->tx_qdbin, fd);
++ if (err != -EBUSY)
+ break;
-+ ch->buf_count -= i + 2;
++ }
+
-+ return NULL;
++ percpu_extras->tx_portal_busy += i;
++ if (unlikely(err)) {
++ percpu_stats->tx_errors++;
++ } else {
++ percpu_stats->tx_packets++;
++ percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
++ }
++
++ return err;
+}
+
+static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
@@ -1439,13 +2970,34 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ for (i = 0; i < count; i++) {
+ /* Same logic as on regular Rx path */
-+ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, buf_array[i]);
++ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
+ dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_FROM_DEVICE);
-+ put_page(virt_to_head_page(vaddr));
++ DMA_BIDIRECTIONAL);
++ skb_free_frag(vaddr);
+ }
+}
+
++static void release_fd_buf(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ dma_addr_t addr)
++{
++ int err;
++
++ ch->rel_buf_array[ch->rel_buf_cnt++] = addr;
++ if (likely(ch->rel_buf_cnt < DPAA2_ETH_BUFS_PER_CMD))
++ return;
++
++ while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
++ ch->rel_buf_array,
++ ch->rel_buf_cnt)) == -EBUSY)
++ cpu_relax();
++
++ if (err)
++ free_bufs(priv, ch->rel_buf_array, ch->rel_buf_cnt);
++
++ ch->rel_buf_cnt = 0;
++}
++
+/* Main Rx frame processing routine */
+static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
@@ -1463,47 +3015,86 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct dpaa2_fas *fas;
+ void *buf_data;
+ u32 status = 0;
++ struct bpf_prog *xdp_prog;
++ struct xdp_buff xdp;
++ u32 xdp_act;
+
+ /* Tracing point */
+ trace_dpaa2_rx_fd(priv->net_dev, fd);
+
-+ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
-+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
++ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
++ dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
+
-+ /* HWA - FAS, timestamp */
-+ fas = dpaa2_eth_get_fas(vaddr);
++ fas = dpaa2_get_fas(vaddr, false);
+ prefetch(fas);
-+ /* data / SG table */
+ buf_data = vaddr + dpaa2_fd_get_offset(fd);
+ prefetch(buf_data);
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
-+ switch (fd_format) {
-+ case dpaa2_fd_single:
++ xdp_prog = READ_ONCE(ch->xdp_prog);
++
++ if (fd_format == dpaa2_fd_single) {
++ if (xdp_prog) {
++ xdp.data = buf_data;
++ xdp.data_end = buf_data + dpaa2_fd_get_len(fd);
++ /* for now, we don't support changes in header size */
++ xdp.data_hard_start = buf_data;
++
++ /* update stats here, as we won't reach the code
++ * that does that for standard frames
++ */
++ percpu_stats->rx_packets++;
++ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
++
++ xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
++ switch (xdp_act) {
++ case XDP_PASS:
++ break;
++ default:
++ bpf_warn_invalid_xdp_action(xdp_act);
++ case XDP_ABORTED:
++ case XDP_DROP:
++ release_fd_buf(priv, ch, addr);
++ goto drop_cnt;
++ case XDP_TX:
++ if (dpaa2_eth_xdp_tx(priv, (struct dpaa2_fd *)fd, vaddr,
++ queue_id)) {
++ dma_unmap_single(dev, addr,
++ DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
++ free_rx_fd(priv, fd, vaddr);
++ ch->buf_count--;
++ }
++ return;
++ }
++ }
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
+ skb = build_linear_skb(priv, ch, fd, vaddr);
-+ break;
-+ case dpaa2_fd_sg:
++ } else if (fd_format == dpaa2_fd_sg) {
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
+ skb = build_frag_skb(priv, ch, buf_data);
-+ put_page(virt_to_head_page(vaddr));
++ skb_free_frag(vaddr);
+ percpu_extras->rx_sg_frames++;
+ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
-+ break;
-+ default:
++ } else {
+ /* We don't support any other format */
-+ goto err_frame_format;
++ goto drop_cnt;
+ }
+
+ if (unlikely(!skb))
-+ goto err_build_skb;
++ goto drop_fd;
+
+ prefetch(skb->data);
+
+ /* Get the timestamp value */
+ if (priv->ts_rx_en) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
-+ u64 *ns = (u64 *)dpaa2_eth_get_ts(vaddr);
++ u64 *ns = dpaa2_get_ts(vaddr, false);
+
+ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
@@ -1531,9 +3122,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ return;
+
-+err_build_skb:
++drop_fd:
+ free_rx_fd(priv, fd, vaddr);
-+err_frame_format:
++drop_cnt:
+ percpu_stats->rx_dropped++;
+}
+
@@ -1553,23 +3144,25 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_fas *fas;
+ u32 status = 0;
-+ bool check_fas_errors = false;
++ u32 fd_errors;
++ bool has_fas_errors = false;
+
-+ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
-+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
++ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
+
+ /* check frame errors in the FD field */
-+ if (fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK) {
-+ check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) &&
-+ !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
++ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_RX_ERR_MASK;
++ if (likely(fd_errors)) {
++ has_fas_errors = (fd_errors & FD_CTRL_FAERR) &&
++ !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
+ if (net_ratelimit())
-+ netdev_dbg(priv->net_dev, "Rx frame FD err: %x08\n",
-+ fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK);
++ netdev_dbg(priv->net_dev, "RX frame FD err: %08x\n",
++ fd_errors);
+ }
+
+ /* check frame errors in the FAS field */
-+ if (check_fas_errors) {
-+ fas = dpaa2_eth_get_fas(vaddr);
++ if (has_fas_errors) {
++ fas = dpaa2_get_fas(vaddr, false);
+ status = le32_to_cpu(fas->status);
+ if (net_ratelimit())
+ netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
@@ -1579,6 +3172,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_stats->rx_errors++;
++ ch->buf_count--;
+}
+#endif
+
@@ -1613,11 +3207,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ }
+
+ fd = dpaa2_dq_fd(dq);
-+
-+ /* prefetch the frame descriptor */
+ prefetch(fd);
+
-+ fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
++ fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
+ fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
+ cleaned++;
+ } while (!is_last);
@@ -1643,18 +3235,21 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
+{
+ struct dpaa2_faead *faead;
-+ u32 ctrl;
-+ u32 frc;
++ u32 ctrl, frc;
+
+ /* Mark the egress frame annotation area as valid */
+ frc = dpaa2_fd_get_frc(fd);
+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
+
++ /* Set hardware annotation size */
++ ctrl = dpaa2_fd_get_ctrl(fd);
++ dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
++
+ /* enable UPD (update prepanded data) bit in FAEAD field of
+ * hardware frame annotation area
+ */
+ ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
-+ faead = dpaa2_eth_get_faead(buf_start);
++ faead = dpaa2_get_faead(buf_start, true);
+ faead->ctrl = cpu_to_le32(ctrl);
+}
+
@@ -1673,7 +3268,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct scatterlist *scl, *crt_scl;
+ int num_sg;
+ int num_dma_bufs;
-+ struct dpaa2_fas *fas;
+ struct dpaa2_eth_swa *swa;
+
+ /* Create and map scatterlist.
@@ -1690,7 +3284,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ sg_init_table(scl, nr_frags + 1);
+ num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
-+ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
++ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
+ if (unlikely(!num_dma_bufs)) {
+ err = -ENOMEM;
+ goto dma_map_sg_failed;
@@ -1698,21 +3292,14 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ /* Prepare the HW SGT structure */
+ sgt_buf_size = priv->tx_data_offset +
-+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
-+ sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
++ sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
++ sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
+ if (unlikely(!sgt_buf)) {
+ err = -ENOMEM;
+ goto sgt_buf_alloc_failed;
+ }
+ sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
-+
-+ /* PTA from egress side is passed as is to the confirmation side so
-+ * we need to clear some fields here in order to find consistent values
-+ * on TX confirmation. We are clearing FAS (Frame Annotation Status)
-+ * field from the hardware annotation area
-+ */
-+ fas = dpaa2_eth_get_fas(sgt_buf);
-+ memset(fas, 0, DPAA2_FAS_SIZE);
++ memset(sgt_buf, 0, sgt_buf_size);
+
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
@@ -1735,10 +3322,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * all of them on Tx Conf.
+ */
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
-+ swa->skb = skb;
-+ swa->scl = scl;
-+ swa->num_sg = num_sg;
-+ swa->num_dma_bufs = num_dma_bufs;
++ swa->type = DPAA2_ETH_SWA_SG;
++ swa->sg.skb = skb;
++ swa->sg.scl = scl;
++ swa->sg.num_sg = num_sg;
++ swa->sg.sgt_size = sgt_buf_size;
+
+ /* Separately map the SGT buffer */
+ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
@@ -1750,8 +3338,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_len(fd, skb->len);
-+
-+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA;
++ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ enable_tx_tstamp(fd, sgt_buf);
@@ -1759,9 +3346,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ return 0;
+
+dma_map_single_failed:
-+ kfree(sgt_buf);
++ skb_free_frag(sgt_buf);
+sgt_buf_alloc_failed:
-+ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
++ dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
+dma_map_sg_failed:
+ kfree(scl);
+ return err;
@@ -1773,29 +3360,27 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct dpaa2_fd *fd)
+{
+ struct device *dev = priv->net_dev->dev.parent;
-+ u8 *buffer_start;
-+ struct sk_buff **skbh;
++ u8 *buffer_start, *aligned_start;
++ struct dpaa2_eth_swa *swa;
+ dma_addr_t addr;
-+ struct dpaa2_fas *fas;
+
-+ buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
-+ DPAA2_ETH_TX_BUF_ALIGN,
-+ DPAA2_ETH_TX_BUF_ALIGN);
++ buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
+
-+ /* PTA from egress side is passed as is to the confirmation side so
-+ * we need to clear some fields here in order to find consistent values
-+ * on TX confirmation. We are clearing FAS (Frame Annotation Status)
-+ * field from the hardware annotation area
++ /* If there's enough room to align the FD address, do it.
++ * It will help hardware optimize accesses.
+ */
-+ fas = dpaa2_eth_get_fas(buffer_start);
-+ memset(fas, 0, DPAA2_FAS_SIZE);
++ aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
++ DPAA2_ETH_TX_BUF_ALIGN);
++ if (aligned_start >= skb->head)
++ buffer_start = aligned_start;
+
+ /* Store a backpointer to the skb at the beginning of the buffer
+ * (in the private data area) such that we can release it
+ * on Tx confirm
+ */
-+ skbh = (struct sk_buff **)buffer_start;
-+ *skbh = skb;
++ swa = (struct dpaa2_eth_swa *)buffer_start;
++ swa->type = DPAA2_ETH_SWA_SINGLE;
++ swa->single.skb = skb;
+
+ addr = dma_map_single(dev, buffer_start,
+ skb_tail_pointer(skb) - buffer_start,
@@ -1807,8 +3392,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
+ dpaa2_fd_set_len(fd, skb->len);
+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
-+
-+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA;
++ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ enable_tx_tstamp(fd, buffer_start);
@@ -1825,59 +3409,41 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * Optionally, return the frame annotation status word (FAS), which needs
+ * to be checked if we're on the confirmation path.
+ */
-+static void free_tx_fd(const struct dpaa2_eth_priv *priv,
++static void free_tx_fd(struct dpaa2_eth_priv *priv,
+ const struct dpaa2_fd *fd,
-+ u32 *status, bool in_napi)
++ bool in_napi)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t fd_addr;
-+ struct sk_buff **skbh, *skb;
++ struct sk_buff *skb = NULL;
+ unsigned char *buffer_start;
-+ int unmap_size;
-+ struct scatterlist *scl;
-+ int num_sg, num_dma_bufs;
+ struct dpaa2_eth_swa *swa;
+ u8 fd_format = dpaa2_fd_get_format(fd);
-+ struct dpaa2_fas *fas;
+
+ fd_addr = dpaa2_fd_get_addr(fd);
-+ skbh = dpaa2_eth_iova_to_virt(priv->iommu_domain, fd_addr);
++ buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
++ swa = (struct dpaa2_eth_swa *)buffer_start;
+
-+ /* HWA - FAS, timestamp (for Tx confirmation frames) */
-+ fas = dpaa2_eth_get_fas(skbh);
-+ prefetch(fas);
-+
-+ switch (fd_format) {
-+ case dpaa2_fd_single:
-+ skb = *skbh;
-+ buffer_start = (unsigned char *)skbh;
++ if (fd_format == dpaa2_fd_single) {
++ skb = swa->single.skb;
+ /* Accessing the skb buffer is safe before dma unmap, because
+ * we didn't map the actual skb shell.
+ */
+ dma_unmap_single(dev, fd_addr,
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_BIDIRECTIONAL);
-+ break;
-+ case dpaa2_fd_sg:
-+ swa = (struct dpaa2_eth_swa *)skbh;
-+ skb = swa->skb;
-+ scl = swa->scl;
-+ num_sg = swa->num_sg;
-+ num_dma_bufs = swa->num_dma_bufs;
++ } else if (fd_format == dpaa2_fd_sg) {
++ skb = swa->sg.skb;
+
+ /* Unmap the scatterlist */
-+ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
-+ kfree(scl);
++ dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, DMA_BIDIRECTIONAL);
++ kfree(swa->sg.scl);
+
+ /* Unmap the SGT buffer */
-+ unmap_size = priv->tx_data_offset +
-+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
-+ dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
-+ break;
-+ default:
-+ /* Unsupported format, mark it as errored and give up */
-+ if (status)
-+ *status = ~0;
++ dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
++ DMA_BIDIRECTIONAL);
++ } else {
++ netdev_dbg(priv->net_dev, "Invalid FD format\n");
+ return;
+ }
+
@@ -1888,43 +3454,38 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
-+ ns = (u64 *)dpaa2_eth_get_ts(skbh);
++ ns = dpaa2_get_ts(buffer_start, true);
+ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
+ shhwtstamps.hwtstamp = ns_to_ktime(*ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+
-+ /* Read the status from the Frame Annotation after we unmap the first
-+ * buffer but before we free it. The caller function is responsible
-+ * for checking the status value.
-+ */
-+ if (status)
-+ *status = le32_to_cpu(fas->status);
-+
-+ /* Free SGT buffer kmalloc'ed on tx */
++ /* Free SGT buffer allocated on tx */
+ if (fd_format != dpaa2_fd_single)
-+ kfree(skbh);
++ skb_free_frag(buffer_start);
+
+ /* Move on with skb release */
+ napi_consume_skb(skb, in_napi);
+}
+
-+static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
++static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa2_fd fd;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_drv_stats *percpu_extras;
++ unsigned int needed_headroom;
+ struct dpaa2_eth_fq *fq;
-+ u16 queue_mapping = skb_get_queue_mapping(skb);
-+ int err, i;
++ u16 queue_mapping;
++ int err, i, ch_id = 0, qpri = 0;
+
-+ /* If we're congested, stop this tx queue; transmission of the
-+ * current skb happens regardless of congestion state
-+ */
++ queue_mapping = skb_get_queue_mapping(skb);
+ fq = &priv->fq[queue_mapping];
+
++ /* If we're congested, stop this tx queue; transmission of
++ * the current skb happens regardless of congestion state
++ */
+ dma_sync_single_for_cpu(dev, priv->cscn_dma,
+ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
@@ -1935,14 +3496,19 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
-+ if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
++ /* For non-linear skb we don't need a minimum headroom */
++ needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
++ if (skb_headroom(skb) < needed_headroom) {
+ struct sk_buff *ns;
+
-+ ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
++ ns = skb_realloc_headroom(skb, needed_headroom);
+ if (unlikely(!ns)) {
+ percpu_stats->tx_dropped++;
+ goto err_alloc_headroom;
+ }
++ percpu_extras->tx_reallocs++;
++ if (skb->sk)
++ skb_set_owner_w(ns, skb->sk);
+ dev_kfree_skb(skb);
+ skb = ns;
+ }
@@ -1976,13 +3542,16 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ /* Tracing point */
+ trace_dpaa2_tx_fd(net_dev, &fd);
+
++ if (dpaa2_eth_ceetm_is_enabled(priv)) {
++ err = dpaa2_ceetm_classify(skb, net_dev->qdisc, &ch_id, &qpri);
++ if (err)
++ goto err_ceetm_classify;
++ }
++
+ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-+ err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
++ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
++ priv->tx_qdid, qpri,
+ fq->tx_qdbin, &fd);
-+ /* TODO: This doesn't work. Check on simulator.
-+ * err = dpaa2_io_service_enqueue_fq(NULL,
-+ * priv->fq[0].fqid_tx, &fd);
-+ */
+ if (err != -EBUSY)
+ break;
+ }
@@ -1990,7 +3559,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ if (unlikely(err < 0)) {
+ percpu_stats->tx_errors++;
+ /* Clean up everything, including freeing the skb */
-+ free_tx_fd(priv, &fd, NULL, false);
++ free_tx_fd(priv, &fd, false);
+ } else {
+ percpu_stats->tx_packets++;
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
@@ -1998,6 +3567,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ return NETDEV_TX_OK;
+
++err_ceetm_classify:
++ free_tx_fd(priv, &fd, false);
+err_build_fd:
+err_alloc_headroom:
+ dev_kfree_skb(skb);
@@ -2015,9 +3586,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct device *dev = priv->net_dev->dev.parent;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_drv_stats *percpu_extras;
-+ u32 status = 0;
-+ bool errors = !!(fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK);
-+ bool check_fas_errors = false;
++ u32 fd_errors;
+
+ /* Tracing point */
+ trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
@@ -2034,28 +3603,20 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ netif_tx_wake_all_queues(priv->net_dev);
+ }
+
-+ /* check frame errors in the FD field */
-+ if (unlikely(errors)) {
-+ check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) &&
-+ !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
-+ if (net_ratelimit())
-+ netdev_dbg(priv->net_dev, "Tx frame FD err: %x08\n",
-+ fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK);
-+ }
++ /* Check frame errors in the FD field */
++ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
++ free_tx_fd(priv, fd, true);
+
-+ free_tx_fd(priv, fd, check_fas_errors ? &status : NULL, true);
-+
-+ /* if there are no errors, we're done */
-+ if (likely(!errors))
++ if (likely(!fd_errors))
+ return;
+
++ if (net_ratelimit())
++ netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
++ fd_errors);
++
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ /* Tx-conf logically pertains to the egress path. */
+ percpu_stats->tx_errors++;
-+
-+ if (net_ratelimit())
-+ netdev_dbg(priv->net_dev, "Tx frame FAS err: %x08\n",
-+ status & DPAA2_FAS_TX_ERR_MASK);
+}
+
+static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
@@ -2066,7 +3627,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ DPNI_OFF_RX_L3_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev,
-+ "dpni_set_offload() DPNI_OFF_RX_L3_CSUM failed\n");
++ "dpni_set_offload(RX_L3_CSUM) failed\n");
+ return err;
+ }
+
@@ -2074,7 +3635,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ DPNI_OFF_RX_L4_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev,
-+ "dpni_set_offload() DPNI_OFF_RX_L4_CSUM failed\n");
++ "dpni_set_offload(RX_L4_CSUM) failed\n");
+ return err;
+ }
+
@@ -2088,16 +3649,14 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_TX_L3_CSUM, enable);
+ if (err) {
-+ netdev_err(priv->net_dev,
-+ "dpni_set_offload() DPNI_OFF_RX_L3_CSUM failed\n");
++ netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
+ return err;
+ }
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_TX_L4_CSUM, enable);
+ if (err) {
-+ netdev_err(priv->net_dev,
-+ "dpni_set_offload() DPNI_OFF_RX_L4_CSUM failed\n");
++ netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
+ return err;
+ }
+
@@ -2107,7 +3666,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+/* Perform a single release command to add buffers
+ * to the specified buffer pool
+ */
-+static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
++static int add_bufs(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch, u16 bpid)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
@@ -2117,16 +3677,16 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
+ /* Allocate buffer visible to WRIOP + skb shared info +
-+ * alignment padding.
++ * alignment padding
+ */
-+ buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE(priv));
++ buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
+ if (unlikely(!buf))
+ goto err_alloc;
+
+ buf = PTR_ALIGN(buf, priv->rx_buf_align);
+
+ addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_FROM_DEVICE);
++ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ goto err_map;
+
@@ -2134,21 +3694,21 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ /* tracing point */
+ trace_dpaa2_eth_buf_seed(priv->net_dev,
-+ buf, DPAA2_ETH_BUF_RAW_SIZE(priv),
++ buf, dpaa2_eth_buf_raw_size(priv),
+ addr, DPAA2_ETH_RX_BUF_SIZE,
+ bpid);
+ }
+
+release_bufs:
+ /* In case the portal is busy, retry until successful */
-+ while ((err = dpaa2_io_service_release(NULL, bpid,
++ while ((err = dpaa2_io_service_release(ch->dpio, bpid,
+ buf_array, i)) == -EBUSY)
+ cpu_relax();
+
+ /* If release command failed, clean up and bail out; not much
+ * else we can do about it
+ */
-+ if (unlikely(err)) {
++ if (err) {
+ free_bufs(priv, buf_array, i);
+ return 0;
+ }
@@ -2156,7 +3716,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ return i;
+
+err_map:
-+ put_page(virt_to_head_page(buf));
++ skb_free_frag(buf);
+err_alloc:
+ /* If we managed to allocate at least some buffers, release them */
+ if (i)
@@ -2179,9 +3739,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ preempt_disable();
+ for (j = 0; j < priv->num_channels; j++) {
+ priv->channel[j]->buf_count = 0;
-+ for (i = 0; i < priv->num_bufs;
++ for (i = 0; i < priv->max_bufs_per_ch;
+ i += DPAA2_ETH_BUFS_PER_CMD) {
-+ new_count = add_bufs(priv, bpid);
++ new_count = add_bufs(priv, priv->channel[j], bpid);
+ priv->channel[j]->buf_count += new_count;
+
+ if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
@@ -2236,15 +3796,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ return 0;
+
+ do {
-+ new_count = add_bufs(priv, bpid);
++ new_count = add_bufs(priv, ch, bpid);
+ if (unlikely(!new_count)) {
+ /* Out of memory; abort for now, we'll try later on */
+ break;
+ }
+ ch->buf_count += new_count;
-+ } while (ch->buf_count < priv->num_bufs);
++ } while (ch->buf_count < priv->max_bufs_per_ch);
+
-+ if (unlikely(ch->buf_count < priv->num_bufs))
++ if (unlikely(ch->buf_count < priv->max_bufs_per_ch))
+ return -ENOMEM;
+
+ return 0;
@@ -2257,7 +3817,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ /* Retry while portal is busy */
+ do {
-+ err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
++ err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
++ ch->store);
+ dequeues++;
+ cpu_relax();
+ } while (err == -EBUSY);
@@ -2278,7 +3839,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
+{
+ struct dpaa2_eth_channel *ch;
-+ int rx_cleaned = 0, tx_conf_cleaned = 0;
++ int rx_cleaned = 0, tx_conf_cleaned = 0;
+ bool store_cleaned;
+ struct dpaa2_eth_priv *priv;
+ int err;
@@ -2306,13 +3867,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ } while (store_cleaned);
+
+ /* We didn't consume the entire budget, finish napi and
-+ * re-enable data availability notifications.
++ * re-enable data availability notifications
+ */
+ napi_complete(napi);
+ do {
-+ err = dpaa2_io_service_rearm(NULL, &ch->nctx);
++ err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
+ cpu_relax();
+ } while (err == -EBUSY);
++ WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
++ ch->nctx.desired_cpu);
+
+ return max(rx_cleaned, 1);
+}
@@ -2364,7 +3927,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ netif_carrier_off(priv->net_dev);
+ }
+
-+ netdev_info(priv->net_dev, "Link Event: state %s",
++ netdev_info(priv->net_dev, "Link Event: state %s\n",
+ state.up ? "up" : "down");
+
+ return 0;
@@ -2397,10 +3960,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ priv->dpbp_dev->obj_desc.id, priv->bpid);
+ }
+
-+ if (priv->tx_pause_frames)
-+ priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
-+ else
-+ priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD;
++ priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
+
+ err = dpni_enable(priv->mc_io, 0, priv->mc_token);
+ if (err < 0) {
@@ -2431,6 +3991,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int dpni_enabled;
+ int retries = 10, i;
++ int err = 0;
+
+ netif_tx_stop_all_queues(net_dev);
+ netif_carrier_off(net_dev);
@@ -2442,20 +4003,21 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ dpni_disable(priv->mc_io, 0, priv->mc_token);
+ dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
+ if (dpni_enabled)
-+ /* Allow the MC some slack */
++ /* Allow the hardware some slack */
+ msleep(100);
+ } while (dpni_enabled && --retries);
+ if (!retries) {
+ netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
-+ /* Must go on and disable NAPI nonetheless, so we don't crash at
-+ * the next "ifconfig up"
++ /* Must go on and finish processing pending frames, so we don't
++ * crash at the next "ifconfig up"
+ */
++ err = -ETIMEDOUT;
+ }
+
+ priv->refill_thresh = 0;
+
+ /* Wait for all running napi poll routines to finish, so that no
-+ * new refill operations are started.
++ * new refill operations are started
+ */
+ for (i = 0; i < priv->num_channels; i++)
+ napi_synchronize(&priv->channel[i]->napi);
@@ -2463,7 +4025,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ /* Empty the buffer pool */
+ drain_pool(priv);
+
-+ return 0;
++ return err;
+}
+
+static int dpaa2_eth_init(struct net_device *net_dev)
@@ -2538,25 +4100,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ return stats;
+}
+
-+static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int err;
-+
-+ /* Set the maximum Rx frame length to match the transmit side;
-+ * account for L2 headers when computing the MFL
-+ */
-+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
-+ (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
-+ if (err) {
-+ netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
-+ return err;
-+ }
-+
-+ net_dev->mtu = mtu;
-+ return 0;
-+}
-+
+/* Copy mac unicast addresses from @net_dev to @priv.
+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
+ */
@@ -2621,7 +4164,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ }
+ if (mc_count + uc_count > max_mac) {
+ netdev_info(net_dev,
-+ "Unicast + Multicast addr count reached %d, max allowed is %d; forcing promisc\n",
++ "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
+ uc_count + mc_count, max_mac);
+ goto force_mc_promisc;
+ }
@@ -2757,6 +4300,134 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ return -EINVAL;
+}
+
++static int set_buffer_layout(struct dpaa2_eth_priv *priv)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_buffer_layout buf_layout = {0};
++ int err;
++
++ /* We need to check for WRIOP version 1.0.0, but depending on the MC
++ * version, this number is not always provided correctly on rev1.
++ * We need to check for both alternatives in this situation.
++ */
++ if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
++ priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
++ priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
++ else
++ priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
++
++ /* tx buffer */
++ buf_layout.pass_timestamp = true;
++ buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
++ buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
++ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
++ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX, &buf_layout);
++ if (err) {
++ dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
++ return err;
++ }
++
++ /* tx-confirm buffer */
++ buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
++ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX_CONFIRM, &buf_layout);
++ if (err) {
++ dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
++ return err;
++ }
++
++ /* Now that we've set our tx buffer layout, retrieve the minimum
++ * required tx data offset.
++ */
++ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
++ &priv->tx_data_offset);
++ if (err) {
++ dev_err(dev, "dpni_get_tx_data_offset() failed\n");
++ return err;
++ }
++
++ if ((priv->tx_data_offset % 64) != 0)
++ dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
++ priv->tx_data_offset);
++
++ /* rx buffer */
++ buf_layout.pass_frame_status = true;
++ buf_layout.pass_parser_result = true;
++ buf_layout.data_align = priv->rx_buf_align;
++ buf_layout.private_data_size = 0;
++ buf_layout.data_head_room = dpaa2_eth_rx_headroom(priv);
++ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
++ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
++ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
++ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
++ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
++ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX, &buf_layout);
++ if (err) {
++ dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
++ return err;
++ }
++
++ return 0;
++}
++
++static int dpaa2_eth_set_xdp(struct net_device *net_dev, struct bpf_prog *prog)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpaa2_eth_channel *ch;
++ struct bpf_prog *old_prog;
++ int i, err;
++
++ /* No support for SG frames */
++ if (DPAA2_ETH_L2_MAX_FRM(net_dev->mtu) > DPAA2_ETH_RX_BUF_SIZE)
++ return -EINVAL;
++
++ if (netif_running(net_dev)) {
++ err = dpaa2_eth_stop(net_dev);
++ if (err)
++ return err;
++ }
++
++ if (prog) {
++ prog = bpf_prog_add(prog, priv->num_channels - 1);
++ if (IS_ERR(prog))
++ return PTR_ERR(prog);
++ }
++
++ priv->has_xdp_prog = !!prog;
++
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ old_prog = xchg(&ch->xdp_prog, prog);
++ if (old_prog)
++ bpf_prog_put(old_prog);
++ }
++
++ if (netif_running(net_dev)) {
++ err = dpaa2_eth_open(net_dev);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
++static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_xdp *xdp)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(dev);
++
++ switch (xdp->command) {
++ case XDP_SETUP_PROG:
++ return dpaa2_eth_set_xdp(dev, xdp->prog);
++ case XDP_QUERY_PROG:
++ xdp->prog_attached = priv->has_xdp_prog;
++ return 0;
++ default:
++ return -EINVAL;
++ }
++}
++
+static const struct net_device_ops dpaa2_eth_ops = {
+ .ndo_open = dpaa2_eth_open,
+ .ndo_start_xmit = dpaa2_eth_tx,
@@ -2764,10 +4435,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ .ndo_init = dpaa2_eth_init,
+ .ndo_set_mac_address = dpaa2_eth_set_addr,
+ .ndo_get_stats64 = dpaa2_eth_get_stats,
-+ .ndo_change_mtu = dpaa2_eth_change_mtu,
+ .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
+ .ndo_set_features = dpaa2_eth_set_features,
+ .ndo_do_ioctl = dpaa2_eth_ioctl,
++ .ndo_xdp = dpaa2_eth_xdp,
+};
+
+static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -2800,34 +4471,32 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
+ if (err) {
+ dev_err(dev, "dpcon_open() failed\n");
-+ goto err_open;
++ goto free;
+ }
+
+ err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
+ if (err) {
+ dev_err(dev, "dpcon_reset() failed\n");
-+ goto err_reset;
++ goto close;
+ }
+
+ err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
+ if (err) {
+ dev_err(dev, "dpcon_get_attributes() failed\n");
-+ goto err_get_attr;
++ goto close;
+ }
+
+ err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
+ if (err) {
+ dev_err(dev, "dpcon_enable() failed\n");
-+ goto err_enable;
++ goto close;
+ }
+
+ return dpcon;
+
-+err_enable:
-+err_get_attr:
-+err_reset:
++close:
+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
-+err_open:
++free:
+ fsl_mc_object_free(dpcon);
+
+ return NULL;
@@ -2880,7 +4549,14 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+static void free_channel(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *channel)
+{
++ struct bpf_prog *prog;
++
+ free_dpcon(priv, channel->dpcon);
++
++ prog = READ_ONCE(channel->xdp_prog);
++ if (prog)
++ bpf_prog_put(prog);
++
+ kfree(channel);
+}
+
@@ -2911,6 +4587,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ if (!channel) {
+ dev_info(dev,
+ "No affine channel for cpu %d and above\n", i);
++ err = -ENODEV;
+ goto err_alloc_ch;
+ }
+
@@ -2923,12 +4600,16 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ nctx->desired_cpu = i;
+
+ /* Register the new context */
-+ err = dpaa2_io_service_register(NULL, nctx);
++ channel->dpio = dpaa2_io_service_select(i);
++ err = dpaa2_io_service_register(channel->dpio, nctx);
+ if (err) {
+ dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
+ /* If no affine DPIO for this core, there's probably
-+ * none available for next cores either.
++ * none available for next cores either. Signal we want
++ * to retry later, in case the DPIO devices weren't
++ * probed yet.
+ */
++ err = -EPROBE_DEFER;
+ goto err_service_reg;
+ }
+
@@ -2957,23 +4638,17 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ break;
+ }
+
-+ /* Tx confirmation queues can only be serviced by cpus
-+ * with an affine DPIO/channel
-+ */
-+ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
-+
+ return 0;
+
+err_set_cdan:
-+ dpaa2_io_service_deregister(NULL, nctx);
++ dpaa2_io_service_deregister(channel->dpio, nctx);
+err_service_reg:
+ free_channel(priv, channel);
+err_alloc_ch:
+ if (cpumask_empty(&priv->dpio_cpumask)) {
-+ dev_dbg(dev, "No cpu with an affine DPIO/DPCON\n");
-+ return -ENODEV;
++ dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
++ return err;
+ }
-+ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
+
+ dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
+ cpumask_pr_args(&priv->dpio_cpumask));
@@ -2989,7 +4664,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ /* deregister CDAN notifications and free channels */
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
-+ dpaa2_io_service_deregister(NULL, &ch->nctx);
++ dpaa2_io_service_deregister(ch->dpio, &ch->nctx);
+ free_channel(priv, ch);
+ }
+}
@@ -3015,7 +4690,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+static void set_fq_affinity(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
-+ struct cpumask xps_mask = CPU_MASK_NONE;
++ struct cpumask xps_mask;
+ struct dpaa2_eth_fq *fq;
+ int rx_cpu, txc_cpu;
+ int i, err;
@@ -3024,8 +4699,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * This may well change at runtime, either through irqbalance or
+ * through direct user intervention.
+ */
-+ rx_cpu = cpumask_first(&priv->dpio_cpumask);
-+ txc_cpu = cpumask_first(&priv->txconf_cpumask);
++ rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
@@ -3040,18 +4714,19 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ case DPAA2_TX_CONF_FQ:
+ fq->target_cpu = txc_cpu;
+
-+ /* register txc_cpu to XPS */
++ /* Tell the stack to affine to txc_cpu the Tx queue
++ * associated with the confirmation one
++ */
++ cpumask_clear(&xps_mask);
+ cpumask_set_cpu(txc_cpu, &xps_mask);
+ err = netif_set_xps_queue(priv->net_dev, &xps_mask,
+ fq->flowid);
+ if (err)
-+ dev_info_once(dev,
-+ "Tx: error setting XPS queue\n");
-+ cpumask_clear_cpu(txc_cpu, &xps_mask);
++ dev_info_once(dev, "Error setting XPS queue\n");
+
-+ txc_cpu = cpumask_next(txc_cpu, &priv->txconf_cpumask);
++ txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
+ if (txc_cpu >= nr_cpu_ids)
-+ txc_cpu = cpumask_first(&priv->txconf_cpumask);
++ txc_cpu = cpumask_first(&priv->dpio_cpumask);
+ break;
+ default:
+ dev_err(dev, "Unknown FQ type: %d\n", fq->type);
@@ -3064,10 +4739,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+{
+ int i, j;
+
-+ /* We have one TxConf FQ per Tx flow. Tx queues MUST be at the
-+ * beginning of the queue array.
-+ * Number of Rx and Tx queues are the same.
-+ * We only support one traffic class for now.
++ /* We have one TxConf FQ per Tx flow.
++ * The number of Tx and Rx queues is the same.
++ * Tx queues come first in the fq array.
+ */
+ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
+ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
@@ -3098,8 +4772,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+{
+ int err;
+ struct fsl_mc_device *dpbp_dev;
-+ struct dpbp_attr dpbp_attrs;
+ struct device *dev = priv->net_dev->dev.parent;
++ struct dpbp_attr dpbp_attrs;
+
+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
+ &dpbp_dev);
@@ -3135,9 +4809,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ dev_err(dev, "dpbp_get_attributes() failed\n");
+ goto err_get_attr;
+ }
-+
+ priv->bpid = dpbp_attrs.bpid;
-+ priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
++
++ /* By default we start with flow control enabled */
++ priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
+
+ return 0;
+
@@ -3162,12 +4837,13 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+static int setup_tx_congestion(struct dpaa2_eth_priv *priv)
+{
-+ struct dpni_congestion_notification_cfg cong_notif_cfg = { 0 };
++ struct dpni_congestion_notification_cfg notif_cfg = {0};
+ struct device *dev = priv->net_dev->dev.parent;
+ int err;
+
+ priv->cscn_unaligned = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
+ GFP_KERNEL);
++
+ if (!priv->cscn_unaligned)
+ return -ENOMEM;
+
@@ -3180,17 +4856,16 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ goto err_dma_map;
+ }
+
-+ cong_notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
-+ cong_notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
-+ cong_notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
-+ cong_notif_cfg.message_ctx = (u64)priv;
-+ cong_notif_cfg.message_iova = priv->cscn_dma;
-+ cong_notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
-+ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
-+ DPNI_CONG_OPT_COHERENT_WRITE;
++ notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
++ notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
++ notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
++ notif_cfg.message_ctx = (u64)priv;
++ notif_cfg.message_iova = priv->cscn_dma;
++ notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
++ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
++ DPNI_CONG_OPT_COHERENT_WRITE;
+ err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX, 0,
-+ &cong_notif_cfg);
++ DPNI_QUEUE_TX, 0, &notif_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_congestion_notification failed\n");
+ goto err_set_cong;
@@ -3212,20 +4887,32 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct device *dev = &ls_dev->dev;
+ struct dpaa2_eth_priv *priv;
+ struct net_device *net_dev;
-+ struct dpni_buffer_layout buf_layout;
+ struct dpni_link_cfg cfg = {0};
+ int err;
+
+ net_dev = dev_get_drvdata(dev);
+ priv = netdev_priv(net_dev);
+
-+ priv->dpni_id = ls_dev->obj_desc.id;
-+
+ /* get a handle for the DPNI object */
-+ err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token);
++ err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
+ if (err) {
+ dev_err(dev, "dpni_open() failed\n");
-+ goto err_open;
++ return err;
++ }
++
++ /* Check if we can work with this DPNI object */
++ err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
++ &priv->dpni_ver_minor);
++ if (err) {
++ dev_err(dev, "dpni_get_api_version() failed\n");
++ goto close;
++ }
++ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
++ dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
++ priv->dpni_ver_major, priv->dpni_ver_minor,
++ DPNI_VER_MAJOR, DPNI_VER_MINOR);
++ err = -ENOTSUPP;
++ goto close;
+ }
+
+ ls_dev->mc_io = priv->mc_io;
@@ -3234,130 +4921,47 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
+ if (err) {
+ dev_err(dev, "dpni_reset() failed\n");
-+ goto err_reset;
++ goto close;
+ }
+
+ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
+ &priv->dpni_attrs);
-+
+ if (err) {
+ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
-+ goto err_get_attr;
-+ }
-+
-+ /* due to a limitation in WRIOP 1.0.0 (ERR009354), the Rx buf
-+ * align value must be a multiple of 256.
-+ */
-+ priv->rx_buf_align =
-+ priv->dpni_attrs.wriop_version & 0x3ff ?
-+ DPAA2_ETH_RX_BUF_ALIGN : DPAA2_ETH_RX_BUF_ALIGN_V1;
-+
-+ /* Update number of logical FQs in netdev */
-+ err = netif_set_real_num_tx_queues(net_dev,
-+ dpaa2_eth_queue_count(priv));
-+ if (err) {
-+ dev_err(dev, "netif_set_real_num_tx_queues failed (%d)\n", err);
-+ goto err_set_tx_queues;
++ goto close;
+ }
+
-+ err = netif_set_real_num_rx_queues(net_dev,
-+ dpaa2_eth_queue_count(priv));
-+ if (err) {
-+ dev_err(dev, "netif_set_real_num_rx_queues failed (%d)\n", err);
-+ goto err_set_rx_queues;
-+ }
-+
-+ /* Configure buffer layouts */
-+ /* rx buffer */
-+ buf_layout.pass_parser_result = true;
-+ buf_layout.pass_frame_status = true;
-+ buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
-+ buf_layout.data_align = priv->rx_buf_align;
-+ buf_layout.data_head_room = DPAA2_ETH_RX_HEAD_ROOM;
-+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
-+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
-+ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
-+ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
-+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_RX, &buf_layout);
-+ if (err) {
-+ dev_err(dev,
-+ "dpni_set_buffer_layout(RX) failed\n");
-+ goto err_buf_layout;
-+ }
-+
-+ /* tx buffer */
-+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
-+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
-+ buf_layout.pass_timestamp = true;
-+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX, &buf_layout);
-+ if (err) {
-+ dev_err(dev,
-+ "dpni_set_buffer_layout(TX) failed\n");
-+ goto err_buf_layout;
-+ }
-+
-+ /* tx-confirm buffer */
-+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
-+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX_CONFIRM, &buf_layout);
-+ if (err) {
-+ dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
-+ goto err_buf_layout;
-+ }
-+
-+ /* Now that we've set our tx buffer layout, retrieve the minimum
-+ * required tx data offset.
-+ */
-+ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
-+ &priv->tx_data_offset);
-+ if (err) {
-+ dev_err(dev, "dpni_get_tx_data_offset() failed (%d)\n", err);
-+ goto err_data_offset;
-+ }
-+
-+ if ((priv->tx_data_offset % 64) != 0)
-+ dev_warn(dev, "Tx data offset (%d) not a multiple of 64B",
-+ priv->tx_data_offset);
++ err = set_buffer_layout(priv);
++ if (err)
++ goto close;
+
+ /* Enable congestion notifications for Tx queues */
+ err = setup_tx_congestion(priv);
+ if (err)
-+ goto err_tx_cong;
++ goto close;
+
+ /* allocate classification rule space */
+ priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) *
+ dpaa2_eth_fs_count(priv), GFP_KERNEL);
+ if (!priv->cls_rule)
-+ goto err_cls_rule;
++		{ err = -ENOMEM; goto close; }
+
+ /* Enable flow control */
+ cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
-+ priv->tx_pause_frames = 1;
-+
++ priv->tx_pause_frames = true;
+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
+ if (err) {
-+ netdev_err(net_dev, "ERROR %d setting link cfg", err);
-+ goto err_set_link_cfg;
++ dev_err(dev, "dpni_set_link_cfg() failed\n");
++ goto cls_free;
+ }
+
+ return 0;
+
-+err_set_link_cfg:
-+err_cls_rule:
-+err_tx_cong:
-+err_data_offset:
-+err_buf_layout:
-+err_set_rx_queues:
-+err_set_tx_queues:
-+err_get_attr:
-+err_reset:
++cls_free:
++ kfree(priv->cls_rule);
++close:
+ dpni_close(priv->mc_io, 0, priv->mc_token);
-+err_open:
++
+ return err;
+}
+
@@ -3379,12 +4983,44 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ kfree(priv->cscn_unaligned);
+}
+
++static int setup_rx_flow(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_queue queue;
++ struct dpni_queue_id qid;
++ int err;
++
++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
++ if (err) {
++ dev_err(dev, "dpni_get_queue(RX) failed\n");
++ return err;
++ }
++
++ fq->fqid = qid.fqid;
++
++ queue.destination.id = fq->channel->dpcon_id;
++ queue.destination.type = DPNI_DEST_DPCON;
++ queue.destination.priority = 1;
++ queue.user_context = (u64)(uintptr_t)fq;
++ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX, fq->tc, fq->flowid,
++ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
++ &queue);
++ if (err) {
++ dev_err(dev, "dpni_set_queue(RX) failed\n");
++ return err;
++ }
++
++ return 0;
++}
++
+static int set_queue_taildrop(struct dpaa2_eth_priv *priv,
+ struct dpni_taildrop *td)
+{
+ struct device *dev = priv->net_dev->dev.parent;
-+ int err, i;
-+
++ int i, err;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ if (priv->fq[i].type != DPAA2_RX_FQ)
@@ -3398,6 +5034,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
+ return err;
+ }
++
++ dev_dbg(dev, "%s taildrop for Rx group tc %d\n",
++ (td->enable ? "Enabled" : "Disabled"),
++ i);
+ }
+
+ return 0;
@@ -3428,7 +5068,12 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
+ return err;
+ }
++
++ dev_dbg(dev, "%s taildrop for Rx queue id %d tc %d\n",
++ (tc_td->enable ? "Enabled" : "Disabled"),
++ priv->fq[i].flowid, priv->fq[i].tc);
+ }
++
+ return 0;
+}
+
@@ -3449,7 +5094,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ case DPAA2_ETH_TD_NONE:
+ memset(&td_queue, 0, sizeof(struct dpni_taildrop));
+ memset(&td_group, 0, sizeof(struct dpni_taildrop));
-+ priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC /
++ priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC /
+ priv->num_channels;
+ break;
+ case DPAA2_ETH_TD_QUEUE:
@@ -3458,7 +5103,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ td_queue.units = DPNI_CONGESTION_UNIT_BYTES;
+ td_queue.threshold = DPAA2_ETH_TAILDROP_THRESH /
+ dpaa2_eth_tc_count(priv);
-+ priv->num_bufs = DPAA2_ETH_NUM_BUFS_TD;
++ priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_PER_CH;
+ break;
+ case DPAA2_ETH_TD_GROUP:
+ memset(&td_queue, 0, sizeof(struct dpni_taildrop));
@@ -3466,7 +5111,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ td_group.units = DPNI_CONGESTION_UNIT_FRAMES;
+ td_group.threshold = NAPI_POLL_WEIGHT *
+ dpaa2_eth_queue_count(priv);
-+ priv->num_bufs = NAPI_POLL_WEIGHT *
++ priv->max_bufs_per_ch = NAPI_POLL_WEIGHT *
+ dpaa2_eth_tc_count(priv);
+ break;
+ default:
@@ -3481,39 +5126,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ if (err)
+ return err;
+
-+ priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
-+
-+ return 0;
-+}
-+
-+static int setup_rx_flow(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_fq *fq)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpni_queue q = { { 0 } };
-+ struct dpni_queue_id qid;
-+ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
-+ int err;
-+
-+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_RX, fq->tc, fq->flowid, &q, &qid);
-+ if (err) {
-+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
-+ return err;
-+ }
-+
-+ fq->fqid = qid.fqid;
-+
-+ q.destination.id = fq->channel->dpcon_id;
-+ q.destination.type = DPNI_DEST_DPCON;
-+ q.destination.priority = 1;
-+ q.user_context = (u64)fq;
-+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_RX, fq->tc, fq->flowid, q_opt, &q);
-+ if (err) {
-+ dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
-+ return err;
-+ }
++ priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
+
+ return 0;
+}
@@ -3522,37 +5135,39 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct dpaa2_eth_fq *fq)
+{
+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpni_queue q = { { 0 } };
++ struct dpni_queue queue;
+ struct dpni_queue_id qid;
-+ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
+ int err;
+
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX, 0, fq->flowid, &q, &qid);
++ DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
+ if (err) {
-+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
++ dev_err(dev, "dpni_get_queue(TX) failed\n");
+ return err;
+ }
+
+ fq->tx_qdbin = qid.qdbin;
+
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, &q, &qid);
++ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
++ &queue, &qid);
+ if (err) {
-+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
++ dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
+ return err;
+ }
+
+ fq->fqid = qid.fqid;
+
-+ q.destination.id = fq->channel->dpcon_id;
-+ q.destination.type = DPNI_DEST_DPCON;
-+ q.destination.priority = 0;
-+ q.user_context = (u64)fq;
++ queue.destination.id = fq->channel->dpcon_id;
++ queue.destination.type = DPNI_DEST_DPCON;
++ queue.destination.priority = 0;
++ queue.user_context = (u64)(uintptr_t)fq;
+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, q_opt, &q);
++ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
++ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
++ &queue);
+ if (err) {
-+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
++ dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
+ return err;
+ }
+
@@ -3594,16 +5209,18 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#endif
+
+/* default hash key fields */
-+static struct dpaa2_eth_hash_fields default_hash_fields[] = {
++static struct dpaa2_eth_dist_fields default_dist_fields[] = {
+ {
+ /* L2 header */
+ .rxnfc_field = RXH_L2DA,
+ .cls_prot = NET_PROT_ETH,
+ .cls_field = NH_FLD_ETH_DA,
++ .id = DPAA2_ETH_DIST_ETHDST,
+ .size = 6,
+ }, {
+ .cls_prot = NET_PROT_ETH,
+ .cls_field = NH_FLD_ETH_SA,
++ .id = DPAA2_ETH_DIST_ETHSRC,
+ .size = 6,
+ }, {
+ /* This is the last ethertype field parsed:
@@ -3612,28 +5229,33 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ */
+ .cls_prot = NET_PROT_ETH,
+ .cls_field = NH_FLD_ETH_TYPE,
++ .id = DPAA2_ETH_DIST_ETHTYPE,
+ .size = 2,
+ }, {
+ /* VLAN header */
+ .rxnfc_field = RXH_VLAN,
+ .cls_prot = NET_PROT_VLAN,
+ .cls_field = NH_FLD_VLAN_TCI,
++ .id = DPAA2_ETH_DIST_VLAN,
+ .size = 2,
+ }, {
+ /* IP header */
+ .rxnfc_field = RXH_IP_SRC,
+ .cls_prot = NET_PROT_IP,
+ .cls_field = NH_FLD_IP_SRC,
++ .id = DPAA2_ETH_DIST_IPSRC,
+ .size = 4,
+ }, {
+ .rxnfc_field = RXH_IP_DST,
+ .cls_prot = NET_PROT_IP,
+ .cls_field = NH_FLD_IP_DST,
++ .id = DPAA2_ETH_DIST_IPDST,
+ .size = 4,
+ }, {
+ .rxnfc_field = RXH_L3_PROTO,
+ .cls_prot = NET_PROT_IP,
+ .cls_field = NH_FLD_IP_PROTO,
++ .id = DPAA2_ETH_DIST_IPPROTO,
+ .size = 1,
+ }, {
+ /* Using UDP ports, this is functionally equivalent to raw
@@ -3642,85 +5264,182 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ .rxnfc_field = RXH_L4_B_0_1,
+ .cls_prot = NET_PROT_UDP,
+ .cls_field = NH_FLD_UDP_PORT_SRC,
++ .id = DPAA2_ETH_DIST_L4SRC,
+ .size = 2,
+ }, {
+ .rxnfc_field = RXH_L4_B_2_3,
+ .cls_prot = NET_PROT_UDP,
+ .cls_field = NH_FLD_UDP_PORT_DST,
++ .id = DPAA2_ETH_DIST_L4DST,
+ .size = 2,
+ },
+};
+
-+/* Set RX hash options */
-+static int set_hash(struct dpaa2_eth_priv *priv)
++static int legacy_config_dist_key(struct dpaa2_eth_priv *priv,
++ dma_addr_t key_iova)
+{
+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpkg_profile_cfg cls_cfg;
+ struct dpni_rx_tc_dist_cfg dist_cfg;
-+ u8 *dma_mem;
-+ int i;
-+ int err = 0;
++ int i, err;
++
++ /* In legacy mode, we can't configure flow steering independently */
++ if (!dpaa2_eth_hash_enabled(priv))
++ return -EOPNOTSUPP;
++
++ memset(&dist_cfg, 0, sizeof(dist_cfg));
++
++ dist_cfg.key_cfg_iova = key_iova;
++ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
++ if (dpaa2_eth_fs_enabled(priv)) {
++ dist_cfg.dist_mode = DPNI_DIST_MODE_FS;
++ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH;
++ } else {
++ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
++ }
++
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
++ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, i,
++ &dist_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_rx_tc_dist failed\n");
++ return err;
++ }
++ }
++
++ return 0;
++}
++
++static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key_iova)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_rx_dist_cfg dist_cfg;
++ int i, err;
++
++ if (!dpaa2_eth_hash_enabled(priv))
++ return -EOPNOTSUPP;
++
++ memset(&dist_cfg, 0, sizeof(dist_cfg));
++
++ dist_cfg.key_cfg_iova = key_iova;
++ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
++ dist_cfg.enable = true;
++
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
++ dist_cfg.tc = i;
++
++ err = dpni_set_rx_hash_dist(priv->mc_io, 0,
++ priv->mc_token, &dist_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_rx_hash_dist failed\n");
++ return err;
++ }
++ }
++
++ return 0;
++}
++
++static int config_fs_key(struct dpaa2_eth_priv *priv, dma_addr_t key_iova)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_rx_dist_cfg dist_cfg;
++ int i, err;
++
++ if (!dpaa2_eth_fs_enabled(priv))
++ return -EOPNOTSUPP;
++
++ memset(&dist_cfg, 0, sizeof(dist_cfg));
++
++ dist_cfg.key_cfg_iova = key_iova;
++ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
++ dist_cfg.enable = true;
++
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
++ dist_cfg.tc = i;
++
++ err = dpni_set_rx_fs_dist(priv->mc_io, 0,
++ priv->mc_token, &dist_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_rx_fs_dist failed\n");
++ return err;
++ }
++ }
++
++ return 0;
++}
++
++int dpaa2_eth_set_dist_key(struct dpaa2_eth_priv *priv,
++ enum dpaa2_eth_rx_dist type, u32 key_fields)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpkg_profile_cfg cls_cfg;
++ struct dpkg_extract *key;
++ u32 hash_fields = 0;
++ dma_addr_t key_iova;
++ u8 *key_mem;
++ int i, err;
+
+ memset(&cls_cfg, 0, sizeof(cls_cfg));
+
-+ for (i = 0; i < priv->num_hash_fields; i++) {
-+ struct dpkg_extract *key =
-+ &cls_cfg.extracts[cls_cfg.num_extracts];
++ for (i = 0; i < priv->num_dist_fields; i++) {
++ if (!(key_fields & priv->dist_fields[i].id))
++ continue;
+
++ key = &cls_cfg.extracts[cls_cfg.num_extracts];
+ key->type = DPKG_EXTRACT_FROM_HDR;
-+ key->extract.from_hdr.prot = priv->hash_fields[i].cls_prot;
++ key->extract.from_hdr.prot = priv->dist_fields[i].cls_prot;
+ key->extract.from_hdr.type = DPKG_FULL_FIELD;
-+ key->extract.from_hdr.field = priv->hash_fields[i].cls_field;
++ key->extract.from_hdr.field = priv->dist_fields[i].cls_field;
+ cls_cfg.num_extracts++;
+
-+ priv->rx_flow_hash |= priv->hash_fields[i].rxnfc_field;
++ hash_fields |= priv->dist_fields[i].rxnfc_field;
+ }
+
-+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL);
-+ if (!dma_mem)
++ key_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
++ if (!key_mem)
+ return -ENOMEM;
+
-+ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
++ err = dpni_prepare_key_cfg(&cls_cfg, key_mem);
+ if (err) {
-+ dev_err(dev, "dpni_prepare_key_cfg() failed (%d)", err);
-+ goto err_prep_key;
++ dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
++ goto free_key;
+ }
+
-+ memset(&dist_cfg, 0, sizeof(dist_cfg));
-+
-+ /* Prepare for setting the rx dist */
-+ dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
-+ DPAA2_CLASSIFIER_DMA_SIZE,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
++ key_iova = dma_map_single(dev, key_mem, DPAA2_CLASSIFIER_DMA_SIZE,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, key_iova)) {
+ dev_err(dev, "DMA mapping failed\n");
+ err = -ENOMEM;
-+ goto err_dma_map;
++ goto free_key;
+ }
+
-+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
-+ if (dpaa2_eth_fs_enabled(priv)) {
-+ dist_cfg.dist_mode = DPNI_DIST_MODE_FS;
-+ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH;
-+ } else {
-+ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
++ switch (type) {
++ case DPAA2_ETH_RX_DIST_LEGACY:
++ err = legacy_config_dist_key(priv, key_iova);
++ break;
++ case DPAA2_ETH_RX_DIST_HASH:
++ err = config_hash_key(priv, key_iova);
++ break;
++ case DPAA2_ETH_RX_DIST_FS:
++ err = config_fs_key(priv, key_iova);
++ break;
++ default:
++ err = -EINVAL;
++ break;
+ }
+
-+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
-+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, i,
-+ &dist_cfg);
-+ if (err)
-+ break;
++ dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
++ DMA_TO_DEVICE);
++ if (err) {
++ if (err != -EOPNOTSUPP)
++ dev_err(dev, "Distribution key config failed\n");
++ goto free_key;
+ }
+
-+ dma_unmap_single(dev, dist_cfg.key_cfg_iova,
-+ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
-+ if (err)
-+ dev_err(dev, "dpni_set_rx_tc_dist() failed (%d)\n", err);
++ if (type != DPAA2_ETH_RX_DIST_FS)
++ priv->rx_hash_fields = hash_fields;
+
-+err_dma_map:
-+err_prep_key:
-+ kfree(dma_mem);
++free_key:
++ kfree(key_mem);
+ return err;
+}
+
@@ -3750,19 +5469,23 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ /* Verify classification options and disable hashing and/or
+ * flow steering support in case of invalid configuration values
+ */
-+ priv->hash_fields = default_hash_fields;
-+ priv->num_hash_fields = ARRAY_SIZE(default_hash_fields);
++ priv->dist_fields = default_dist_fields;
++ priv->num_dist_fields = ARRAY_SIZE(default_dist_fields);
+ check_cls_support(priv);
+
+ /* have the interface implicitly distribute traffic based on
-+ * a static hash key
++ * a static hash key. Also configure flow steering key, if supported.
++ * Errors here are not blocking, so just let the called function
++ * print its error message and move along.
+ */
-+ if (dpaa2_eth_hash_enabled(priv)) {
-+ err = set_hash(priv);
-+ if (err) {
-+ dev_err(dev, "Hashing configuration failed\n");
-+ return err;
-+ }
++ if (dpaa2_eth_has_legacy_dist(priv)) {
++ dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_LEGACY,
++ DPAA2_ETH_DIST_ALL);
++ } else {
++ dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_HASH,
++ DPAA2_ETH_DIST_DEFAULT_HASH);
++ dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_FS,
++ DPAA2_ETH_DIST_ALL);
+ }
+
+ /* Configure handling of error frames */
@@ -3776,7 +5499,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
+ &err_cfg);
+ if (err) {
-+ dev_err(dev, "dpni_set_errors_behavior() failed (%d)\n", err);
++ dev_err(dev, "dpni_set_errors_behavior failed\n");
+ return err;
+ }
+
@@ -3802,8 +5525,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ return err;
+ }
+
-+ err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_TX,
-+ &priv->tx_qdid);
++ err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX, &priv->tx_qdid);
+ if (err) {
+ dev_err(dev, "dpni_get_qdid() failed\n");
+ return err;
@@ -3848,68 +5571,61 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ dpaa2_io_store_destroy(priv->channel[i]->store);
+}
+
-+static int netdev_init(struct net_device *net_dev)
++static int set_mac_addr(struct dpaa2_eth_priv *priv)
+{
-+ int err;
++ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
-+ u8 bcast_addr[ETH_ALEN];
-+ u16 rx_headroom, rx_req_headroom;
-+
-+ net_dev->netdev_ops = &dpaa2_eth_ops;
++ int err;
+
+ /* Get firmware address, if any */
+ err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
+ if (err) {
-+ dev_err(dev, "dpni_get_port_mac_addr() failed (%d)\n", err);
++ dev_err(dev, "dpni_get_port_mac_addr() failed\n");
+ return err;
+ }
+
-+ /* Get DPNI atttributes address, if any */
++ /* Get DPNI attributes address, if any */
+ err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+ dpni_mac_addr);
+ if (err) {
-+ dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)\n", err);
++ dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
+ return err;
+ }
+
+ /* First check if firmware has any address configured by bootloader */
+ if (!is_zero_ether_addr(mac_addr)) {
-+ /* If the DPMAC addr != the DPNI addr, update it */
++ /* If the DPMAC addr != DPNI addr, update it */
+ if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
+ err = dpni_set_primary_mac_addr(priv->mc_io, 0,
+ priv->mc_token,
+ mac_addr);
+ if (err) {
-+ dev_err(dev,
-+ "dpni_set_primary_mac_addr() failed (%d)\n",
-+ err);
++ dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
+ return err;
+ }
+ }
+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ } else if (is_zero_ether_addr(dpni_mac_addr)) {
-+ /* Fills in net_dev->dev_addr, as required by
-+ * register_netdevice()
++ /* No MAC address configured, fill in net_dev->dev_addr
++ * with a random one
+ */
+ eth_hw_addr_random(net_dev);
-+ /* Make the user aware, without cluttering the boot log */
-+ dev_dbg_once(dev, " device(s) have all-zero hwaddr, replaced with random\n");
-+ err = dpni_set_primary_mac_addr(priv->mc_io, 0,
-+ priv->mc_token, net_dev->dev_addr);
++ dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
++
++ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
++ net_dev->dev_addr);
+ if (err) {
-+ dev_err(dev,
-+ "dpni_set_primary_mac_addr() failed (%d)\n", err);
++ dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
+ return err;
+ }
++
+ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
+ * practical purposes, this will be our "permanent" mac address,
+ * at least until the next reboot. This move will also permit
+ * register_netdevice() to properly fill up net_dev->perm_addr.
+ */
+ net_dev->addr_assign_type = NET_ADDR_PERM;
-+ /* If DPMAC address is non-zero, use that one */
+ } else {
+ /* NET_ADDR_PERM is default, all we have to do is
+ * fill in the device addr.
@@ -3917,41 +5633,57 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
+ }
+
-+ /* Explicitly add the broadcast address to the MAC filtering table;
-+ * the MC won't do that for us.
-+ */
++ return 0;
++}
++
++static int netdev_init(struct net_device *net_dev)
++{
++ struct device *dev = net_dev->dev.parent;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ u8 bcast_addr[ETH_ALEN];
++ u8 num_queues;
++ int err;
++
++ net_dev->netdev_ops = &dpaa2_eth_ops;
++
++ err = set_mac_addr(priv);
++ if (err)
++ return err;
++
++ /* Explicitly add the broadcast address to the MAC filtering table */
+ eth_broadcast_addr(bcast_addr);
+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
+ if (err) {
-+ dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err);
-+ /* Won't return an error; at least, we'd have egress traffic */
++ dev_err(dev, "dpni_add_mac_addr() failed\n");
++ return err;
+ }
+
-+ /* Reserve enough space to align buffer as per hardware requirement;
-+ * NOTE: priv->tx_data_offset MUST be initialized at this point.
-+ */
-+ net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
-+
-+ /* Set MTU limits */
-+ net_dev->min_mtu = 68;
++ /* Set MTU upper limit; lower limit is default (68B) */
+ net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
++ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
++ (u16)DPAA2_ETH_MFL);
++ if (err) {
++ dev_err(dev, "dpni_set_max_frame_length() failed\n");
++ return err;
++ }
+
-+ /* Required headroom for Rx skbs, to avoid reallocation on
-+ * forwarding path.
-+ */
-+ rx_req_headroom = LL_RESERVED_SPACE(net_dev) - ETH_HLEN;
-+ rx_headroom = ALIGN(DPAA2_ETH_RX_HWA_SIZE + DPAA2_ETH_SWA_SIZE +
-+ DPAA2_ETH_RX_HEAD_ROOM, priv->rx_buf_align);
-+ if (rx_req_headroom > rx_headroom)
-+ dev_info_once(dev,
-+ "Required headroom (%d) greater than available (%d).\n"
-+ "This will impact performance due to reallocations.\n",
-+ rx_req_headroom, rx_headroom);
++ /* Set actual number of queues in the net device */
++ num_queues = dpaa2_eth_queue_count(priv);
++ err = netif_set_real_num_tx_queues(net_dev, num_queues);
++ if (err) {
++ dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
++ return err;
++ }
++ err = netif_set_real_num_rx_queues(net_dev, num_queues);
++ if (err) {
++ dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
++ return err;
++ }
+
+ /* Our .ndo_init will be called herein */
+ err = register_netdev(net_dev);
+ if (err < 0) {
-+ dev_err(dev, "register_netdev() failed (%d)\n", err);
++ dev_err(dev, "register_netdev() failed\n");
+ return err;
+ }
+
@@ -3974,14 +5706,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ return 0;
+}
+
-+static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
-+{
-+ return IRQ_WAKE_THREAD;
-+}
-+
+static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
+{
-+ u32 status = 0, clear = 0;
++ u32 status = ~0;
+ struct device *dev = (struct device *)arg;
+ struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
+ struct net_device *net_dev = dev_get_drvdata(dev);
@@ -3990,19 +5717,13 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
+ DPNI_IRQ_INDEX, &status);
+ if (unlikely(err)) {
-+ netdev_err(net_dev, "Can't get irq status (err %d)", err);
-+ clear = 0xffffffff;
-+ goto out;
++ netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
++ return IRQ_HANDLED;
+ }
+
-+ if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
-+ clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
++ if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
+ link_state_update(netdev_priv(net_dev));
-+ }
+
-+out:
-+ dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
-+ DPNI_IRQ_INDEX, clear);
+ return IRQ_HANDLED;
+}
+
@@ -4019,26 +5740,25 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ irq = ls_dev->irqs[0];
+ err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
-+ dpni_irq0_handler,
-+ dpni_irq0_handler_thread,
++ NULL, dpni_irq0_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&ls_dev->dev), &ls_dev->dev);
+ if (err < 0) {
-+ dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err);
++ dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
+ goto free_mc_irq;
+ }
+
+ err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
+ DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
+ if (err < 0) {
-+ dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err);
++ dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
+ goto free_irq;
+ }
+
+ err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
+ DPNI_IRQ_INDEX, 1);
+ if (err < 0) {
-+ dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err);
++ dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
+ goto free_irq;
+ }
+
@@ -4095,7 +5815,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+{
+ int err, items;
+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
-+ struct dpni_tx_shaping_cfg scfg;
++ struct dpni_tx_shaping_cfg scfg, ercfg = { 0 };
+
+ items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size);
+ if (items != 2) {
@@ -4109,7 +5829,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ return -EINVAL;
+ }
+
-+ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg);
++ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg,
++ &ercfg, 0);
+ if (err) {
+ dev_err(dev, "dpni_set_tx_shaping() failed\n");
+ return -EPERM;
@@ -4120,79 +5841,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ return count;
+}
+
-+static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
-+
-+ return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask);
-+}
-+
-+static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf,
-+ size_t count)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
-+ struct dpaa2_eth_fq *fq;
-+ bool running = netif_running(priv->net_dev);
-+ int i, err;
-+
-+ err = cpulist_parse(buf, &priv->txconf_cpumask);
-+ if (err)
-+ return err;
-+
-+ /* Only accept CPUs that have an affine DPIO */
-+ if (!cpumask_subset(&priv->txconf_cpumask, &priv->dpio_cpumask)) {
-+ netdev_info(priv->net_dev,
-+ "cpumask must be a subset of 0x%lx\n",
-+ *cpumask_bits(&priv->dpio_cpumask));
-+ cpumask_and(&priv->txconf_cpumask, &priv->dpio_cpumask,
-+ &priv->txconf_cpumask);
-+ }
-+
-+ /* Rewiring the TxConf FQs requires interface shutdown.
-+ */
-+ if (running) {
-+ err = dpaa2_eth_stop(priv->net_dev);
-+ if (err)
-+ return -ENODEV;
-+ }
-+
-+ /* Set the new TxConf FQ affinities */
-+ set_fq_affinity(priv);
-+
-+ /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit
-+ * link up notification is received. Give the polling thread enough time
-+ * to detect the link state change, or else we'll end up with the
-+ * transmission side forever shut down.
-+ */
-+ if (priv->do_link_poll)
-+ msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH);
-+
-+ for (i = 0; i < priv->num_fqs; i++) {
-+ fq = &priv->fq[i];
-+ if (fq->type != DPAA2_TX_CONF_FQ)
-+ continue;
-+ setup_tx_flow(priv, fq);
-+ }
-+
-+ if (running) {
-+ err = dpaa2_eth_open(priv->net_dev);
-+ if (err)
-+ return -ENODEV;
-+ }
-+
-+ return count;
-+}
-+
+static struct device_attribute dpaa2_eth_attrs[] = {
-+ __ATTR(txconf_cpumask,
-+ 0600,
-+ dpaa2_eth_show_txconf_cpumask,
-+ dpaa2_eth_write_txconf_cpumask),
-+
+ __ATTR(tx_shaping,
+ 0600,
+ dpaa2_eth_show_tx_shaping,
@@ -4234,7 +5883,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct dpni_link_state state;
+ int err, i;
+
-+ pfc->pfc_cap = dpaa2_eth_tc_count(priv);
++ priv->pfc.pfc_cap = dpaa2_eth_tc_count(priv);
+
+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+ if (err) {
@@ -4261,9 +5910,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ priv->pfc.pfc_en |= 1 << i;
+ }
+
-+ pfc->pfc_en = priv->pfc.pfc_en;
-+ pfc->mbc = priv->pfc.mbc;
-+ pfc->delay = priv->pfc.delay;
++ memcpy(pfc, &priv->pfc, sizeof(priv->pfc));
+
+ return 0;
+}
@@ -4275,8 +5922,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct dpkg_profile_cfg kg_cfg = {0};
+ struct dpni_qos_tbl_cfg qos_cfg = {0};
+ struct dpni_rule_cfg key_params;
-+ u8 *params_iova;
-+ __be16 key, mask = cpu_to_be16(VLAN_PRIO_MASK);
++ u8 *params_iova, *key, *mask = NULL;
++ /* We only need the trailing 16 bits, without the TPID */
++ u8 key_size = VLAN_HLEN / 2;
+ int err = 0, i, j = 0;
+
+ if (priv->vlan_clsf_set)
@@ -4318,49 +5966,79 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ goto out_free;
+ }
+
-+ key_params.key_size = sizeof(key);
++ key_params.key_size = key_size;
+
+ if (dpaa2_eth_fs_mask_enabled(priv)) {
-+ key_params.mask_iova = dma_map_single(dev, &mask, sizeof(mask),
++ mask = kzalloc(key_size, GFP_KERNEL);
++ if (!mask)
++ goto out_free;
++
++ *mask = cpu_to_be16(VLAN_PRIO_MASK);
++
++ key_params.mask_iova = dma_map_single(dev, mask, key_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_params.mask_iova)) {
+ dev_err(dev, "DMA mapping failed %s\n", __func__);
+ err = -ENOMEM;
-+ goto out_free;
++ goto out_free_mask;
+ }
+ } else {
+ key_params.mask_iova = 0;
+ }
+
-+ key_params.key_iova = dma_map_single(dev, &key, sizeof(key),
++ key = kzalloc(key_size, GFP_KERNEL);
++	if (!key) {
++		err = -ENOMEM; goto out_cleanup_mask; }
++
++ key_params.key_iova = dma_map_single(dev, key, key_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_params.key_iova)) {
+ dev_err(dev, "%s: DMA mapping failed\n", __func__);
+ err = -ENOMEM;
-+ goto out_unmap_mask;
++ goto out_free_key;
+ }
+
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
-+ key = cpu_to_be16(i << VLAN_PRIO_SHIFT);
++		*(__be16 *)key = cpu_to_be16(i << VLAN_PRIO_SHIFT);
++
+ dma_sync_single_for_device(dev, key_params.key_iova,
-+ sizeof(key), DMA_TO_DEVICE);
++ key_size, DMA_TO_DEVICE);
+
+ err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
+ &key_params, i, j++);
+ if (err) {
+ dev_err(dev, "dpni_add_qos_entry failed: %d\n", err);
-+ goto out_unmap;
++ goto out_remove;
+ }
+ }
+
+ priv->vlan_clsf_set = true;
++ dev_dbg(dev, "Vlan PCP QoS classification set\n");
++ goto out_cleanup;
+
-+out_unmap:
-+ dma_unmap_single(dev, key_params.key_iova, sizeof(key), DMA_TO_DEVICE);
-+out_unmap_mask:
++out_remove:
++ for (j = 0; j < i; j++) {
++		*(__be16 *)key = cpu_to_be16(j << VLAN_PRIO_SHIFT);
++
++ dma_sync_single_for_device(dev, key_params.key_iova, key_size,
++ DMA_TO_DEVICE);
++
++		if (dpni_remove_qos_entry(priv->mc_io, 0, priv->mc_token,
++					  &key_params))
++			dev_err(dev,
++				"dpni_remove_qos_entry failed\n");
++ }
++
++out_cleanup:
++ dma_unmap_single(dev, key_params.key_iova, key_size, DMA_TO_DEVICE);
++out_free_key:
++ kfree(key);
++out_cleanup_mask:
+ if (key_params.mask_iova)
-+ dma_unmap_single(dev, key_params.mask_iova, sizeof(mask),
++ dma_unmap_single(dev, key_params.mask_iova, key_size,
+ DMA_TO_DEVICE);
++out_free_mask:
++ kfree(mask);
+out_free:
+ kfree(params_iova);
+ return err;
@@ -4373,8 +6051,17 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct dpni_congestion_notification_cfg notification_cfg = {0};
+ struct dpni_link_state state = {0};
+ struct dpni_link_cfg cfg = {0};
++ struct ieee_pfc old_pfc;
+ int err = 0, i;
+
++ if (dpaa2_eth_tc_count(priv) == 1) {
++ netdev_dbg(net_dev, "DPNI has 1 TC, PFC configuration N/A\n");
++ return 0;
++ }
++
++ /* Zero out pfc_enabled prios greater than tc_count */
++ pfc->pfc_en &= (1 << dpaa2_eth_tc_count(priv)) - 1;
++
+ if (priv->pfc.pfc_en == pfc->pfc_en)
+ /* Same enabled mask, nothing to be done */
+ return 0;
@@ -4402,11 +6089,12 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ return err;
+ }
+
++ memcpy(&old_pfc, &priv->pfc, sizeof(priv->pfc));
+ memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
+
+ err = set_rx_taildrop(priv);
+ if (err)
-+ return err;
++ goto out_restore_config;
+
+ /* configure congestion notifications */
+ notification_cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
@@ -4430,11 +6118,19 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ if (err) {
+ netdev_err(net_dev, "Error %d setting congestion notif",
+ err);
-+ return err;
++ goto out_restore_config;
+ }
++
++ netdev_dbg(net_dev, "%s congestion notifications for tc %d\n",
++ (notification_cfg.threshold_entry ?
++ "Enabled" : "Disabled"), i);
+ }
+
+ return 0;
++
++out_restore_config:
++ memcpy(&priv->pfc, &old_pfc, sizeof(priv->pfc));
++ return err;
+}
+
+static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
@@ -4461,7 +6157,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
-+ *cap = 1 << dpaa2_eth_tc_count(priv);
++ /* bitmap where each bit represents a number of traffic
++ * classes the device can be configured to use for Priority
++ * Flow Control
++ */
++ *cap = 1 << (dpaa2_eth_tc_count(priv) - 1);
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = priv->dcbx_mode;
@@ -4511,8 +6211,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+ &priv->mc_io);
+ if (err) {
-+ dev_dbg(dev, "MC portal allocation failed\n");
-+ err = -EPROBE_DEFER;
++ if (err == -ENXIO)
++ err = -EPROBE_DEFER;
++ else
++ dev_err(dev, "MC portal allocation failed\n");
+ goto err_portal_alloc;
+ }
+
@@ -4522,11 +6224,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ goto err_dpni_setup;
+
+ err = setup_dpio(priv);
-+ if (err) {
-+ dev_info(dev, "Defer probing as no DPIO available\n");
-+ err = -EPROBE_DEFER;
++ if (err)
+ goto err_dpio_setup;
-+ }
+
+ setup_fqs(priv);
+
@@ -4552,15 +6251,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ goto err_alloc_percpu_extras;
+ }
+
-+ snprintf(net_dev->name, IFNAMSIZ, "ni%d", dpni_dev->obj_desc.id);
-+ if (!dev_valid_name(net_dev->name)) {
-+ dev_warn(&net_dev->dev,
-+ "netdevice name \"%s\" cannot be used, reverting to default..\n",
-+ net_dev->name);
-+ dev_alloc_name(net_dev, "eth%d");
-+ dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name);
-+ }
-+
+ err = netdev_init(net_dev);
+ if (err)
+ goto err_netdev_init;
@@ -4651,12 +6341,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#endif
+ dpaa2_eth_sysfs_remove(&net_dev->dev);
+
++ unregister_netdev(net_dev);
++
+ disable_ch_napi(priv);
+ del_ch_napi(priv);
+
-+ unregister_netdev(net_dev);
-+ dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
-+
+ if (priv->do_link_poll)
+ kthread_stop(priv->poll_thread);
+ else
@@ -4674,6 +6363,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ dev_set_drvdata(dev, NULL);
+ free_netdev(net_dev);
+
++ dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
++
+ return 0;
+}
+
@@ -4702,26 +6393,36 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ dpaa2_eth_dbg_init();
+ err = fsl_mc_driver_register(&dpaa2_eth_driver);
-+ if (err) {
-+ dpaa2_eth_dbg_exit();
-+ return err;
-+ }
++ if (err)
++ goto out_debugfs_err;
++
++ err = dpaa2_ceetm_register();
++ if (err)
++ goto out_ceetm_err;
+
+ return 0;
++
++out_ceetm_err:
++ fsl_mc_driver_unregister(&dpaa2_eth_driver);
++out_debugfs_err:
++ dpaa2_eth_dbg_exit();
++ return err;
+}
+
+static void __exit dpaa2_eth_driver_exit(void)
+{
-+ dpaa2_eth_dbg_exit();
++ dpaa2_ceetm_unregister();
+ fsl_mc_driver_unregister(&dpaa2_eth_driver);
++ dpaa2_eth_dbg_exit();
+}
+
+module_init(dpaa2_eth_driver_init);
+module_exit(dpaa2_eth_driver_exit);
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
-@@ -0,0 +1,499 @@
-+/* Copyright 2014-2015 Freescale Semiconductor Inc.
+@@ -0,0 +1,601 @@
++/* Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
@@ -4755,16 +6456,20 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#ifndef __DPAA2_ETH_H
+#define __DPAA2_ETH_H
+
-+#include <linux/atomic.h>
+#include <linux/dcbnl.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
++
+#include "../../fsl-mc/include/dpaa2-io.h"
++#include "../../fsl-mc/include/dpaa2-fd.h"
+#include "dpni.h"
-+#include "net.h"
++#include "dpni-cmd.h"
+
++#include "dpaa2-eth-trace.h"
+#include "dpaa2-eth-debugfs.h"
+
++#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
++
+#define DPAA2_ETH_STORE_SIZE 16
+
+/* We set a max threshold for how many Tx confirmations we should process
@@ -4805,58 +6510,61 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * to accommodate the buffer refill delay.
+ */
+#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
-+#define DPAA2_ETH_NUM_BUFS_TD (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
-+#define DPAA2_ETH_REFILL_THRESH_TD \
-+ (DPAA2_ETH_NUM_BUFS_TD - DPAA2_ETH_BUFS_PER_CMD)
++#define DPAA2_ETH_NUM_BUFS_PER_CH (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
++#define DPAA2_ETH_REFILL_THRESH(priv) \
++ ((priv)->max_bufs_per_ch - DPAA2_ETH_BUFS_PER_CMD)
+
-+/* Buffer quota per queue to use when flow control is active. */
++/* Global buffer quota in case flow control is enabled */
+#define DPAA2_ETH_NUM_BUFS_FC 256
+
-+/* Hardware requires alignment for ingress/egress buffer addresses
-+ * and ingress buffer lengths.
-+ */
-+#define DPAA2_ETH_RX_BUF_SIZE 2048
++/* Hardware requires alignment for ingress/egress buffer addresses */
+#define DPAA2_ETH_TX_BUF_ALIGN 64
++
++/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
++ * to 256B. For newer revisions, the requirement is only for 64B alignment
++ */
++#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256
+#define DPAA2_ETH_RX_BUF_ALIGN 64
-+#define DPAA2_ETH_RX_BUF_ALIGN_V1 256
-+#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
-+ ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN - HH_DATA_MOD)
+
-+/* rx_extra_head prevents reallocations in L3 processing. */
++#define DPAA2_ETH_RX_BUF_SIZE 2048
+#define DPAA2_ETH_SKB_SIZE \
-+ (DPAA2_ETH_RX_BUF_SIZE + \
-+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
-+
-+/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
-+ * buffers large enough to allow building an skb around them and also account
-+ * for alignment restrictions.
-+ */
-+#define DPAA2_ETH_BUF_RAW_SIZE(p_priv) \
-+ (DPAA2_ETH_SKB_SIZE + \
-+ (p_priv)->rx_buf_align)
++ (DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+/* PTP nominal frequency 1GHz */
+#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
+
++/* Hardware annotation area in RX/TX buffers */
++#define DPAA2_ETH_RX_HWA_SIZE 64
++#define DPAA2_ETH_TX_HWA_SIZE 128
++
+/* We are accommodating a skb backpointer and some S/G info
+ * in the frame's software annotation. The hardware
+ * options are either 0 or 64, so we choose the latter.
+ */
+#define DPAA2_ETH_SWA_SIZE 64
+
-+/* Extra headroom space requested to hardware, in order to make sure there's
-+ * no realloc'ing in forwarding scenarios
++/* We store different information in the software annotation area of a Tx frame
++ * based on what type of frame it is
+ */
-+#define DPAA2_ETH_RX_HEAD_ROOM \
-+ (DPAA2_ETH_TX_HWA_SIZE - DPAA2_ETH_RX_HWA_SIZE + \
-+ DPAA2_ETH_TX_BUF_ALIGN)
++enum dpaa2_eth_swa_type {
++ DPAA2_ETH_SWA_SINGLE,
++ DPAA2_ETH_SWA_SG,
++};
+
+/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
+struct dpaa2_eth_swa {
-+ struct sk_buff *skb;
-+ struct scatterlist *scl;
-+ int num_sg;
-+ int num_dma_bufs;
++ enum dpaa2_eth_swa_type type;
++ union {
++ struct {
++ struct sk_buff *skb;
++ } single;
++ struct {
++ struct sk_buff *skb;
++ struct scatterlist *scl;
++ int num_sg;
++ int sgt_size;
++ } sg;
++ };
+};
+
+/* Annotation valid bits in FD FRC */
@@ -4876,22 +6584,16 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+/* Annotation bits in FD CTRL */
+#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
+
-+/* Size of hardware annotation area based on the current buffer layout
-+ * configuration
-+ */
-+#define DPAA2_ETH_RX_HWA_SIZE 64
-+#define DPAA2_ETH_TX_HWA_SIZE 128
-+
+/* Frame annotation status */
+struct dpaa2_fas {
+ u8 reserved;
+ u8 ppid;
+ __le16 ifpid;
+ __le32 status;
-+} __packed;
++};
+
+/* Frame annotation status word is located in the first 8 bytes
-+ * of the buffer's hardware annotation area
++ * of the buffer's hardware annotation area
+ */
+#define DPAA2_FAS_OFFSET 0
+#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
@@ -4910,21 +6612,31 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+};
+
+#define DPAA2_FAEAD_A2V 0x20000000
++#define DPAA2_FAEAD_A4V 0x08000000
+#define DPAA2_FAEAD_UPDV 0x00001000
++#define DPAA2_FAEAD_EBDDV 0x00002000
+#define DPAA2_FAEAD_UPD 0x00000010
+
-+/* accessors for the hardware annotation fields that we use */
-+#define dpaa2_eth_get_hwa(buf_addr) \
-+ ((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE)
++/* Accessors for the hardware annotation fields that we use */
++static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
++{
++ return buf_addr + (swa ? DPAA2_ETH_SWA_SIZE : 0);
++}
+
-+#define dpaa2_eth_get_fas(buf_addr) \
-+ (struct dpaa2_fas *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAS_OFFSET)
++static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa)
++{
++ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET;
++}
+
-+#define dpaa2_eth_get_ts(buf_addr) \
-+ (u64 *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_TS_OFFSET)
++static inline u64 *dpaa2_get_ts(void *buf_addr, bool swa)
++{
++ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
++}
+
-+#define dpaa2_eth_get_faead(buf_addr) \
-+ (struct dpaa2_faead *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAEAD_OFFSET)
++static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
++{
++ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET;
++}
+
+/* Error and status bits in the frame annotation status word */
+/* Debug frame, otherwise supposed to be discarded */
@@ -4958,24 +6670,19 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+/* L4 csum error */
+#define DPAA2_FAS_L4CE 0x00000001
+/* Possible errors on the ingress path */
-+#define DPAA2_FAS_RX_ERR_MASK ((DPAA2_FAS_KSE) | \
-+ (DPAA2_FAS_EOFHE) | \
-+ (DPAA2_FAS_MNLE) | \
-+ (DPAA2_FAS_TIDE) | \
-+ (DPAA2_FAS_PIEE) | \
-+ (DPAA2_FAS_FLE) | \
-+ (DPAA2_FAS_FPE) | \
-+ (DPAA2_FAS_PTE) | \
-+ (DPAA2_FAS_ISP) | \
-+ (DPAA2_FAS_PHE) | \
-+ (DPAA2_FAS_BLE) | \
-+ (DPAA2_FAS_L3CE) | \
-+ (DPAA2_FAS_L4CE))
-+/* Tx errors */
-+#define DPAA2_FAS_TX_ERR_MASK ((DPAA2_FAS_KSE) | \
-+ (DPAA2_FAS_EOFHE) | \
-+ (DPAA2_FAS_MNLE) | \
-+ (DPAA2_FAS_TIDE))
++#define DPAA2_FAS_RX_ERR_MASK (DPAA2_FAS_KSE | \
++ DPAA2_FAS_EOFHE | \
++ DPAA2_FAS_MNLE | \
++ DPAA2_FAS_TIDE | \
++ DPAA2_FAS_PIEE | \
++ DPAA2_FAS_FLE | \
++ DPAA2_FAS_FPE | \
++ DPAA2_FAS_PTE | \
++ DPAA2_FAS_ISP | \
++ DPAA2_FAS_PHE | \
++ DPAA2_FAS_BLE | \
++ DPAA2_FAS_L3CE | \
++ DPAA2_FAS_L4CE)
+
+/* Time in milliseconds between link state updates */
+#define DPAA2_ETH_LINK_STATE_REFRESH 1000
@@ -4984,14 +6691,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * Value determined empirically, in order to minimize the number
+ * of frames dropped on Tx
+ */
-+#define DPAA2_ETH_ENQUEUE_RETRIES 10
++#define DPAA2_ETH_ENQUEUE_RETRIES 10
+
+/* Tx congestion entry & exit thresholds, in number of bytes.
+ * We allow a maximum of 512KB worth of frames pending processing on the Tx
+ * queues of an interface
+ */
-+#define DPAA2_ETH_TX_CONG_ENTRY_THRESH (512 * 1024)
-+#define DPAA2_ETH_TX_CONG_EXIT_THRESH (DPAA2_ETH_TX_CONG_ENTRY_THRESH * 9/10)
++#define DPAA2_ETH_TX_CONG_ENTRY_THRESH (512 * 1024)
++#define DPAA2_ETH_TX_CONG_EXIT_THRESH \
++ (DPAA2_ETH_TX_CONG_ENTRY_THRESH * 9 / 10)
+
+/* Driver statistics, other than those in struct rtnl_link_stats64.
+ * These are usually collected per-CPU and aggregated by ethtool.
@@ -5001,6 +6709,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ __u64 tx_conf_bytes;
+ __u64 tx_sg_frames;
+ __u64 tx_sg_bytes;
++ __u64 tx_reallocs;
+ __u64 rx_sg_frames;
+ __u64 rx_sg_bytes;
+ /* Enqueues retried due to portal busy */
@@ -5027,7 +6736,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ __u64 pull_err;
+};
+
-+#define DPAA2_ETH_MAX_DPCONS NR_CPUS
+#define DPAA2_ETH_MAX_TCS 8
+
+/* Maximum number of queues associated with a DPNI */
@@ -5038,6 +6746,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ DPAA2_ETH_MAX_TX_QUEUES + \
+ DPAA2_ETH_MAX_RX_ERR_QUEUES)
+
++#define DPAA2_ETH_MAX_DPCONS 16
++
+enum dpaa2_eth_fq_type {
+ DPAA2_RX_FQ = 0,
+ DPAA2_TX_CONF_FQ,
@@ -5068,12 +6778,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct fsl_mc_device *dpcon;
+ int dpcon_id;
+ int ch_id;
-+ int dpio_id;
+ struct napi_struct napi;
++ struct dpaa2_io *dpio;
+ struct dpaa2_io_store *store;
+ struct dpaa2_eth_priv *priv;
+ int buf_count;
+ struct dpaa2_eth_ch_stats stats;
++ struct bpf_prog *xdp_prog;
++ u64 rel_buf_array[DPAA2_ETH_BUFS_PER_CMD];
++ u8 rel_buf_cnt;
+};
+
+struct dpaa2_eth_cls_rule {
@@ -5081,60 +6794,51 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ bool in_use;
+};
+
-+struct dpaa2_eth_hash_fields {
++struct dpaa2_eth_dist_fields {
+ u64 rxnfc_field;
+ enum net_prot cls_prot;
+ int cls_field;
+ int offset;
+ int size;
++ u32 id;
+};
+
+/* Driver private data */
+struct dpaa2_eth_priv {
+ struct net_device *net_dev;
-+
+ /* Standard statistics */
+ struct rtnl_link_stats64 __percpu *percpu_stats;
+ /* Extra stats, in addition to the ones known by the kernel */
+ struct dpaa2_eth_drv_stats __percpu *percpu_extras;
-+ struct iommu_domain *iommu_domain;
-+
+ bool ts_tx_en; /* Tx timestamping enabled */
+ bool ts_rx_en; /* Rx timestamping enabled */
-+
+ u16 tx_data_offset;
-+ u16 rx_buf_align;
-+
+ u16 bpid;
+ u16 tx_qdid;
-+
-+ int tx_pause_frames;
-+ int num_bufs;
++ u16 rx_buf_align;
++ struct iommu_domain *iommu_domain;
++ int max_bufs_per_ch;
+ int refill_thresh;
++ bool has_xdp_prog;
+
-+ /* Tx congestion notifications are written here */
-+ void *cscn_mem;
++ void *cscn_mem; /* Tx congestion notifications are written here */
+ void *cscn_unaligned;
+ dma_addr_t cscn_dma;
+
+ u8 num_fqs;
-+ /* Tx queues are at the beginning of the array */
+ struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
+
+ u8 num_channels;
+ struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
+
-+ int dpni_id;
+ struct dpni_attr dpni_attrs;
++ u16 dpni_ver_major;
++ u16 dpni_ver_minor;
+ struct fsl_mc_device *dpbp_dev;
+
+ struct fsl_mc_io *mc_io;
-+ /* SysFS-controlled affinity mask for TxConf FQs */
-+ struct cpumask txconf_cpumask;
+ /* Cores which have an affine DPIO/DPCON.
-+ * This is the cpu set on which Rx frames are processed;
-+ * Tx confirmation frames are processed on a subset of this,
-+ * depending on user settings.
++ * This is the cpu set on which Rx and Tx conf frames are processed
+ */
+ struct cpumask dpio_cpumask;
+
@@ -5144,25 +6848,52 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ bool do_link_poll;
+ struct task_struct *poll_thread;
+
-+ struct dpaa2_eth_hash_fields *hash_fields;
-+ u8 num_hash_fields;
++ /* Rx distribution (hash and flow steering) header fields
++ * supported by the driver
++ */
++ struct dpaa2_eth_dist_fields *dist_fields;
++ u8 num_dist_fields;
+ /* enabled ethtool hashing bits */
-+ u64 rx_flow_hash;
-+
++ u64 rx_hash_fields;
+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
+ struct dpaa2_debugfs dbg;
+#endif
-+
+ /* array of classification rules */
+ struct dpaa2_eth_cls_rule *cls_rule;
-+
+ struct dpni_tx_shaping_cfg shaping_cfg;
+
+ u8 dcbx_mode;
+ struct ieee_pfc pfc;
+ bool vlan_clsf_set;
++ bool tx_pause_frames;
++
++ bool ceetm_en;
++};
++
++enum dpaa2_eth_rx_dist {
++ DPAA2_ETH_RX_DIST_HASH,
++ DPAA2_ETH_RX_DIST_FS,
++ DPAA2_ETH_RX_DIST_LEGACY
+};
+
++/* Supported Rx distribution field ids */
++#define DPAA2_ETH_DIST_ETHSRC BIT(0)
++#define DPAA2_ETH_DIST_ETHDST BIT(1)
++#define DPAA2_ETH_DIST_ETHTYPE BIT(2)
++#define DPAA2_ETH_DIST_VLAN BIT(3)
++#define DPAA2_ETH_DIST_IPSRC BIT(4)
++#define DPAA2_ETH_DIST_IPDST BIT(5)
++#define DPAA2_ETH_DIST_IPPROTO BIT(6)
++#define DPAA2_ETH_DIST_L4SRC BIT(7)
++#define DPAA2_ETH_DIST_L4DST BIT(8)
++#define DPAA2_ETH_DIST_ALL (~0U)
++
++/* Default Rx hash key */
++#define DPAA2_ETH_DIST_DEFAULT_HASH \
++ (DPAA2_ETH_DIST_IPPROTO | \
++ DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_IPDST | \
++ DPAA2_ETH_DIST_L4SRC | DPAA2_ETH_DIST_L4DST)
++
+#define dpaa2_eth_hash_enabled(priv) \
+ ((priv)->dpni_attrs.num_queues > 1)
+
@@ -5175,12 +6906,74 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#define dpaa2_eth_fs_count(priv) \
+ ((priv)->dpni_attrs.fs_entries)
+
-+/* size of DMA memory used to pass configuration to classifier, in bytes */
++/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
+#define DPAA2_CLASSIFIER_DMA_SIZE 256
+
+extern const struct ethtool_ops dpaa2_ethtool_ops;
+extern const char dpaa2_eth_drv_version[];
+
++static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
++ u16 ver_major, u16 ver_minor)
++{
++ if (priv->dpni_ver_major == ver_major)
++ return priv->dpni_ver_minor - ver_minor;
++ return priv->dpni_ver_major - ver_major;
++}
++
++#define DPNI_DIST_KEY_VER_MAJOR 7
++#define DPNI_DIST_KEY_VER_MINOR 5
++
++static inline bool dpaa2_eth_has_legacy_dist(struct dpaa2_eth_priv *priv)
++{
++ return (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DIST_KEY_VER_MAJOR,
++ DPNI_DIST_KEY_VER_MINOR) < 0);
++}
++
++/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around
++ * the buffer also needs space for its shared info struct, and we need
++ * to allocate enough to accommodate hardware alignment restrictions
++ */
++static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv)
++{
++ return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align;
++}
++
++/* Total headroom needed by the hardware in Tx frame buffers */
++static inline unsigned int
++dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, struct sk_buff *skb)
++{
++ unsigned int headroom = DPAA2_ETH_SWA_SIZE;
++
++ /* If we don't have an skb (e.g. XDP buffer), we only need space for
++ * the software annotation area
++ */
++ if (!skb)
++ return headroom;
++
++ /* For non-linear skbs we have no headroom requirement, as we build a
++ * SG frame with a newly allocated SGT buffer
++ */
++ if (skb_is_nonlinear(skb))
++ return 0;
++
++ /* If we have Tx timestamping, need 128B hardware annotation */
++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
++ headroom += DPAA2_ETH_TX_HWA_SIZE;
++
++ return headroom;
++}
++
++/* Extra headroom space requested to hardware, in order to make sure there's
++ * no realloc'ing in forwarding scenarios. We need to reserve enough space
++ * such that we can accommodate the maximum required Tx offset and alignment
++ * in the ingress frame buffer
++ */
++static inline unsigned int dpaa2_eth_rx_headroom(struct dpaa2_eth_priv *priv)
++{
++ return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN -
++ DPAA2_ETH_RX_HWA_SIZE;
++}
++
+static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
+{
+ return priv->dpni_attrs.num_queues;
@@ -5216,14 +7009,24 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ return DPAA2_ETH_TD_QUEUE;
+}
+
++static inline int dpaa2_eth_ch_count(struct dpaa2_eth_priv *priv)
++{
++ return 1;
++}
++
+void check_cls_support(struct dpaa2_eth_priv *priv);
+
+int set_rx_taildrop(struct dpaa2_eth_priv *priv);
++
++int dpaa2_eth_set_dist_key(struct dpaa2_eth_priv *priv,
++ enum dpaa2_eth_rx_dist type, u32 key_fields);
++
+#endif /* __DPAA2_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
-@@ -0,0 +1,864 @@
-+/* Copyright 2014-2015 Freescale Semiconductor Inc.
+@@ -0,0 +1,878 @@
++/* Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016-2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
@@ -5257,49 +7060,44 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#include "dpni.h" /* DPNI_LINK_OPT_* */
+#include "dpaa2-eth.h"
+
-+/* To be kept in sync with dpni_statistics */
++/* To be kept in sync with DPNI statistics */
+static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
-+ "rx frames",
-+ "rx bytes",
-+ "rx mcast frames",
-+ "rx mcast bytes",
-+ "rx bcast frames",
-+ "rx bcast bytes",
-+ "tx frames",
-+ "tx bytes",
-+ "tx mcast frames",
-+ "tx mcast bytes",
-+ "tx bcast frames",
-+ "tx bcast bytes",
-+ "rx filtered frames",
-+ "rx discarded frames",
-+ "rx nobuffer discards",
-+ "tx discarded frames",
-+ "tx confirmed frames",
++ "[hw] rx frames",
++ "[hw] rx bytes",
++ "[hw] rx mcast frames",
++ "[hw] rx mcast bytes",
++ "[hw] rx bcast frames",
++ "[hw] rx bcast bytes",
++ "[hw] tx frames",
++ "[hw] tx bytes",
++ "[hw] tx mcast frames",
++ "[hw] tx mcast bytes",
++ "[hw] tx bcast frames",
++ "[hw] tx bcast bytes",
++ "[hw] rx filtered frames",
++ "[hw] rx discarded frames",
++ "[hw] rx nobuffer discards",
++ "[hw] tx discarded frames",
++ "[hw] tx confirmed frames",
+};
+
+#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
+
-+/* To be kept in sync with 'struct dpaa2_eth_drv_stats' */
+static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
+ /* per-cpu stats */
-+
-+ "tx conf frames",
-+ "tx conf bytes",
-+ "tx sg frames",
-+ "tx sg bytes",
-+ "rx sg frames",
-+ "rx sg bytes",
-+ /* how many times we had to retry the enqueue command */
-+ "enqueue portal busy",
-+
++ "[drv] tx conf frames",
++ "[drv] tx conf bytes",
++ "[drv] tx sg frames",
++ "[drv] tx sg bytes",
++ "[drv] tx realloc frames",
++ "[drv] rx sg frames",
++ "[drv] rx sg bytes",
++ "[drv] enqueue portal busy",
+ /* Channel stats */
-+ /* How many times we had to retry the volatile dequeue command */
-+ "dequeue portal busy",
-+ "channel pull errors",
-+ /* Number of notifications received */
-+ "cdan",
-+ "tx congestion state",
++ "[drv] dequeue portal busy",
++ "[drv] channel pull errors",
++ "[drv] cdan",
++ "[drv] tx congestion state",
+#ifdef CONFIG_FSL_QBMAN_DEBUG
+ /* FQ stats */
+ "rx pending frames",
@@ -5315,16 +7113,22 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *drvinfo)
+{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, dpaa2_eth_drv_version,
+ sizeof(drvinfo->version));
-+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
++
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);
++
+ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ sizeof(drvinfo->bus_info));
+}
+
-+static int dpaa2_eth_get_settings(struct net_device *net_dev,
-+ struct ethtool_cmd *cmd)
++static int
++dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
++ struct ethtool_link_ksettings *link_settings)
+{
+ struct dpni_link_state state = {0};
+ int err = 0;
@@ -5332,7 +7136,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+ if (err) {
-+ netdev_err(net_dev, "ERROR %d getting link state", err);
++ netdev_err(net_dev, "ERROR %d getting link state\n", err);
+ goto out;
+ }
+
@@ -5342,39 +7146,52 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * beyond the DPNI attributes.
+ */
+ if (state.options & DPNI_LINK_OPT_AUTONEG)
-+ cmd->autoneg = AUTONEG_ENABLE;
++ link_settings->base.autoneg = AUTONEG_ENABLE;
+ if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
-+ cmd->duplex = DUPLEX_FULL;
-+ ethtool_cmd_speed_set(cmd, state.rate);
++ link_settings->base.duplex = DUPLEX_FULL;
++ link_settings->base.speed = state.rate;
+
+out:
+ return err;
+}
+
-+static int dpaa2_eth_set_settings(struct net_device *net_dev,
-+ struct ethtool_cmd *cmd)
++#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR 7
++#define DPNI_DYNAMIC_LINK_SET_VER_MINOR 1
++static int
++dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
++ const struct ethtool_link_ksettings *link_settings)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpni_link_state state = {0};
+ struct dpni_link_cfg cfg = {0};
+ int err = 0;
+
-+ netdev_dbg(net_dev, "Setting link parameters...");
++ /* If using an older MC version, the DPNI must be down
++ * in order to be able to change link settings. Taking steps to let
++ * the user know that.
++ */
++ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR,
++ DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) {
++ if (netif_running(net_dev)) {
++ netdev_info(net_dev, "Interface must be brought down first.\n");
++ return -EACCES;
++ }
++ }
+
-+ /* Need to interrogate on link state to get flow control params */
++ /* Need to interrogate link state to get flow control params */
+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+ if (err) {
-+ netdev_err(net_dev, "ERROR %d getting link state", err);
++ netdev_err(net_dev, "Error getting link state\n");
+ goto out;
+ }
+
+ cfg.options = state.options;
-+ cfg.rate = ethtool_cmd_speed(cmd);
-+ if (cmd->autoneg == AUTONEG_ENABLE)
++ cfg.rate = link_settings->base.speed;
++ if (link_settings->base.autoneg == AUTONEG_ENABLE)
+ cfg.options |= DPNI_LINK_OPT_AUTONEG;
+ else
+ cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
-+ if (cmd->duplex == DUPLEX_HALF)
++ if (link_settings->base.duplex == DUPLEX_HALF)
+ cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
+ else
+ cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
@@ -5384,7 +7201,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ /* ethtool will be loud enough if we return an error; no point
+ * in putting our own error message on the console by default
+ */
-+ netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
++ netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);
+
+out:
+ return err;
@@ -5399,13 +7216,13 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+ if (err)
-+ netdev_dbg(net_dev, "ERROR %d getting link state", err);
++ netdev_dbg(net_dev, "Error getting link state\n");
+
-+ /* for now, pause frames autonegotiation is not separate */
++ /* Report general port autonegotiation status */
+ pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG);
+ pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE);
+ pause->tx_pause = pause->rx_pause ^
-+ !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
++ !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
+}
+
+static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
@@ -5419,7 +7236,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+ if (err) {
-+ netdev_dbg(net_dev, "ERROR %d getting link state", err);
++ netdev_dbg(net_dev, "Error getting link state\n");
+ goto out;
+ }
+
@@ -5428,9 +7245,12 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^
+ !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE);
+
++ /* We don't support changing pause frame autonegotiation separately
++ * from general port autoneg
++ */
+ if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG))
+ netdev_warn(net_dev,
-+ "WARN: Can't change pause frames autoneg separately\n");
++ "Cannot change pause frame autoneg separately\n");
+
+ if (pause->rx_pause)
+ cfg.options |= DPNI_LINK_OPT_PAUSE;
@@ -5444,23 +7264,19 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
+ if (err) {
-+ /* ethtool will be loud enough if we return an error; no point
-+ * in putting our own error message on the console by default
-+ */
-+ netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
++ netdev_dbg(net_dev, "Error setting link\n");
+ goto out;
+ }
+
-+ /* Enable / disable taildrops if Tx pause frames have changed */
++ /* Enable/disable Rx FQ taildrop if Tx pause frames have changed */
+ if (current_tx_pause == pause->tx_pause)
+ goto out;
+
+ priv->tx_pause_frames = pause->tx_pause;
+ err = set_rx_taildrop(priv);
+ if (err)
-+ netdev_dbg(net_dev, "ERROR %d configuring taildrop", err);
++ netdev_dbg(net_dev, "Error configuring taildrop\n");
+
-+ priv->tx_pause_frames = pause->tx_pause;
+out:
+ return err;
+}
@@ -5501,8 +7317,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct ethtool_stats *stats,
+ u64 *data)
+{
-+ int i = 0; /* Current index in the data array */
-+ int j = 0, k, err;
++ int i = 0;
++ int j, k, err;
++ int num_cnt;
+ union dpni_statistics dpni_stats;
+
+#ifdef CONFIG_FSL_QBMAN_DEBUG
@@ -5523,38 +7340,22 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ /* Print standard counters, from DPNI statistics */
+ for (j = 0; j <= 2; j++) {
+ err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
-+ j, &dpni_stats);
++ j, 0, &dpni_stats);
+ if (err != 0)
-+ netdev_warn(net_dev, "Err %d getting DPNI stats page %d",
-+ err, j);
-+
++ netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
+ switch (j) {
+ case 0:
-+ *(data + i++) = dpni_stats.page_0.ingress_all_frames;
-+ *(data + i++) = dpni_stats.page_0.ingress_all_bytes;
-+ *(data + i++) = dpni_stats.page_0.ingress_multicast_frames;
-+ *(data + i++) = dpni_stats.page_0.ingress_multicast_bytes;
-+ *(data + i++) = dpni_stats.page_0.ingress_broadcast_frames;
-+ *(data + i++) = dpni_stats.page_0.ingress_broadcast_bytes;
-+ break;
++ num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64);
++ break;
+ case 1:
-+ *(data + i++) = dpni_stats.page_1.egress_all_frames;
-+ *(data + i++) = dpni_stats.page_1.egress_all_bytes;
-+ *(data + i++) = dpni_stats.page_1.egress_multicast_frames;
-+ *(data + i++) = dpni_stats.page_1.egress_multicast_bytes;
-+ *(data + i++) = dpni_stats.page_1.egress_broadcast_frames;
-+ *(data + i++) = dpni_stats.page_1.egress_broadcast_bytes;
-+ break;
++ num_cnt = sizeof(dpni_stats.page_1) / sizeof(u64);
++ break;
+ case 2:
-+ *(data + i++) = dpni_stats.page_2.ingress_filtered_frames;
-+ *(data + i++) = dpni_stats.page_2.ingress_discarded_frames;
-+ *(data + i++) = dpni_stats.page_2.ingress_nobuffer_discards;
-+ *(data + i++) = dpni_stats.page_2.egress_discarded_frames;
-+ *(data + i++) = dpni_stats.page_2.egress_confirmed_frames;
-+ break;
-+ default:
-+ break;
++ num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64);
++ break;
+ }
++ for (k = 0; k < num_cnt; k++)
++ *(data + i++) = dpni_stats.raw.counter[k];
+ }
+
+ /* Print per-cpu extra stats */
@@ -5563,10 +7364,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
+ *((__u64 *)data + i + j) += *((__u64 *)extras + j);
+ }
-+
+ i += j;
+
-+ /* We may be using fewer DPIOs than actual CPUs */
+ for (j = 0; j < priv->num_channels; j++) {
+ ch_stats = &priv->channel[j]->stats;
+ cdan += ch_stats->cdan;
@@ -5617,11 +7416,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+{
+ int i, off = 0;
+
-+ for (i = 0; i < priv->num_hash_fields; i++) {
-+ if (priv->hash_fields[i].cls_prot == prot &&
-+ priv->hash_fields[i].cls_field == field)
++ for (i = 0; i < priv->num_dist_fields; i++) {
++ if (priv->dist_fields[i].cls_prot == prot &&
++ priv->dist_fields[i].cls_field == field)
+ return off;
-+ off += priv->hash_fields[i].size;
++ off += priv->dist_fields[i].size;
+ }
+
+ return -1;
@@ -5631,8 +7430,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+{
+ u8 i, size = 0;
+
-+ for (i = 0; i < priv->num_hash_fields; i++)
-+ size += priv->hash_fields[i].size;
++ for (i = 0; i < priv->num_dist_fields; i++)
++ size += priv->dist_fields[i].size;
+
+ return size;
+}
@@ -5649,7 +7448,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ key_size);
+ goto disable_fs;
+ }
-+ if (priv->num_hash_fields > DPKG_MAX_NUM_OF_EXTRACTS) {
++ if (priv->num_dist_fields > DPKG_MAX_NUM_OF_EXTRACTS) {
+ dev_info(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n",
+ DPKG_MAX_NUM_OF_EXTRACTS);
+ goto disable_fs;
@@ -5915,8 +7714,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ fs->location >= rule_cnt)
+ return -EINVAL;
+
-+ /* When adding a new rule, check if location if available,
-+ * and if not free the existing table entry before inserting
++	/* When adding a new rule, check if location is available
++ * and if not, free the existing table entry before inserting
+ * the new one
+ */
+ if (add && (priv->cls_rule[fs->location].in_use == true))
@@ -6001,6 +7800,22 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ return 0;
+}
+
++static int set_hash(struct net_device *net_dev, u64 data)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ u32 key = 0;
++ int i;
++
++ if (data & RXH_DISCARD)
++ return -EOPNOTSUPP;
++
++ for (i = 0; i < priv->num_dist_fields; i++)
++ if (priv->dist_fields[i].rxnfc_field & data)
++ key |= priv->dist_fields[i].id;
++
++ return dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_HASH, key);
++}
++
+static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *rxnfc)
+{
@@ -6010,11 +7825,12 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ case ETHTOOL_SRXCLSRLINS:
+ err = add_cls(net_dev, &rxnfc->fs);
+ break;
-+
+ case ETHTOOL_SRXCLSRLDEL:
+ err = del_cls(net_dev, rxnfc->fs.location);
+ break;
-+
++ case ETHTOOL_SRXFH:
++ err = set_hash(net_dev, rxnfc->data);
++ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
@@ -6031,12 +7847,12 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXFH:
-+ /* we purposely ignore cmd->flow_type, because the hashing key
-+ * is the same (and fixed) for all protocols
++ /* we purposely ignore cmd->flow_type for now, because the
++ * classifier only supports a single set of fields for all
++ * protocols
+ */
-+ rxnfc->data = priv->rx_flow_hash;
++ rxnfc->data = priv->rx_hash_fields;
+ break;
-+
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = dpaa2_eth_queue_count(priv);
+ break;
@@ -6077,8 +7893,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+const struct ethtool_ops dpaa2_ethtool_ops = {
+ .get_drvinfo = dpaa2_eth_get_drvinfo,
+ .get_link = ethtool_op_get_link,
-+ .get_settings = dpaa2_eth_get_settings,
-+ .set_settings = dpaa2_eth_set_settings,
++ .get_link_ksettings = dpaa2_eth_get_link_ksettings,
++ .set_link_ksettings = dpaa2_eth_set_link_ksettings,
+ .get_pauseparam = dpaa2_eth_get_pauseparam,
+ .set_pauseparam = dpaa2_eth_set_pauseparam,
+ .get_sset_count = dpaa2_eth_get_sset_count,
@@ -6268,7 +8084,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#endif /* __FSL_DPKG_H_ */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
-@@ -0,0 +1,658 @@
+@@ -0,0 +1,719 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
@@ -6304,6 +8120,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#ifndef _FSL_DPNI_CMD_H
+#define _FSL_DPNI_CMD_H
+
++#include "dpni.h"
++
+/* DPNI Version */
+#define DPNI_VER_MAJOR 7
+#define DPNI_VER_MINOR 0
@@ -6344,7 +8162,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
+#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
+#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
-+#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B)
++#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B)
+
+#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
+#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
@@ -6360,11 +8178,13 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
+#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
++#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
+#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
+#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
+#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
+
-+#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D)
++#define DPNI_CMDID_SET_TX_PRIORITIES DPNI_CMD_V2(0x250)
++#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V2(0x25D)
+#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
+#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
+#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
@@ -6384,6 +8204,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
+#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
+
++#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
++#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
++
+/* Macros for accessing command fields smaller than 1byte */
+#define DPNI_MASK(field) \
+ GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
@@ -6578,6 +8401,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+struct dpni_cmd_get_statistics {
+ u8 page_number;
++ u8 param;
+};
+
+struct dpni_rsp_get_statistics {
@@ -6610,12 +8434,20 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ __le64 options;
+};
+
++#define DPNI_COUPLED_SHIFT 0
++#define DPNI_COUPLED_SIZE 1
++
+struct dpni_cmd_set_tx_shaping {
+ /* cmd word 0 */
-+ __le16 max_burst_size;
-+ __le16 pad0[3];
++ __le16 tx_cr_max_burst_size;
++ __le16 tx_er_max_burst_size;
++ __le32 pad;
+ /* cmd word 1 */
-+ __le32 rate_limit;
++ __le32 tx_cr_rate_limit;
++ __le32 tx_er_rate_limit;
++ /* cmd word 2 */
++ /* from LSB: coupled:1 */
++ u8 coupled;
+};
+
+struct dpni_cmd_set_max_frame_length {
@@ -6677,6 +8509,24 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 flags;
+};
+
++#define DPNI_SEPARATE_GRP_SHIFT 0
++#define DPNI_SEPARATE_GRP_SIZE 1
++#define DPNI_MODE_1_SHIFT 0
++#define DPNI_MODE_1_SIZE 4
++#define DPNI_MODE_2_SHIFT 4
++#define DPNI_MODE_2_SIZE 4
++
++struct dpni_cmd_set_tx_priorities {
++ __le16 flags;
++ u8 prio_group_A;
++ u8 prio_group_B;
++ __le32 pad0;
++ u8 modes[4];
++ __le32 pad1;
++ __le64 pad2;
++ __le16 delta_bandwidth[8];
++};
++
+#define DPNI_DIST_MODE_SHIFT 0
+#define DPNI_DIST_MODE_SIZE 4
+#define DPNI_MISS_ACTION_SHIFT 4
@@ -6790,45 +8640,45 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#define DPNI_DISCARD_ON_MISS_SIZE 1
+
+struct dpni_cmd_set_qos_table {
-+ u32 pad;
++ __le32 pad;
+ u8 default_tc;
+ /* only the LSB */
+ u8 discard_on_miss;
-+ u16 pad1[21];
-+ u64 key_cfg_iova;
++ __le16 pad1[21];
++ __le64 key_cfg_iova;
+};
+
+struct dpni_cmd_add_qos_entry {
-+ u16 pad;
++ __le16 pad;
+ u8 tc_id;
+ u8 key_size;
-+ u16 index;
-+ u16 pad2;
-+ u64 key_iova;
-+ u64 mask_iova;
++ __le16 index;
++ __le16 pad2;
++ __le64 key_iova;
++ __le64 mask_iova;
+};
+
+struct dpni_cmd_remove_qos_entry {
+ u8 pad1[3];
+ u8 key_size;
-+ u32 pad2;
-+ u64 key_iova;
-+ u64 mask_iova;
++ __le32 pad2;
++ __le64 key_iova;
++ __le64 mask_iova;
+};
+
+struct dpni_cmd_add_fs_entry {
+ /* cmd word 0 */
-+ u16 options;
++ __le16 options;
+ u8 tc_id;
+ u8 key_size;
-+ u16 index;
-+ u16 flow_id;
++ __le16 index;
++ __le16 flow_id;
+ /* cmd word 1 */
-+ u64 key_iova;
++ __le64 key_iova;
+ /* cmd word 2 */
-+ u64 mask_iova;
++ __le64 mask_iova;
+ /* cmd word 3 */
-+ u64 flc;
++ __le64 flc;
+};
+
+struct dpni_cmd_remove_fs_entry {
@@ -6838,9 +8688,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 key_size;
+ __le32 pad1;
+ /* cmd word 1 */
-+ u64 key_iova;
++ __le64 key_iova;
+ /* cmd word 2 */
-+ u64 mask_iova;
++ __le64 mask_iova;
+};
+
+struct dpni_cmd_set_taildrop {
@@ -6878,6 +8728,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ __le32 threshold;
+};
+
++struct dpni_rsp_get_api_version {
++	/* little-endian on the wire; parsed with le16_to_cpu() */
++	__le16 major;
++	__le16 minor;
++};
++
+#define DPNI_DEST_TYPE_SHIFT 0
+#define DPNI_DEST_TYPE_SIZE 4
+#define DPNI_CONG_UNITS_SHIFT 4
@@ -6889,18 +8744,18 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 tc;
+ u8 pad[6];
+ /* cmd word 1 */
-+ u32 dest_id;
-+ u16 notification_mode;
++ __le32 dest_id;
++ __le16 notification_mode;
+ u8 dest_priority;
+ /* from LSB: dest_type: 4 units:2 */
+ u8 type_units;
+ /* cmd word 2 */
-+ u64 message_iova;
++ __le64 message_iova;
+ /* cmd word 3 */
-+ u64 message_ctx;
++ __le64 message_ctx;
+ /* cmd word 4 */
-+ u32 threshold_entry;
-+ u32 threshold_exit;
++ __le32 threshold_entry;
++ __le32 threshold_exit;
+};
+
+struct dpni_cmd_get_congestion_notification {
@@ -6911,25 +8766,47 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+struct dpni_rsp_get_congestion_notification {
+ /* cmd word 0 */
-+ u64 pad;
++ __le64 pad;
+ /* cmd word 1 */
-+ u32 dest_id;
-+ u16 notification_mode;
++ __le32 dest_id;
++ __le16 notification_mode;
+ u8 dest_priority;
+ /* from LSB: dest_type: 4 units:2 */
+ u8 type_units;
+ /* cmd word 2 */
-+ u64 message_iova;
++ __le64 message_iova;
+ /* cmd word 3 */
-+ u64 message_ctx;
++ __le64 message_ctx;
+ /* cmd word 4 */
-+ u32 threshold_entry;
-+ u32 threshold_exit;
++ __le32 threshold_entry;
++ __le32 threshold_exit;
++};
++
++#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
++#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
++struct dpni_cmd_set_rx_fs_dist {
++ __le16 dist_size;
++ u8 enable;
++ u8 tc;
++ __le16 miss_flow_id;
++ __le16 pad;
++ __le64 key_cfg_iova;
++};
++
++#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
++#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
++struct dpni_cmd_set_rx_hash_dist {
++ __le16 dist_size;
++ u8 enable;
++ u8 tc;
++ __le32 pad;
++ __le64 key_cfg_iova;
+};
++
+#endif /* _FSL_DPNI_CMD_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
-@@ -0,0 +1,1903 @@
+@@ -0,0 +1,2112 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
@@ -6962,8 +8839,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#include "../../fsl-mc/include/mc-sys.h"
-+#include "../../fsl-mc/include/mc-cmd.h"
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/fsl/mc.h>
+#include "dpni.h"
+#include "dpni-cmd.h"
+
@@ -7053,7 +8931,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ int dpni_id,
+ u16 *token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_open *cmd_params;
+
+ int err;
@@ -7091,7 +8969,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
@@ -7119,7 +8997,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ const struct dpni_pools_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_pools *cmd_params;
+ int i;
+
@@ -7156,7 +9034,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
@@ -7179,7 +9057,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
@@ -7204,7 +9082,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ int *en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_is_enabled *rsp_params;
+ int err;
+
@@ -7237,7 +9115,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
@@ -7269,7 +9147,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 irq_index,
+ u8 en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
@@ -7300,7 +9178,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 irq_index,
+ u8 *en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_enable *cmd_params;
+ struct dpni_rsp_get_irq_enable *rsp_params;
+
@@ -7347,7 +9225,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 irq_index,
+ u32 mask)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
@@ -7381,7 +9259,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 irq_index,
+ u32 *mask)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_mask *cmd_params;
+ struct dpni_rsp_get_irq_mask *rsp_params;
+ int err;
@@ -7423,7 +9301,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 irq_index,
+ u32 *status)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_status *cmd_params;
+ struct dpni_rsp_get_irq_status *rsp_params;
+ int err;
@@ -7466,7 +9344,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 irq_index,
+ u32 status)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
@@ -7495,7 +9373,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ struct dpni_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_attr *rsp_params;
+
+ int err;
@@ -7543,7 +9421,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ struct dpni_error_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_errors_behavior *cmd_params;
+
+ /* prepare command */
@@ -7575,7 +9453,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ enum dpni_queue_type qtype,
+ struct dpni_buffer_layout *layout)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_buffer_layout *cmd_params;
+ struct dpni_rsp_get_buffer_layout *rsp_params;
+ int err;
@@ -7623,7 +9501,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ enum dpni_queue_type qtype,
+ const struct dpni_buffer_layout *layout)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_buffer_layout *cmd_params;
+
+ /* prepare command */
@@ -7665,7 +9543,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ enum dpni_offload type,
+ u32 config)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_offload *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
@@ -7684,7 +9562,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ enum dpni_offload type,
+ u32 *config)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_offload *cmd_params;
+ struct dpni_rsp_get_offload *rsp_params;
+ int err;
@@ -7726,7 +9604,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ enum dpni_queue_type qtype,
+ u16 *qdid)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_qdid *cmd_params;
+ struct dpni_rsp_get_qdid *rsp_params;
+ int err;
@@ -7764,7 +9642,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ u16 *data_offset)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_tx_data_offset *rsp_params;
+ int err;
+
@@ -7799,7 +9677,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ const struct dpni_link_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_link_cfg *cmd_params;
+
+ /* prepare command */
@@ -7828,7 +9706,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ struct dpni_link_state *state)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_link_state *rsp_params;
+ int err;
+
@@ -7853,19 +9731,23 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+/**
+ * dpni_set_tx_shaping() - Set the transmit shaping
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tx_shaper: tx shaping configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tx_cr_shaper: TX committed rate shaping configuration
++ * @tx_er_shaper: TX excess rate shaping configuration
++ * @coupled: Committed and excess rate shapers are coupled
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
-+ const struct dpni_tx_shaping_cfg *tx_shaper)
++ const struct dpni_tx_shaping_cfg *tx_cr_shaper,
++ const struct dpni_tx_shaping_cfg *tx_er_shaper,
++ int coupled)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_tx_shaping *cmd_params;
+
+ /* prepare command */
@@ -7873,8 +9755,13 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
-+ cmd_params->max_burst_size = cpu_to_le16(tx_shaper->max_burst_size);
-+ cmd_params->rate_limit = cpu_to_le32(tx_shaper->rate_limit);
++ cmd_params->tx_cr_max_burst_size =
++ cpu_to_le16(tx_cr_shaper->max_burst_size);
++ cmd_params->tx_er_max_burst_size =
++ cpu_to_le16(tx_er_shaper->max_burst_size);
++ cmd_params->tx_cr_rate_limit = cpu_to_le32(tx_cr_shaper->rate_limit);
++ cmd_params->tx_er_rate_limit = cpu_to_le32(tx_er_shaper->rate_limit);
++ dpni_set_field(cmd_params->coupled, COUPLED, coupled);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
@@ -7896,7 +9783,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ u16 max_frame_length)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_max_frame_length *cmd_params;
+
+ /* prepare command */
@@ -7926,7 +9813,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ u16 *max_frame_length)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_max_frame_length *rsp_params;
+ int err;
+
@@ -7961,7 +9848,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ int en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_multicast_promisc *cmd_params;
+
+ /* prepare command */
@@ -7989,7 +9876,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ int *en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_multicast_promisc *rsp_params;
+ int err;
+
@@ -8024,7 +9911,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ int en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_unicast_promisc *cmd_params;
+
+ /* prepare command */
@@ -8052,7 +9939,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ int *en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_unicast_promisc *rsp_params;
+ int err;
+
@@ -8087,7 +9974,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ const u8 mac_addr[6])
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_primary_mac_addr *cmd_params;
+ int i;
+
@@ -8117,7 +10004,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ u8 mac_addr[6])
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_primary_mac_addr *rsp_params;
+ int i, err;
+
@@ -8156,7 +10043,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ u8 mac_addr[6])
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_port_mac_addr *rsp_params;
+ int i, err;
+
@@ -8192,7 +10079,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ const u8 mac_addr[6])
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_add_mac_addr *cmd_params;
+ int i;
+
@@ -8222,7 +10109,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ const u8 mac_addr[6])
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_remove_mac_addr *cmd_params;
+ int i;
+
@@ -8256,7 +10143,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ int unicast,
+ int multicast)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_clear_mac_filters *cmd_params;
+
+ /* prepare command */
@@ -8272,6 +10159,55 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+}
+
+/**
++ * dpni_set_tx_priorities() - Set transmission TC priority configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Transmission selection configuration
++ *
++ * warning: Allowed only when DPNI is disabled
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_tx_priorities_cfg *cfg)
++{
++ struct dpni_cmd_set_tx_priorities *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ int i;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_PRIORITIES,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_tx_priorities *)cmd.params;
++ dpni_set_field(cmd_params->flags,
++ SEPARATE_GRP,
++ cfg->separate_groups);
++ cmd_params->prio_group_A = cfg->prio_group_A;
++ cmd_params->prio_group_B = cfg->prio_group_B;
++
++ for (i = 0; i + 1 < DPNI_MAX_TC; i += 2) {
++ dpni_set_field(cmd_params->modes[i / 2],
++ MODE_1,
++ cfg->tc_sched[i].mode);
++ dpni_set_field(cmd_params->modes[i / 2],
++ MODE_2,
++ cfg->tc_sched[i + 1].mode);
++ }
++
++ for (i = 0; i < DPNI_MAX_TC; i++) {
++ cmd_params->delta_bandwidth[i] =
++ cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
++ }
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -8290,7 +10226,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 tc_id,
+ const struct dpni_rx_tc_dist_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_rx_tc_dist *cmd_params;
+
+ /* prepare command */
@@ -8330,7 +10266,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ const struct dpni_qos_tbl_cfg *cfg)
+{
+ struct dpni_cmd_set_qos_table *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
@@ -8368,7 +10304,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 index)
+{
+ struct dpni_cmd_add_qos_entry *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
@@ -8386,6 +10322,36 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+}
+
+/**
++ * dpni_remove_qos_entry() - Remove QoS mapping entry
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: QoS rule to remove
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rule_cfg *cfg)
++{
++ struct dpni_cmd_remove_qos_entry *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params;
++ cmd_params->key_size = cfg->key_size;
++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
+ * (to select a flow ID)
+ * @mc_io: Pointer to MC portal's I/O object
@@ -8409,7 +10375,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ const struct dpni_fs_action_cfg *action)
+{
+ struct dpni_cmd_add_fs_entry *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
@@ -8431,7 +10397,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+/**
+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
-+ * traffic class
++ * traffic class
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
@@ -8447,7 +10413,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ const struct dpni_rule_cfg *cfg)
+{
+ struct dpni_cmd_remove_fs_entry *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
@@ -8465,17 +10431,18 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+/**
+ * dpni_set_congestion_notification() - Set traffic class congestion
-+ * notification configuration
++ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: congestion notification configuration
++ * @cfg: Congestion notification configuration
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
-+int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
++int dpni_set_congestion_notification(
++ struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
@@ -8483,7 +10450,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ const struct dpni_congestion_notification_cfg *cfg)
+{
+ struct dpni_cmd_set_congestion_notification *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
@@ -8515,7 +10482,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
-+ * @tc_id: Traffic class selection (0-7)
++ * @tc_id: bits 7-4 contain ceetm channel index (valid only for TX);
++ * bits 3-0 contain traffic class.
++ * Use macro DPNI_BUILD_CH_TC() to build correct value for
++ * tc_id parameter.
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on Success; error code otherwise.
@@ -8530,7 +10500,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+{
+ struct dpni_rsp_get_congestion_notification *rsp_params;
+ struct dpni_cmd_get_congestion_notification *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
@@ -8587,7 +10557,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 options,
+ const struct dpni_queue *queue)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_queue *cmd_params;
+
+ /* prepare command */
@@ -8635,7 +10605,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct dpni_queue *queue,
+ struct dpni_queue_id *qid)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_queue *cmd_params;
+ struct dpni_rsp_get_queue *rsp_params;
+ int err;
@@ -8659,7 +10629,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ queue->destination.id = le32_to_cpu(rsp_params->dest_id);
+ queue->destination.priority = rsp_params->dest_prio;
+ queue->destination.type = dpni_get_field(rsp_params->flags,
-+ DEST_TYPE);
++ DEST_TYPE);
+ queue->flc.stash_control = dpni_get_field(rsp_params->flags,
+ STASH_CTRL);
+ queue->destination.hold_active = dpni_get_field(rsp_params->flags,
@@ -8679,6 +10649,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * @token: Token of DPNI object
+ * @page: Selects the statistics page to retrieve, see
+ * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
++ * @param: Custom parameter for some pages used to select a certain
++ * statistic source, for example the TC.
+ * @stat: Structure containing the statistics
+ *
+ * Return: '0' on Success; Error code otherwise.
@@ -8687,9 +10659,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token,
+ u8 page,
++ u8 param,
+ union dpni_statistics *stat)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_statistics *cmd_params;
+ struct dpni_rsp_get_statistics *rsp_params;
+ int i, err;
@@ -8700,6 +10673,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ token);
+ cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
+ cmd_params->page_number = page;
++ cmd_params->param = param;
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
@@ -8726,7 +10700,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
@@ -8745,7 +10719,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * @cg_point: Congestion point
+ * @q_type: Queue type on which the taildrop is configured.
+ * Only Rx queues are supported for now
-+ * @tc: Traffic class to apply this taildrop to
++ * @tc: bits 7-4 contain ceetm channel index (valid only for TX);
++ * bits 3-0 contain traffic class.
++ * Use macro DPNI_BUILD_CH_TC() to build correct value for
++ * tc parameter.
+ * @q_index: Index of the queue if the DPNI supports multiple queues for
+ * traffic distribution. Ignored if CONGESTION_POINT is not 0.
+ * @taildrop: Taildrop structure
@@ -8761,7 +10738,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 index,
+ struct dpni_taildrop *taildrop)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_taildrop *cmd_params;
+
+ /* prepare command */
@@ -8789,7 +10766,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * @cg_point: Congestion point
+ * @q_type: Queue type on which the taildrop is configured.
+ * Only Rx queues are supported for now
-+ * @tc: Traffic class to apply this taildrop to
++ * @tc: bits 7-4 contain ceetm channel index (valid only for TX);
++ * bits 3-0 contain traffic class.
++ * Use macro DPNI_BUILD_CH_TC() to build correct value for
++ * tc parameter.
+ * @q_index: Index of the queue if the DPNI supports multiple queues for
+ * traffic distribution. Ignored if CONGESTION_POINT is not 0.
+ * @taildrop: Taildrop structure
@@ -8805,7 +10785,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 index,
+ struct dpni_taildrop *taildrop)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_taildrop *cmd_params;
+ struct dpni_rsp_get_taildrop *rsp_params;
+ int err;
@@ -8833,9 +10813,115 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ return 0;
+}
++
++/**
++ * dpni_get_api_version() - Get Data Path Network Interface API version
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @major_ver: Major version of data path network interface API
++ * @minor_ver: Minor version of data path network interface API
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver)
++{
++ struct dpni_rsp_get_api_version *rsp_params;
++ struct fsl_mc_command cmd = { 0 };
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
++ cmd_flags, 0);
++
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
++ *major_ver = le16_to_cpu(rsp_params->major);
++ *minor_ver = le16_to_cpu(rsp_params->minor);
++
++ return 0;
++}
++
++/**
++ * dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPNI object
++ * @cfg: Distribution configuration
++ * If the FS is already enabled with a previous call the classification
++ * key will be changed but all the table rules are kept. If the
++ * existing rules do not match the key the results will not be
++ * predictable. It is the user responsibility to keep key integrity.
++ * If cfg.enable is set to 1 the command will create a flow steering table
++ * and will classify packets according to this table. The packets that
++ * miss all the table rules will be classified according to settings
++ * made in dpni_set_rx_hash_dist()
++ * If cfg.enable is set to 0 the command will clear flow steering table.
++ * The packets will be classified according to settings made in
++ * dpni_set_rx_hash_dist()
++ */
++int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
++			u32 cmd_flags,
++			u16 token,
++			const struct dpni_rx_dist_cfg *cfg)
++{
++	struct dpni_cmd_set_rx_fs_dist *cmd_params;
++	struct fsl_mc_command cmd = { 0 };
++
++	/* prepare command; MC expects little-endian multi-byte fields */
++	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
++	cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
++	dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
++	cmd_params->tc = cfg->tc;
++	cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
++	cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
++
++	/* send command to MC */
++	return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_rx_hash_dist() - Set Rx traffic class HASH distribution
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPNI object
++ * @cfg: Distribution configuration
++ * If cfg.enable is set to 1 the packets will be classified using a hash
++ * function based on the key received in cfg.key_cfg_iova parameter.
++ * If cfg.enable is set to 0 the packets will be sent to the queue configured
++ * in dpni_set_rx_dist_default_queue() call
++ */
++int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
++			  u32 cmd_flags,
++			  u16 token,
++			  const struct dpni_rx_dist_cfg *cfg)
++{
++	struct dpni_cmd_set_rx_hash_dist *cmd_params;
++	struct fsl_mc_command cmd = { 0 };
++
++	/* prepare command; MC expects little-endian multi-byte fields */
++	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
++					  cmd_flags,
++					  token);
++	cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
++	cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
++	dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
++	cmd_params->tc = cfg->tc;
++	cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
++
++	/* send command to MC */
++	return mc_send_command(mc_io, &cmd);
++}
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
-@@ -0,0 +1,1053 @@
+@@ -0,0 +1,1172 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
@@ -8893,11 +10979,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+/**
+ * Maximum number of senders
+ */
-+#define DPNI_MAX_SENDERS 8
++#define DPNI_MAX_SENDERS 16
+/**
+ * Maximum distribution size
+ */
-+#define DPNI_MAX_DIST_SIZE 8
++#define DPNI_MAX_DIST_SIZE 16
+
+/**
+ * All traffic classes considered; see dpni_set_queue()
@@ -9324,6 +11410,24 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u64 egress_confirmed_frames;
+ } page_2;
+ /**
++ * struct page_3 - Page_3 statistics structure with values for the
++ * selected TC
++ * @ceetm_dequeue_bytes: Cumulative count of the number of bytes
++ * dequeued
++ * @ceetm_dequeue_frames: Cumulative count of the number of frames
++ * dequeued
++ * @ceetm_reject_bytes: Cumulative count of the number of bytes in all
++ * frames whose enqueue was rejected
++ * @ceetm_reject_frames: Cumulative count of all frame enqueues
++ * rejected
++ */
++ struct {
++ u64 ceetm_dequeue_bytes;
++ u64 ceetm_dequeue_frames;
++ u64 ceetm_reject_bytes;
++ u64 ceetm_reject_frames;
++ } page_3;
++ /**
+ * struct raw - raw statistics structure
+ */
+ struct {
@@ -9335,6 +11439,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token,
+ u8 page,
++ u8 param,
+ union dpni_statistics *stat);
+
+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
@@ -9400,14 +11505,16 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * @max_burst_size: burst size in bytes (up to 64KB)
+ */
+struct dpni_tx_shaping_cfg {
-+ u32 rate_limit;
-+ u16 max_burst_size;
++ u32 rate_limit;
++ u16 max_burst_size;
+};
+
-+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_tx_shaping_cfg *tx_shaper);
++int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_tx_shaping_cfg *tx_cr_shaper,
++ const struct dpni_tx_shaping_cfg *tx_er_shaper,
++ int coupled);
+
+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
@@ -9530,6 +11637,50 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ const struct dpni_qos_tbl_cfg *cfg);
+
+/**
++ * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode
++ * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority
++ * @DPNI_TX_SCHED_WEIGHTED_A: weighted based scheduling in group A
++ * @DPNI_TX_SCHED_WEIGHTED_B: weighted based scheduling in group B
++ */
++enum dpni_tx_schedule_mode {
++ DPNI_TX_SCHED_STRICT_PRIORITY = 0,
++ DPNI_TX_SCHED_WEIGHTED_A,
++ DPNI_TX_SCHED_WEIGHTED_B,
++};
++
++/**
++ * struct dpni_tx_schedule_cfg - Structure representing Tx scheduling conf
++ * @mode: Scheduling mode
++ * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000;
++ * not applicable for 'strict-priority' mode;
++ */
++struct dpni_tx_schedule_cfg {
++ enum dpni_tx_schedule_mode mode;
++ u16 delta_bandwidth;
++};
++
++/**
++ * struct dpni_tx_priorities_cfg - Structure representing transmission
++ * priorities for DPNI TCs
++ * @tc_sched: An array of traffic-classes
++ * @prio_group_A: Priority of group A
++ * @prio_group_B: Priority of group B
++ * @separate_groups: Treat A and B groups as separate
++ *
++ */
++struct dpni_tx_priorities_cfg {
++ struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC];
++ u8 prio_group_A;
++ u8 prio_group_B;
++ u8 separate_groups;
++};
++
++int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_tx_priorities_cfg *cfg);
++
++/**
+ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
+ * @dist_size: Set the distribution size;
+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
@@ -9676,11 +11827,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+/**
+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
-+ * @dest_type: Destination type
-+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
-+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
-+ * are 0-1 or 0-7, depending on the number of priorities in that
-+ * channel; not relevant for 'DPNI_DEST_NONE' option
++ * @dest_type: Destination type
++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
++ * @priority: Priority selection within the DPIO or DPCON channel; valid
++ * values are 0-1 or 0-7, depending on the number of priorities
++ * in that channel; not relevant for 'DPNI_DEST_NONE' option
+ */
+struct dpni_dest_cfg {
+ enum dpni_dest dest_type;
@@ -9694,34 +11845,34 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * CSCN message is written to message_iova once entering a
+ * congestion state (see 'threshold_entry')
+ */
-+#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
++#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
+/**
+ * CSCN message is written to message_iova once exiting a
+ * congestion state (see 'threshold_exit')
+ */
-+#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
++#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
+/**
+ * CSCN write will attempt to allocate into a cache (coherent write);
+ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
+ */
-+#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
++#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once entering a congestion state
+ * (see 'threshold_entry')
+ */
-+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once exiting a congestion state
+ * (see 'threshold_exit')
+ */
-+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
+ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
+ */
-+#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
++#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
+/**
+ * This congestion will trigger flow control or priority flow control.
+ * This will have effect only if flow control is enabled with
@@ -9731,15 +11882,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+/**
+ * struct dpni_congestion_notification_cfg - congestion notification
-+ * configuration
-+ * @units: units type
-+ * @threshold_entry: above this threshold we enter a congestion state.
-+ * set it to '0' to disable it
-+ * @threshold_exit: below this threshold we exit the congestion state.
++ * configuration
++ * @units: Units type
++ * @threshold_entry: Above this threshold we enter a congestion state.
++ * set it to '0' to disable it
++ * @threshold_exit: Below this threshold we exit the congestion state.
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
-+ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is
-+ * contained in 'options'
++ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>'
++ * is contained in 'options'
+ * @dest_cfg: CSCN can be send to either DPIO or DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
+ */
@@ -9754,7 +11905,14 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 notification_mode;
+};
+
-+int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
++/** Compose TC parameter for function dpni_set_congestion_notification()
++ * and dpni_get_congestion_notification().
++ */
++#define DPNI_BUILD_CH_TC(ceetm_ch_idx, tc) \
++ ((((ceetm_ch_idx) & 0x0F) << 4) | ((tc) & 0x0F))
++
++int dpni_set_congestion_notification(
++ struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
@@ -9815,6 +11973,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 key_size;
+};
+
++int dpni_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver);
++
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
@@ -9832,13 +11995,13 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token);
+
+/**
-+ * Discard matching traffic. If set, this takes precedence over any other
++ * Discard matching traffic. If set, this takes precedence over any other
+ * configuration and matching traffic is always discarded.
+ */
+ #define DPNI_FS_OPT_DISCARD 0x1
+
+/**
-+ * Set FLC value. If set, flc member of truct dpni_fs_action_cfg is used to
++ * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
+ * override the FLC value set per queue.
+ * For more details check the Frame Descriptor section in the hardware
+ * documentation.
@@ -9847,26 +12010,27 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+/*
+ * Indicates whether the 6 lowest significant bits of FLC are used for stash
-+ * control. If set, the 6 least significant bits in value are interpreted as
++ * control. If set, the 6 least significant bits in value are interpreted as
+ * follows:
+ * - bits 0-1: indicates the number of 64 byte units of context that are
-+ * stashed. FLC value is interpreted as a memory address in this case,
++ * stashed. FLC value is interpreted as a memory address in this case,
+ * excluding the 6 LS bits.
+ * - bits 2-3: indicates the number of 64 byte units of frame annotation
-+ * to be stashed. Annotation is placed at FD[ADDR].
++ * to be stashed. Annotation is placed at FD[ADDR].
+ * - bits 4-5: indicates the number of 64 byte units of frame data to be
-+ * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
++ * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
+ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
+ */
+#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
+
+/**
+ * struct dpni_fs_action_cfg - Action configuration for table look-up
-+ * @flc: FLC value for traffic matching this rule. Please check the Frame
-+ * Descriptor section in the hardware documentation for more information.
-+ * @flow_id: Identifies the Rx queue used for matching traffic. Supported
-+ * values are in range 0 to num_queue-1.
-+ * @options: Any combination of DPNI_FS_OPT_ values.
++ * @flc: FLC value for traffic matching this rule. Please check the
++ * Frame Descriptor section in the hardware documentation for
++ * more information.
++ * @flow_id: Identifies the Rx queue used for matching traffic. Supported
++ * values are in range 0 to num_queue-1.
++ * @options: Any combination of DPNI_FS_OPT_ values.
+ */
+struct dpni_fs_action_cfg {
+ u64 flc;
@@ -9888,6 +12052,47 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 tc_id,
+ const struct dpni_rule_cfg *cfg);
+
++/**
++ * When used for queue_idx in function dpni_set_rx_dist_default_queue
++ * will signal to dpni to drop all unclassified frames
++ */
++#define DPNI_FS_MISS_DROP ((uint16_t)-1)
++
++/**
++ * struct dpni_rx_dist_cfg - distribution configuration
++ * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
++ * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
++ * 512,768,896,1024
++ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
++ * the extractions to be used for the distribution key by calling
++ * dpkg_prepare_key_cfg() relevant only when enable!=0 otherwise
++ * it can be '0'
++ * @enable: enable/disable the distribution.
++ * @tc: TC id for which distribution is set
++ * @fs_miss_flow_id: when packet misses all rules from flow steering table and
++ * hash is disabled it will be put into this queue id; use
++ * DPNI_FS_MISS_DROP to drop frames. The value of this field is
++ * used only when flow steering distribution is enabled and hash
++ * distribution is disabled
++ */
++struct dpni_rx_dist_cfg {
++ u16 dist_size;
++ u64 key_cfg_iova;
++ u8 enable;
++ u8 tc;
++ u16 fs_miss_flow_id;
++};
++
++int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rx_dist_cfg *cfg);
++
++int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rx_dist_cfg *cfg);
++
+#endif /* __FSL_DPNI_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
@@ -10373,61 +12578,154 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+#endif /* __FSL_NET_H */
--- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/Kconfig
-@@ -0,0 +1,6 @@
-+config FSL_DPAA2_ETHSW
-+ tristate "DPAA2 Ethernet Switch"
-+ depends on FSL_MC_BUS && FSL_DPAA2
-+ default y
-+ ---help---
-+ Prototype driver for DPAA2 Ethernet Switch.
---- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
@@ -0,0 +1,10 @@
++# SPDX-License-Identifier: GPL-2.0
++#
++# Makefile for the Freescale DPAA2 Ethernet Switch
++#
++# Copyright 2014-2017 Freescale Semiconductor, Inc.
++# Copyright 2017-2018 NXP
+
+obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
+
-+dpaa2-ethsw-objs := switch.o dpsw.o
++dpaa2-ethsw-objs := ethsw.o ethsw-ethtool.o dpsw.o
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/README
+@@ -0,0 +1,106 @@
++DPAA2 Ethernet Switch driver
++============================
++
++This file provides documentation for the DPAA2 Ethernet Switch driver
+
-+all:
-+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
+
-+clean:
-+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
++Contents
++========
++ Supported Platforms
++ Architecture Overview
++ Creating an Ethernet Switch
++ Features
++
++
++ Supported Platforms
++===================
++This driver provides networking support for Freescale LS2085A, LS2088A
++DPAA2 SoCs.
++
++
++Architecture Overview
++=====================
++The Ethernet Switch in the DPAA2 architecture consists of several hardware
++resources that provide the functionality. These are allocated and
++configured via the Management Complex (MC) portals. MC abstracts most of
++these resources as DPAA2 objects and exposes ABIs through which they can
++be configured and controlled.
++
++For a more detailed description of the DPAA2 architecture and its object
++abstractions see:
++ drivers/staging/fsl-mc/README.txt
++
++The Ethernet Switch is built on top of a Datapath Switch (DPSW) object.
++
++Configuration interface:
++
++ ---------------------
++ | DPAA2 Switch driver |
++ ---------------------
++ .
++ .
++ ----------
++ | DPSW API |
++ ----------
++ . software
++ ================= . ==============
++ . hardware
++ ---------------------
++ | MC hardware portals |
++ ---------------------
++ .
++ .
++ ------
++ | DPSW |
++ ------
++
++Driver uses the switch device driver model and exposes each switch port as
++a network interface, which can be included in a bridge. Traffic switched
++between ports is offloaded into the hardware. Exposed network interfaces
++are not used for I/O, they are used just for configuration. This
++limitation is going to be addressed in the future.
++
++The DPSW can have ports connected to DPNIs or to PHYs via DPMACs.
++
++
++ [ethA] [ethB] [ethC] [ethD] [ethE] [ethF]
++ : : : : : :
++ : : : : : :
++[eth drv] [eth drv] [ ethsw drv ]
++ : : : : : : kernel
++========================================================================
++ : : : : : : hardware
++ [DPNI] [DPNI] [============= DPSW =================]
++ | | | | | |
++ | ---------- | [DPMAC] [DPMAC]
++ ------------------------------- | |
++ | |
++ [PHY] [PHY]
++
++For a more detailed description of the Ethernet switch device driver model
++see:
++ Documentation/networking/switchdev.txt
++
++Creating an Ethernet Switch
++===========================
++A device is created for the switch objects probed on the MC bus. Each DPSW
++has a number of properties which determine the configuration options and
++associated hardware resources.
++
++A DPSW object (and the other DPAA2 objects needed for a DPAA2 switch) can
++be added to a container on the MC bus in one of two ways: statically,
++through a Datapath Layout Binary file (DPL) that is parsed by MC at boot
++time; or created dynamically at runtime, via the DPAA2 objects APIs.
++
++Features
++========
++Driver configures DPSW to perform hardware switching offload of
++unicast/multicast/broadcast (VLAN tagged or untagged) traffic between its
++ports.
++
++It allows configuration of hardware learning, flooding, multicast groups,
++port VLAN configuration and STP state.
++
++Static entries can be added/removed from the FDB.
++
++Hardware statistics for each port are provided through ethtool -S option.
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/TODO
+@@ -0,0 +1,14 @@
++* Add I/O capabilities on switch port netdevices. This will allow control
++traffic to reach the CPU.
++* Add ACL to redirect control traffic to CPU.
++* Add support for displaying learned FDB entries
++* MC firmware uprev; the DPAA2 objects used by the Ethernet Switch driver
++need to be kept in sync with binary interface changes in MC
++* refine README file
++* cleanup
++
++NOTE: At least first three of the above are required before getting the
++DPAA2 Ethernet Switch driver out of staging. Another requirement is that
++the fsl-mc bus driver is moved to drivers/bus and dpio driver is moved to
++drivers/soc (this is required for I/O).
++
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
-@@ -0,0 +1,851 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
+@@ -0,0 +1,359 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017-2018 NXP
+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
+ */
++
+#ifndef __FSL_DPSW_CMD_H
+#define __FSL_DPSW_CMD_H
+
@@ -10450,47 +12748,28 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003)
+#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004)
+#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005)
-+#define DPSW_CMDID_IS_ENABLED DPSW_CMD_ID(0x006)
+
-+#define DPSW_CMDID_SET_IRQ DPSW_CMD_ID(0x010)
-+#define DPSW_CMDID_GET_IRQ DPSW_CMD_ID(0x011)
+#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012)
-+#define DPSW_CMDID_GET_IRQ_ENABLE DPSW_CMD_ID(0x013)
++
+#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014)
-+#define DPSW_CMDID_GET_IRQ_MASK DPSW_CMD_ID(0x015)
++
+#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
+#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
+
-+#define DPSW_CMDID_SET_REFLECTION_IF DPSW_CMD_ID(0x022)
-+
-+#define DPSW_CMDID_ADD_CUSTOM_TPID DPSW_CMD_ID(0x024)
-+
-+#define DPSW_CMDID_REMOVE_CUSTOM_TPID DPSW_CMD_ID(0x026)
-+
+#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
+#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
-+#define DPSW_CMDID_IF_SET_ACCEPTED_FRAMES DPSW_CMD_ID(0x032)
-+#define DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN DPSW_CMD_ID(0x033)
++
+#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_ID(0x034)
-+#define DPSW_CMDID_IF_SET_COUNTER DPSW_CMD_ID(0x035)
-+#define DPSW_CMDID_IF_SET_TX_SELECTION DPSW_CMD_ID(0x036)
-+#define DPSW_CMDID_IF_ADD_REFLECTION DPSW_CMD_ID(0x037)
-+#define DPSW_CMDID_IF_REMOVE_REFLECTION DPSW_CMD_ID(0x038)
-+#define DPSW_CMDID_IF_SET_FLOODING_METERING DPSW_CMD_ID(0x039)
-+#define DPSW_CMDID_IF_SET_METERING DPSW_CMD_ID(0x03A)
-+#define DPSW_CMDID_IF_SET_EARLY_DROP DPSW_CMD_ID(0x03B)
+
+#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
+#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
+
-+#define DPSW_CMDID_IF_GET_ATTR DPSW_CMD_ID(0x042)
-+
+#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044)
-+#define DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x045)
++
+#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046)
+#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047)
+#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048)
-+#define DPSW_CMDID_IF_SET_MULTICAST DPSW_CMD_ID(0x049)
++
+#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A)
+
+#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
@@ -10498,39 +12777,17 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
+#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061)
+#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
-+#define DPSW_CMDID_VLAN_ADD_IF_FLOODING DPSW_CMD_ID(0x063)
++
+#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064)
+#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED DPSW_CMD_ID(0x065)
+#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066)
+#define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067)
-+#define DPSW_CMDID_VLAN_GET_IF DPSW_CMD_ID(0x068)
-+#define DPSW_CMDID_VLAN_GET_IF_FLOODING DPSW_CMD_ID(0x069)
-+#define DPSW_CMDID_VLAN_GET_IF_UNTAGGED DPSW_CMD_ID(0x06A)
-+#define DPSW_CMDID_VLAN_GET_ATTRIBUTES DPSW_CMD_ID(0x06B)
-+
-+#define DPSW_CMDID_FDB_GET_MULTICAST DPSW_CMD_ID(0x080)
-+#define DPSW_CMDID_FDB_GET_UNICAST DPSW_CMD_ID(0x081)
-+#define DPSW_CMDID_FDB_ADD DPSW_CMD_ID(0x082)
-+#define DPSW_CMDID_FDB_REMOVE DPSW_CMD_ID(0x083)
++
+#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084)
+#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085)
+#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
+#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
+#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088)
-+#define DPSW_CMDID_FDB_GET_ATTR DPSW_CMD_ID(0x089)
-+
-+#define DPSW_CMDID_ACL_ADD DPSW_CMD_ID(0x090)
-+#define DPSW_CMDID_ACL_REMOVE DPSW_CMD_ID(0x091)
-+#define DPSW_CMDID_ACL_ADD_ENTRY DPSW_CMD_ID(0x092)
-+#define DPSW_CMDID_ACL_REMOVE_ENTRY DPSW_CMD_ID(0x093)
-+#define DPSW_CMDID_ACL_ADD_IF DPSW_CMD_ID(0x094)
-+#define DPSW_CMDID_ACL_REMOVE_IF DPSW_CMD_ID(0x095)
-+#define DPSW_CMDID_ACL_GET_ATTR DPSW_CMD_ID(0x096)
-+
-+#define DPSW_CMDID_CTRL_IF_GET_ATTR DPSW_CMD_ID(0x0A0)
-+#define DPSW_CMDID_CTRL_IF_SET_POOLS DPSW_CMD_ID(0x0A1)
-+#define DPSW_CMDID_CTRL_IF_ENABLE DPSW_CMD_ID(0x0A2)
-+#define DPSW_CMDID_CTRL_IF_DISABLE DPSW_CMD_ID(0x0A3)
+
+/* Macros for accessing command fields smaller than 1byte */
+#define DPSW_MASK(field) \
@@ -10543,12 +12800,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#define dpsw_get_bit(var, bit) \
+ (((var) >> (bit)) & GENMASK(0, 0))
+
-+static inline u64 dpsw_set_bit(u64 var, unsigned int bit, u8 val)
-+{
-+ var |= (u64)val << bit & GENMASK(bit, bit);
-+ return var;
-+}
-+
+struct dpsw_cmd_open {
+ __le32 dpsw_id;
+};
@@ -10585,33 +12836,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 enabled;
+};
+
-+struct dpsw_cmd_set_irq {
-+ /* cmd word 0 */
-+ u8 irq_index;
-+ u8 pad[3];
-+ __le32 irq_val;
-+ /* cmd word 1 */
-+ __le64 irq_addr;
-+ /* cmd word 2 */
-+ __le32 irq_num;
-+};
-+
-+struct dpsw_cmd_get_irq {
-+ __le32 pad;
-+ u8 irq_index;
-+};
-+
-+struct dpsw_rsp_get_irq {
-+ /* cmd word 0 */
-+ __le32 irq_val;
-+ __le32 pad;
-+ /* cmd word 1 */
-+ __le64 irq_addr;
-+ /* cmd word 2 */
-+ __le32 irq_num;
-+ __le32 irq_type;
-+};
-+
+struct dpsw_cmd_set_irq_enable {
+ u8 enable_state;
+ u8 pad[3];
@@ -10673,17 +12897,13 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ __le16 mem_size;
+ __le16 max_fdb_mc_groups;
+ u8 max_meters_per_if;
-+ /* from LSB only the ffirst 4 bits */
++ /* from LSB only the first 4 bits */
+ u8 component_type;
+ __le16 pad;
+ /* cmd word 3 */
+ __le64 options;
+};
+
-+struct dpsw_cmd_set_reflection_if {
-+ __le16 if_id;
-+};
-+
+struct dpsw_cmd_if_set_flooding {
+ __le16 if_id;
+ /* from LSB: enable:1 */
@@ -10696,12 +12916,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 enable;
+};
+
-+struct dpsw_cmd_if_set_multicast {
-+ __le16 if_id;
-+ /* from LSB: enable:1 */
-+ u8 enable;
-+};
-+
+#define DPSW_VLAN_ID_SHIFT 0
+#define DPSW_VLAN_ID_SIZE 12
+#define DPSW_DEI_SHIFT 12
@@ -10736,26 +12950,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 state;
+};
+
-+#define DPSW_FRAME_TYPE_SHIFT 0
-+#define DPSW_FRAME_TYPE_SIZE 4
-+#define DPSW_UNACCEPTED_ACT_SHIFT 4
-+#define DPSW_UNACCEPTED_ACT_SIZE 4
-+
-+struct dpsw_cmd_if_set_accepted_frames {
-+ __le16 if_id;
-+ /* from LSB: type:4 unaccepted_act:4 */
-+ u8 unaccepted;
-+};
-+
-+#define DPSW_ACCEPT_ALL_SHIFT 0
-+#define DPSW_ACCEPT_ALL_SIZE 1
-+
-+struct dpsw_cmd_if_set_accept_all_vlan {
-+ __le16 if_id;
-+ /* only the least significant bit */
-+ u8 accept_all;
-+};
-+
+#define DPSW_COUNTER_TYPE_SHIFT 0
+#define DPSW_COUNTER_TYPE_SIZE 5
+
@@ -10770,153 +12964,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ __le64 counter;
+};
+
-+struct dpsw_cmd_if_set_counter {
-+ /* cmd word 0 */
-+ __le16 if_id;
-+ /* from LSB: type:5 */
-+ u8 type;
-+ /* cmd word 1 */
-+ __le64 counter;
-+};
-+
-+#define DPSW_PRIORITY_SELECTOR_SHIFT 0
-+#define DPSW_PRIORITY_SELECTOR_SIZE 3
-+#define DPSW_SCHED_MODE_SHIFT 0
-+#define DPSW_SCHED_MODE_SIZE 4
-+
-+struct dpsw_cmd_if_set_tx_selection {
-+ __le16 if_id;
-+ /* from LSB: priority_selector:3 */
-+ u8 priority_selector;
-+ u8 pad[5];
-+ u8 tc_id[8];
-+
-+ struct dpsw_tc_sched {
-+ __le16 delta_bandwidth;
-+ u8 mode;
-+ u8 pad;
-+ } tc_sched[8];
-+};
-+
-+#define DPSW_FILTER_SHIFT 0
-+#define DPSW_FILTER_SIZE 2
-+
-+struct dpsw_cmd_if_reflection {
-+ __le16 if_id;
-+ __le16 vlan_id;
-+ /* only 2 bits from the LSB */
-+ u8 filter;
-+};
-+
-+#define DPSW_MODE_SHIFT 0
-+#define DPSW_MODE_SIZE 4
-+#define DPSW_UNITS_SHIFT 4
-+#define DPSW_UNITS_SIZE 4
-+
-+struct dpsw_cmd_if_set_flooding_metering {
-+ /* cmd word 0 */
-+ __le16 if_id;
-+ u8 pad;
-+ /* from LSB: mode:4 units:4 */
-+ u8 mode_units;
-+ __le32 cir;
-+ /* cmd word 1 */
-+ __le32 eir;
-+ __le32 cbs;
-+ /* cmd word 2 */
-+ __le32 ebs;
-+};
-+
-+struct dpsw_cmd_if_set_metering {
-+ /* cmd word 0 */
-+ __le16 if_id;
-+ u8 tc_id;
-+ /* from LSB: mode:4 units:4 */
-+ u8 mode_units;
-+ __le32 cir;
-+ /* cmd word 1 */
-+ __le32 eir;
-+ __le32 cbs;
-+ /* cmd word 2 */
-+ __le32 ebs;
-+};
-+
-+#define DPSW_EARLY_DROP_MODE_SHIFT 0
-+#define DPSW_EARLY_DROP_MODE_SIZE 2
-+#define DPSW_EARLY_DROP_UNIT_SHIFT 2
-+#define DPSW_EARLY_DROP_UNIT_SIZE 2
-+
-+struct dpsw_prep_early_drop {
-+ /* from LSB: mode:2 units:2 */
-+ u8 conf;
-+ u8 pad0[3];
-+ __le32 tail_drop_threshold;
-+ u8 green_drop_probability;
-+ u8 pad1[7];
-+ __le64 green_max_threshold;
-+ __le64 green_min_threshold;
-+ __le64 pad2;
-+ u8 yellow_drop_probability;
-+ u8 pad3[7];
-+ __le64 yellow_max_threshold;
-+ __le64 yellow_min_threshold;
-+};
-+
-+struct dpsw_cmd_if_set_early_drop {
-+ /* cmd word 0 */
-+ u8 pad0;
-+ u8 tc_id;
-+ __le16 if_id;
-+ __le32 pad1;
-+ /* cmd word 1 */
-+ __le64 early_drop_iova;
-+};
-+
-+struct dpsw_cmd_custom_tpid {
-+ __le16 pad;
-+ __le16 tpid;
-+};
-+
+struct dpsw_cmd_if {
+ __le16 if_id;
+};
+
-+#define DPSW_ADMIT_UNTAGGED_SHIFT 0
-+#define DPSW_ADMIT_UNTAGGED_SIZE 4
-+#define DPSW_ENABLED_SHIFT 5
-+#define DPSW_ENABLED_SIZE 1
-+#define DPSW_ACCEPT_ALL_VLAN_SHIFT 6
-+#define DPSW_ACCEPT_ALL_VLAN_SIZE 1
-+
-+struct dpsw_rsp_if_get_attr {
-+ /* cmd word 0 */
-+ /* from LSB: admit_untagged:4 enabled:1 accept_all_vlan:1 */
-+ u8 conf;
-+ u8 pad1;
-+ u8 num_tcs;
-+ u8 pad2;
-+ __le16 qdid;
-+ /* cmd word 1 */
-+ __le32 options;
-+ __le32 pad3;
-+ /* cmd word 2 */
-+ __le32 rate;
-+};
-+
+struct dpsw_cmd_if_set_max_frame_length {
+ __le16 if_id;
+ __le16 frame_length;
+};
+
-+struct dpsw_cmd_if_get_max_frame_length {
-+ __le16 if_id;
-+};
-+
-+struct dpsw_rsp_if_get_max_frame_length {
-+ __le16 pad;
-+ __le16 frame_length;
-+};
-+
+struct dpsw_cmd_if_set_link_cfg {
+ /* cmd word 0 */
+ __le16 if_id;
@@ -10957,7 +13013,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ __le16 pad0;
+ __le16 vlan_id;
+ __le32 pad1;
-+ /* cmd word 1 */
++ /* cmd word 1-4 */
+ __le64 if_id[4];
+};
+
@@ -10966,59 +13022,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ __le16 vlan_id;
+};
+
-+struct dpsw_cmd_vlan_get_attr {
-+ __le16 vlan_id;
-+};
-+
-+struct dpsw_rsp_vlan_get_attr {
-+ /* cmd word 0 */
-+ __le64 pad;
-+ /* cmd word 1 */
-+ __le16 fdb_id;
-+ __le16 num_ifs;
-+ __le16 num_untagged_ifs;
-+ __le16 num_flooding_ifs;
-+};
-+
-+struct dpsw_cmd_vlan_get_if {
-+ __le16 vlan_id;
-+};
-+
-+struct dpsw_rsp_vlan_get_if {
-+ /* cmd word 0 */
-+ __le16 pad0;
-+ __le16 num_ifs;
-+ u8 pad1[4];
-+ /* cmd word 1 */
-+ __le64 if_id[4];
-+};
-+
-+struct dpsw_cmd_vlan_get_if_untagged {
-+ __le16 vlan_id;
-+};
-+
-+struct dpsw_rsp_vlan_get_if_untagged {
-+ /* cmd word 0 */
-+ __le16 pad0;
-+ __le16 num_ifs;
-+ u8 pad1[4];
-+ /* cmd word 1 */
-+ __le64 if_id[4];
-+};
-+
-+struct dpsw_cmd_vlan_get_if_flooding {
-+ __le16 vlan_id;
-+};
-+
-+struct dpsw_rsp_vlan_get_if_flooding {
-+ /* cmd word 0 */
-+ __le16 pad0;
-+ __le16 num_ifs;
-+ u8 pad1[4];
-+ /* cmd word 1 */
-+ __le64 if_id[4];
-+};
-+
+struct dpsw_cmd_fdb_add {
+ __le32 pad;
+ __le16 fdb_aging_time;
@@ -11036,30 +13039,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#define DPSW_ENTRY_TYPE_SHIFT 0
+#define DPSW_ENTRY_TYPE_SIZE 4
+
-+struct dpsw_cmd_fdb_add_unicast {
-+ /* cmd word 0 */
-+ __le16 fdb_id;
-+ u8 mac_addr[6];
-+ /* cmd word 1 */
-+ u8 if_egress;
-+ u8 pad;
-+ /* only the first 4 bits from LSB */
-+ u8 type;
-+};
-+
-+struct dpsw_cmd_fdb_get_unicast {
-+ __le16 fdb_id;
-+ u8 mac_addr[6];
-+};
-+
-+struct dpsw_rsp_fdb_get_unicast {
-+ __le64 pad;
-+ __le16 if_egress;
-+ /* only first 4 bits from LSB */
-+ u8 type;
-+};
-+
-+struct dpsw_cmd_fdb_remove_unicast {
++struct dpsw_cmd_fdb_unicast_op {
+ /* cmd word 0 */
+ __le16 fdb_id;
+ u8 mac_addr[6];
@@ -11069,7 +13049,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 type;
+};
+
-+struct dpsw_cmd_fdb_add_multicast {
++struct dpsw_cmd_fdb_multicast_op {
+ /* cmd word 0 */
+ __le16 fdb_id;
+ __le16 num_ifs;
@@ -11079,38 +13059,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ /* cmd word 1 */
+ u8 mac_addr[6];
+ __le16 pad2;
-+ /* cmd word 2 */
-+ __le64 if_id[4];
-+};
-+
-+struct dpsw_cmd_fdb_get_multicast {
-+ __le16 fdb_id;
-+ u8 mac_addr[6];
-+};
-+
-+struct dpsw_rsp_fdb_get_multicast {
-+ /* cmd word 0 */
-+ __le64 pad0;
-+ /* cmd word 1 */
-+ __le16 num_ifs;
-+ /* only the first 4 bits from LSB */
-+ u8 type;
-+ u8 pad1[5];
-+ /* cmd word 2 */
-+ __le64 if_id[4];
-+};
-+
-+struct dpsw_cmd_fdb_remove_multicast {
-+ /* cmd word 0 */
-+ __le16 fdb_id;
-+ __le16 num_ifs;
-+ /* only the first 4 bits from LSB */
-+ u8 type;
-+ u8 pad[3];
-+ /* cmd word 1 */
-+ u8 mac_addr[6];
-+ __le16 pad2;
-+ /* cmd word 2 */
++ /* cmd word 2-5 */
+ __le64 if_id[4];
+};
+
@@ -11123,125 +13072,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 mode;
+};
+
-+struct dpsw_cmd_fdb_get_attr {
-+ __le16 fdb_id;
-+};
-+
-+struct dpsw_rsp_fdb_get_attr {
-+ /* cmd word 0 */
-+ __le16 pad;
-+ __le16 max_fdb_entries;
-+ __le16 fdb_aging_time;
-+ __le16 num_fdb_mc_groups;
-+ /* cmd word 1 */
-+ __le16 max_fdb_mc_groups;
-+ /* only the first 4 bits from LSB */
-+ u8 learning_mode;
-+};
-+
-+struct dpsw_cmd_acl_add {
-+ __le16 pad;
-+ __le16 max_entries;
-+};
-+
-+struct dpsw_rsp_acl_add {
-+ __le16 acl_id;
-+};
-+
-+struct dpsw_cmd_acl_remove {
-+ __le16 acl_id;
-+};
-+
-+struct dpsw_prep_acl_entry {
-+ u8 match_l2_dest_mac[6];
-+ __le16 match_l2_tpid;
-+
-+ u8 match_l2_source_mac[6];
-+ __le16 match_l2_vlan_id;
-+
-+ __le32 match_l3_dest_ip;
-+ __le32 match_l3_source_ip;
-+
-+ __le16 match_l4_dest_port;
-+ __le16 match_l4_source_port;
-+ __le16 match_l2_ether_type;
-+ u8 match_l2_pcp_dei;
-+ u8 match_l3_dscp;
-+
-+ u8 mask_l2_dest_mac[6];
-+ __le16 mask_l2_tpid;
-+
-+ u8 mask_l2_source_mac[6];
-+ __le16 mask_l2_vlan_id;
-+
-+ __le32 mask_l3_dest_ip;
-+ __le32 mask_l3_source_ip;
-+
-+ __le16 mask_l4_dest_port;
-+ __le16 mask_l4_source_port;
-+ __le16 mask_l2_ether_type;
-+ u8 mask_l2_pcp_dei;
-+ u8 mask_l3_dscp;
-+
-+ u8 match_l3_protocol;
-+ u8 mask_l3_protocol;
-+};
-+
-+#define DPSW_RESULT_ACTION_SHIFT 0
-+#define DPSW_RESULT_ACTION_SIZE 4
-+
-+struct dpsw_cmd_acl_entry {
-+ __le16 acl_id;
-+ __le16 result_if_id;
-+ __le32 precedence;
-+ /* from LSB only the first 4 bits */
-+ u8 result_action;
-+ u8 pad[7];
-+ __le64 pad2[4];
-+ __le64 key_iova;
-+};
-+
-+struct dpsw_cmd_acl_if {
-+ /* cmd word 0 */
-+ __le16 acl_id;
-+ __le16 num_ifs;
-+ __le32 pad;
-+ /* cmd word 1 */
-+ __le64 if_id[4];
-+};
-+
-+struct dpsw_cmd_acl_get_attr {
-+ __le16 acl_id;
-+};
-+
-+struct dpsw_rsp_acl_get_attr {
-+ /* cmd word 0 */
-+ __le64 pad;
-+ /* cmd word 1 */
-+ __le16 max_entries;
-+ __le16 num_entries;
-+ __le16 num_ifs;
-+};
-+
-+struct dpsw_rsp_ctrl_if_get_attr {
-+ /* cmd word 0 */
-+ __le64 pad;
-+ /* cmd word 1 */
-+ __le32 rx_fqid;
-+ __le32 rx_err_fqid;
-+ /* cmd word 2 */
-+ __le32 tx_err_conf_fqid;
-+};
-+
-+struct dpsw_cmd_ctrl_if_set_pools {
-+ u8 num_dpbp;
-+ /* from LSB: POOL0_BACKUP_POOL:1 ... POOL7_BACKUP_POOL */
-+ u8 backup_pool;
-+ __le16 pad;
-+ __le32 dpbp_id[8];
-+ __le16 buffer_size[8];
-+};
-+
+struct dpsw_rsp_get_api_version {
+ __le16 version_major;
+ __le16 version_minor;
@@ -11250,74 +13080,27 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#endif /* __FSL_DPSW_CMD_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
-@@ -0,0 +1,2762 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
+@@ -0,0 +1,1165 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017-2018 NXP
+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#include "../../fsl-mc/include/mc-sys.h"
-+#include "../../fsl-mc/include/mc-cmd.h"
++
++#include <linux/fsl/mc.h>
+#include "dpsw.h"
+#include "dpsw-cmd.h"
+
+static void build_if_id_bitmap(__le64 *bmap,
+ const u16 *id,
-+ const u16 num_ifs) {
++ const u16 num_ifs)
++{
+ int i;
+
-+ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++)
-+ bmap[id[i] / 64] = dpsw_set_bit(bmap[id[i] / 64],
-+ (id[i] % 64),
-+ 1);
-+}
-+
-+static void read_if_id_bitmap(u16 *if_id,
-+ u16 *num_ifs,
-+ __le64 *bmap) {
-+ int bitmap[DPSW_MAX_IF] = { 0 };
-+ int i, j = 0;
-+ int count = 0;
-+
-+ for (i = 0; i < DPSW_MAX_IF; i++) {
-+ bitmap[i] = dpsw_get_bit(le64_to_cpu(bmap[i / 64]),
-+ i % 64);
-+ count += bitmap[i];
-+ }
-+
-+ *num_ifs = (u16)count;
-+
-+ for (i = 0; (i < DPSW_MAX_IF) && (j < count); i++) {
-+ if (bitmap[i]) {
-+ if_id[j] = (u16)i;
-+ j++;
-+ }
++ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++) {
++ if (id[i] < DPSW_MAX_IF)
++ bmap[id[i] / 64] |= cpu_to_le64(BIT_MASK(id[i] % 64));
+ }
+}
+
@@ -11343,7 +13126,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ int dpsw_id,
+ u16 *token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_open *cmd_params;
+ int err;
+
@@ -11380,7 +13163,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
@@ -11403,7 +13186,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
@@ -11426,7 +13209,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
@@ -11438,41 +13221,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+}
+
+/**
-+ * dpsw_is_enabled() - Check if the DPSW is enabled
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise
-+ */
-+int dpsw_is_enabled(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_rsp_is_enabled *cmd_rsp;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IS_ENABLED, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ cmd_rsp = (struct dpsw_rsp_is_enabled *)cmd.params;
-+ *en = dpsw_get_field(cmd_rsp->enabled, ENABLE);
-+
-+ return 0;
-+}
-+
-+/**
+ * dpsw_reset() - Reset the DPSW, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -11484,7 +13232,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
@@ -11496,86 +13244,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+}
+
+/**
-+ * dpsw_set_irq() - Set IRQ information for the DPSW to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_set_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ struct dpsw_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_set_irq *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_set_irq *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
-+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr);
-+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_get_irq() - Get IRQ information from the DPSW
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_get_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ int *type,
-+ struct dpsw_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_get_irq *cmd_params;
-+ struct dpsw_rsp_get_irq *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_get_irq *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_get_irq *)cmd.params;
-+ irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr);
-+ irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
-+ irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
-+ *type = le32_to_cpu(rsp_params->irq_type);
-+
-+ return 0;
-+}
-+
-+/**
+ * dpsw_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -11596,7 +13264,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 irq_index,
+ u8 en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
@@ -11633,7 +13301,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 irq_index,
+ u32 mask)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
@@ -11666,7 +13334,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 irq_index,
+ u32 *status)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_get_irq_status *cmd_params;
+ struct dpsw_rsp_get_irq_status *rsp_params;
+ int err;
@@ -11709,7 +13377,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 irq_index,
+ u32 status)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
@@ -11738,7 +13406,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ struct dpsw_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_rsp_get_attr *rsp_params;
+ int err;
+
@@ -11773,36 +13441,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+}
+
+/**
-+ * dpsw_set_reflection_if() - Set target interface for reflected interfaces.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Id
-+ *
-+ * Only one reflection receive interface is allowed per switch
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_set_reflection_if *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_set_reflection_if *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
+ * dpsw_if_set_link_cfg() - Set the link configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -11818,7 +13456,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 if_id,
+ struct dpsw_link_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_link_cfg *cmd_params;
+
+ /* prepare command */
@@ -11850,7 +13488,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 if_id,
+ struct dpsw_link_state *state)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_get_link_state *cmd_params;
+ struct dpsw_rsp_if_get_link_state *rsp_params;
+ int err;
@@ -11890,9 +13528,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
-+ int en)
++ u8 en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_flooding *cmd_params;
+
+ /* prepare command */
@@ -11921,9 +13559,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
-+ int en)
++ u8 en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_broadcast *cmd_params;
+
+ /* prepare command */
@@ -11939,37 +13577,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+}
+
+/**
-+ * dpsw_if_set_multicast() - Enable/disable multicast for particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @en: 1 - enable, 0 - disable
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_multicast *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MULTICAST,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_multicast *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->enable, ENABLE, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
+ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -11985,8 +13592,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 if_id,
+ const struct dpsw_tci_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_tci *cmd_params;
++ u16 tmp_conf = 0;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
@@ -11994,10 +13602,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->conf, VLAN_ID, cfg->vlan_id);
-+ dpsw_set_field(cmd_params->conf, DEI, cfg->dei);
-+ dpsw_set_field(cmd_params->conf, PCP, cfg->pcp);
-+ cmd_params->conf = cpu_to_le16(cmd_params->conf);
++ dpsw_set_field(tmp_conf, VLAN_ID, cfg->vlan_id);
++ dpsw_set_field(tmp_conf, DEI, cfg->dei);
++ dpsw_set_field(tmp_conf, PCP, cfg->pcp);
++ cmd_params->conf = cpu_to_le16(tmp_conf);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
@@ -12019,7 +13627,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 if_id,
+ struct dpsw_tci_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_get_tci *cmd_params;
+ struct dpsw_rsp_if_get_tci *rsp_params;
+ int err;
@@ -12064,7 +13672,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 if_id,
+ const struct dpsw_stp_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_stp *cmd_params;
+
+ /* prepare command */
@@ -12081,83 +13689,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+}
+
+/**
-+ * dpsw_if_set_accepted_frames()
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Frame types configuration
-+ *
-+ * When is admit_only_vlan_tagged- the device will discard untagged
-+ * frames or Priority-Tagged frames received on this interface.
-+ * When admit_only_untagged- untagged frames or Priority-Tagged
-+ * frames received on this interface will be accepted and assigned
-+ * to a VID based on the PVID and VID Set for this interface.
-+ * When admit_all - the device will accept VLAN tagged, untagged
-+ * and priority tagged frames.
-+ * The default is admit_all
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_accepted_frames_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_accepted_frames *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_ACCEPTED_FRAMES,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_accepted_frames *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->unaccepted, FRAME_TYPE, cfg->type);
-+ dpsw_set_field(cmd_params->unaccepted, UNACCEPTED_ACT,
-+ cfg->unaccept_act);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_if_set_accept_all_vlan()
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @accept_all: Accept or drop frames having different VLAN
-+ *
-+ * When this is accept (FALSE), the device will discard incoming
-+ * frames for VLANs that do not include this interface in its
-+ * Member set. When accept (TRUE), the interface will accept all incoming frames
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ int accept_all)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_accept_all_vlan *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_accept_all_vlan *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->accept_all, ACCEPT_ALL, accept_all);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
+ * dpsw_if_get_counter() - Get specific counter of particular interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -12175,7 +13706,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ enum dpsw_counter type,
+ u64 *counter)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_get_counter *cmd_params;
+ struct dpsw_rsp_if_get_counter *rsp_params;
+ int err;
@@ -12201,352 +13732,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+}
+
+/**
-+ * dpsw_if_set_counter() - Set specific counter of particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @type: Counter type
-+ * @counter: New counter value
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ enum dpsw_counter type,
-+ u64 counter)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_counter *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_COUNTER,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_counter *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->counter = cpu_to_le64(counter);
-+ dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_if_set_tx_selection() - Function is used for mapping variety
-+ * of frame fields
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Traffic class mapping configuration
-+ *
-+ * Function is used for mapping variety of frame fields (DSCP, PCP)
-+ * to Traffic Class. Traffic class is a number
-+ * in the range from 0 to 7
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_tx_selection_cfg *cfg)
-+{
-+ struct dpsw_cmd_if_set_tx_selection *cmd_params;
-+ struct mc_command cmd = { 0 };
-+ int i;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TX_SELECTION,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_tx_selection *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->priority_selector, PRIORITY_SELECTOR,
-+ cfg->priority_selector);
-+
-+ for (i = 0; i < 8; i++) {
-+ cmd_params->tc_sched[i].delta_bandwidth =
-+ cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
-+ dpsw_set_field(cmd_params->tc_sched[i].mode, SCHED_MODE,
-+ cfg->tc_sched[i].mode);
-+ cmd_params->tc_id[i] = cfg->tc_id[i];
-+ }
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_if_add_reflection() - Identify interface to be reflected or mirrored
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Reflection configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_reflection_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_reflection *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
-+ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_if_remove_reflection() - Remove interface to be reflected or mirrored
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Reflection configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_reflection_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_reflection *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
-+ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_if_set_flooding_metering() - Set flooding metering
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Metering parameters
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_metering_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_flooding_metering *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING_METERING,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_flooding_metering *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
-+ dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
-+ cmd_params->cir = cpu_to_le32(cfg->cir);
-+ cmd_params->eir = cpu_to_le32(cfg->eir);
-+ cmd_params->cbs = cpu_to_le32(cfg->cbs);
-+ cmd_params->ebs = cpu_to_le32(cfg->ebs);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_if_set_metering() - Set interface metering for flooding
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @tc_id: Traffic class ID
-+ * @cfg: Metering parameters
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u8 tc_id,
-+ const struct dpsw_metering_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_metering *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_METERING,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_metering *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->tc_id = tc_id;
-+ dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
-+ dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
-+ cmd_params->cir = cpu_to_le32(cfg->cir);
-+ cmd_params->eir = cpu_to_le32(cfg->eir);
-+ cmd_params->cbs = cpu_to_le32(cfg->cbs);
-+ cmd_params->ebs = cpu_to_le32(cfg->ebs);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_prepare_early_drop() - Prepare an early drop for setting in to interface
-+ * @cfg: Early-drop configuration
-+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA
-+ *
-+ * This function has to be called before dpsw_if_tc_set_early_drop
-+ *
-+ */
-+void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
-+ u8 *early_drop_buf)
-+{
-+ struct dpsw_prep_early_drop *ext_params;
-+
-+ ext_params = (struct dpsw_prep_early_drop *)early_drop_buf;
-+ dpsw_set_field(ext_params->conf, EARLY_DROP_MODE, cfg->drop_mode);
-+ dpsw_set_field(ext_params->conf, EARLY_DROP_UNIT, cfg->units);
-+ ext_params->tail_drop_threshold = cpu_to_le32(cfg->tail_drop_threshold);
-+ ext_params->green_drop_probability = cfg->green.drop_probability;
-+ ext_params->green_max_threshold = cpu_to_le64(cfg->green.max_threshold);
-+ ext_params->green_min_threshold = cpu_to_le64(cfg->green.min_threshold);
-+ ext_params->yellow_drop_probability = cfg->yellow.drop_probability;
-+ ext_params->yellow_max_threshold =
-+ cpu_to_le64(cfg->yellow.max_threshold);
-+ ext_params->yellow_min_threshold =
-+ cpu_to_le64(cfg->yellow.min_threshold);
-+}
-+
-+/**
-+ * dpsw_if_set_early_drop() - Set interface traffic class early-drop
-+ * configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @tc_id: Traffic class selection (0-7)
-+ * @early_drop_iova: I/O virtual address of 64 bytes;
-+ * Must be cacheline-aligned and DMA-able memory
-+ *
-+ * warning: Before calling this function, call dpsw_prepare_if_tc_early_drop()
-+ * to prepare the early_drop_iova parameter
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u8 tc_id,
-+ u64 early_drop_iova)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_early_drop *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_EARLY_DROP,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_early_drop *)cmd.params;
-+ cmd_params->tc_id = tc_id;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_add_custom_tpid() - API Configures a distinct Ethernet type value
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @cfg: Tag Protocol identifier
-+ *
-+ * API Configures a distinct Ethernet type value (or TPID value)
-+ * to indicate a VLAN tag in addition to the common
-+ * TPID values 0x8100 and 0x88A8.
-+ * Two additional TPID's are supported
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpsw_custom_tpid_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_custom_tpid *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ADD_CUSTOM_TPID,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
-+ cmd_params->tpid = cpu_to_le16(cfg->tpid);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_remove_custom_tpid - API removes a distinct Ethernet type value
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @cfg: Tag Protocol identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpsw_custom_tpid_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_custom_tpid *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_REMOVE_CUSTOM_TPID,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
-+ cmd_params->tpid = cpu_to_le16(cfg->tpid);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
+ * dpsw_if_enable() - Enable Interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -12560,7 +13745,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ u16 if_id)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if *cmd_params;
+
+ /* prepare command */
@@ -12588,7 +13773,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ u16 if_id)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if *cmd_params;
+
+ /* prepare command */
@@ -12603,53 +13788,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+}
+
+/**
-+ * dpsw_if_get_attributes() - Function obtains attributes of interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @attr: Returned interface attributes
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_if_attr *attr)
-+{
-+ struct dpsw_rsp_if_get_attr *rsp_params;
-+ struct dpsw_cmd_if *cmd_params;
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_if_get_attr *)cmd.params;
-+ attr->num_tcs = rsp_params->num_tcs;
-+ attr->rate = le32_to_cpu(rsp_params->rate);
-+ attr->options = le32_to_cpu(rsp_params->options);
-+ attr->enabled = dpsw_get_field(rsp_params->conf, ENABLED);
-+ attr->accept_all_vlan = dpsw_get_field(rsp_params->conf,
-+ ACCEPT_ALL_VLAN);
-+ attr->admit_untagged = dpsw_get_field(rsp_params->conf, ADMIT_UNTAGGED);
-+ attr->qdid = le16_to_cpu(rsp_params->qdid);
-+
-+ return 0;
-+}
-+
-+/**
+ * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -12665,7 +13803,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 if_id,
+ u16 frame_length)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_max_frame_length *cmd_params;
+
+ /* prepare command */
@@ -12681,45 +13819,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+}
+
+/**
-+ * dpsw_if_get_max_frame_length() - Get Maximum Receive frame length.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @frame_length: Returned maximum Frame Length
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u16 *frame_length)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_get_max_frame_length *cmd_params;
-+ struct dpsw_rsp_if_get_max_frame_length *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_get_max_frame_length *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dpsw_rsp_if_get_max_frame_length *)cmd.params;
-+ *frame_length = le16_to_cpu(rsp_params->frame_length);
-+
-+ return 0;
-+}
-+
-+/**
+ * dpsw_vlan_add() - Adding new VLAN to DPSW.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -12742,7 +13841,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 vlan_id,
+ const struct dpsw_vlan_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_vlan_add *cmd_params;
+
+ /* prepare command */
@@ -12778,7 +13877,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
@@ -12816,7 +13915,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
@@ -12832,45 +13931,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+}
+
+/**
-+ * dpsw_vlan_add_if_flooding() - Define a set of interfaces that should be
-+ * included in flooding when frame with unknown destination
-+ * unicast MAC arrived.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be used for flooding
-+ *
-+ * These interfaces should belong to this VLAN. By default all
-+ * interfaces are included into flooding list. Providing
-+ * un-existing interface or an interface that already in the
-+ * flooding list generates an error and the entire command is
-+ * ignored.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_FLOODING,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
+ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -12889,7 +13949,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
@@ -12925,7 +13985,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
@@ -12941,38 +14001,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+}
+
+/**
-+ * dpsw_vlan_remove_if_flooding() - Define a set of interfaces that should be
-+ * removed from the flooding list.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces used for flooding
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_FLOODING,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
+ * dpsw_vlan_remove() - Remove an entire VLAN
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -12986,7 +14014,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ u16 vlan_id)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_remove *cmd_params;
+
+ /* prepare command */
@@ -13001,244 +14029,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+}
+
+/**
-+ * dpsw_vlan_get_attributes() - Get VLAN attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @attr: Returned DPSW attributes
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_get_attr *cmd_params;
-+ struct dpsw_rsp_vlan_get_attr *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_ATTRIBUTES,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_get_attr *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_vlan_get_attr *)cmd.params;
-+ attr->fdb_id = le16_to_cpu(rsp_params->fdb_id);
-+ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ attr->num_untagged_ifs = le16_to_cpu(rsp_params->num_untagged_ifs);
-+ attr->num_flooding_ifs = le16_to_cpu(rsp_params->num_flooding_ifs);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpsw_vlan_get_if() - Get interfaces belong to this VLAN
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Returned set of interfaces belong to this VLAN
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_get_if *cmd_params;
-+ struct dpsw_rsp_vlan_get_if *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_get_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_vlan_get_if *)cmd.params;
-+ cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpsw_vlan_get_if_flooding() - Get interfaces used in flooding for this VLAN
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Returned set of flooding interfaces
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+
-+int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_get_if_flooding *cmd_params;
-+ struct dpsw_rsp_vlan_get_if_flooding *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_FLOODING,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_get_if_flooding *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_vlan_get_if_flooding *)cmd.params;
-+ cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpsw_vlan_get_if_untagged() - Get interfaces that should be transmitted as
-+ * untagged
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Returned set of untagged interfaces
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_get_if_untagged *cmd_params;
-+ struct dpsw_rsp_vlan_get_if_untagged *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_UNTAGGED,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_get_if_untagged *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_vlan_get_if_untagged *)cmd.params;
-+ cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpsw_fdb_add() - Add FDB to switch and Returns handle to FDB table for
-+ * the reference
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Returned Forwarding Database Identifier
-+ * @cfg: FDB Configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_add(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *fdb_id,
-+ const struct dpsw_fdb_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_add *cmd_params;
-+ struct dpsw_rsp_fdb_add *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_add *)cmd.params;
-+ cmd_params->fdb_aging_time = cpu_to_le16(cfg->fdb_aging_time);
-+ cmd_params->num_fdb_entries = cpu_to_le16(cfg->num_fdb_entries);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_fdb_add *)cmd.params;
-+ *fdb_id = le16_to_cpu(rsp_params->fdb_id);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpsw_fdb_remove() - Remove FDB from switch
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_remove *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_remove *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
+ * dpsw_fdb_add_unicast() - Function adds an unicast entry into MAC lookup table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -13254,15 +14044,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 fdb_id,
+ const struct dpsw_fdb_unicast_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_add_unicast *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_unicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_add_unicast *)cmd.params;
++ cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
+ for (i = 0; i < 6; i++)
@@ -13274,50 +14064,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+}
+
+/**
-+ * dpsw_fdb_get_unicast() - Get unicast entry from MAC lookup table by
-+ * unicast Ethernet address
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Returned unicast entry configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_unicast_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_get_unicast *cmd_params;
-+ struct dpsw_rsp_fdb_get_unicast *rsp_params;
-+ int err, i;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_UNICAST,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_get_unicast *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_fdb_get_unicast *)cmd.params;
-+ cfg->if_egress = le16_to_cpu(rsp_params->if_egress);
-+ cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
-+
-+ return 0;
-+}
-+
-+/**
+ * dpsw_fdb_remove_unicast() - removes an entry from MAC lookup table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -13333,15 +14079,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 fdb_id,
+ const struct dpsw_fdb_unicast_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_remove_unicast *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_unicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_remove_unicast *)cmd.params;
++ cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
@@ -13375,15 +14121,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 fdb_id,
+ const struct dpsw_fdb_multicast_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_add_multicast *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_multicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_add_multicast *)cmd.params;
++ cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
@@ -13396,51 +14142,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+}
+
+/**
-+ * dpsw_fdb_get_multicast() - Reading multi-cast group by multi-cast Ethernet
-+ * address.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Returned multicast entry configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_multicast_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_get_multicast *cmd_params;
-+ struct dpsw_rsp_fdb_get_multicast *rsp_params;
-+ int err, i;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_MULTICAST,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_get_multicast *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_fdb_get_multicast *)cmd.params;
-+ cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
-+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
-+
-+ return 0;
-+}
-+
-+/**
+ * dpsw_fdb_remove_multicast() - Removing interfaces from an existing multicast
+ * group.
+ * @mc_io: Pointer to MC portal's I/O object
@@ -13462,15 +14163,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 fdb_id,
+ const struct dpsw_fdb_multicast_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_remove_multicast *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_multicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_remove_multicast *)cmd.params;
++ cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
@@ -13498,7 +14199,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 fdb_id,
+ enum dpsw_fdb_learning_mode mode)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_set_learning_mode *cmd_params;
+
+ /* prepare command */
@@ -13514,474 +14215,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+}
+
+/**
-+ * dpsw_fdb_get_attributes() - Get FDB attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @attr: Returned FDB attributes
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_get_attr *cmd_params;
-+ struct dpsw_rsp_fdb_get_attr *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_ATTR,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_get_attr *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_fdb_get_attr *)cmd.params;
-+ attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
-+ attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
-+ attr->learning_mode = dpsw_get_field(rsp_params->learning_mode,
-+ LEARNING_MODE);
-+ attr->num_fdb_mc_groups = le16_to_cpu(rsp_params->num_fdb_mc_groups);
-+ attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpsw_acl_add() - Adds ACL to L2 switch.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: Returned ACL ID, for the future reference
-+ * @cfg: ACL configuration
-+ *
-+ * Create Access Control List. Multiple ACLs can be created and
-+ * co-exist in L2 switch
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_add(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *acl_id,
-+ const struct dpsw_acl_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_add *cmd_params;
-+ struct dpsw_rsp_acl_add *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_acl_add *)cmd.params;
-+ cmd_params->max_entries = cpu_to_le16(cfg->max_entries);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_acl_add *)cmd.params;
-+ *acl_id = le16_to_cpu(rsp_params->acl_id);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpsw_acl_remove() - Removes ACL from L2 switch.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_remove(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_remove *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_acl_remove *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_acl_prepare_entry_cfg() - Set an entry to ACL.
-+ * @key: Key
-+ * @entry_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
-+ *
-+ * This function has to be called before adding or removing acl_entry
-+ *
-+ */
-+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
-+ u8 *entry_cfg_buf)
-+{
-+ struct dpsw_prep_acl_entry *ext_params;
-+ int i;
-+
-+ ext_params = (struct dpsw_prep_acl_entry *)entry_cfg_buf;
-+
-+ for (i = 0; i < 6; i++) {
-+ ext_params->match_l2_dest_mac[i] =
-+ key->match.l2_dest_mac[5 - i];
-+ ext_params->match_l2_source_mac[i] =
-+ key->match.l2_source_mac[5 - i];
-+ ext_params->mask_l2_dest_mac[i] =
-+ key->mask.l2_dest_mac[5 - i];
-+ ext_params->mask_l2_source_mac[i] =
-+ key->mask.l2_source_mac[5 - i];
-+ }
-+
-+ ext_params->match_l2_tpid = cpu_to_le16(key->match.l2_tpid);
-+ ext_params->match_l2_vlan_id = cpu_to_le16(key->match.l2_vlan_id);
-+ ext_params->match_l3_dest_ip = cpu_to_le32(key->match.l3_dest_ip);
-+ ext_params->match_l3_source_ip = cpu_to_le32(key->match.l3_source_ip);
-+ ext_params->match_l4_dest_port = cpu_to_le16(key->match.l4_dest_port);
-+ ext_params->match_l2_ether_type = cpu_to_le16(key->match.l2_ether_type);
-+ ext_params->match_l2_pcp_dei = key->match.l2_pcp_dei;
-+ ext_params->match_l3_dscp = key->match.l3_dscp;
-+ ext_params->match_l4_source_port =
-+ cpu_to_le16(key->match.l4_source_port);
-+
-+ ext_params->mask_l2_tpid = cpu_to_le16(key->mask.l2_tpid);
-+ ext_params->mask_l2_vlan_id = cpu_to_le16(key->mask.l2_vlan_id);
-+ ext_params->mask_l3_dest_ip = cpu_to_le32(key->mask.l3_dest_ip);
-+ ext_params->mask_l3_source_ip = cpu_to_le32(key->mask.l3_source_ip);
-+ ext_params->mask_l4_dest_port = cpu_to_le16(key->mask.l4_dest_port);
-+ ext_params->mask_l4_source_port = cpu_to_le16(key->mask.l4_source_port);
-+ ext_params->mask_l2_ether_type = cpu_to_le16(key->mask.l2_ether_type);
-+ ext_params->mask_l2_pcp_dei = key->mask.l2_pcp_dei;
-+ ext_params->mask_l3_dscp = key->mask.l3_dscp;
-+ ext_params->match_l3_protocol = key->match.l3_protocol;
-+ ext_params->mask_l3_protocol = key->mask.l3_protocol;
-+}
-+
-+/**
-+ * dpsw_acl_add_entry() - Adds an entry to ACL.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: Entry configuration
-+ *
-+ * warning: This function has to be called after dpsw_acl_set_entry_cfg()
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_entry_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_entry *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
-+ cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
-+ cmd_params->precedence = cpu_to_le32(cfg->precedence);
-+ dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
-+ cfg->result.action);
-+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_acl_remove_entry() - Removes an entry from ACL.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: Entry configuration
-+ *
-+ * warning: This function has to be called after dpsw_acl_set_entry_cfg()
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_entry_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_entry *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
-+ cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
-+ cmd_params->precedence = cpu_to_le32(cfg->precedence);
-+ dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
-+ cfg->result.action);
-+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_acl_add_if() - Associate interface/interfaces with ACL.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: Interfaces list
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_if *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
-+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_acl_remove_if() - De-associate interface/interfaces from ACL.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: Interfaces list
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_if *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
-+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_acl_get_attributes() - Get specific counter of particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: ACL Identifier
-+ * @attr: Returned ACL attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ struct dpsw_acl_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_get_attr *cmd_params;
-+ struct dpsw_rsp_acl_get_attr *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_GET_ATTR,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_acl_get_attr *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_acl_get_attr *)cmd.params;
-+ attr->max_entries = le16_to_cpu(rsp_params->max_entries);
-+ attr->num_entries = le16_to_cpu(rsp_params->num_entries);
-+ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @attr: Returned control interface attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpsw_ctrl_if_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_rsp_ctrl_if_get_attr *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_ctrl_if_get_attr *)cmd.params;
-+ attr->rx_fqid = le32_to_cpu(rsp_params->rx_fqid);
-+ attr->rx_err_fqid = le32_to_cpu(rsp_params->rx_err_fqid);
-+ attr->tx_err_conf_fqid = le32_to_cpu(rsp_params->tx_err_conf_fqid);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpsw_ctrl_if_set_pools() - Set control interface buffer pools
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @cfg: Buffer pools configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpsw_ctrl_if_pools_cfg *pools)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_ctrl_if_set_pools *cmd_params;
-+ int i;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_ctrl_if_set_pools *)cmd.params;
-+ cmd_params->num_dpbp = pools->num_dpbp;
-+ for (i = 0; i < 8; i++) {
-+ cmd_params->backup_pool = dpsw_set_bit(cmd_params->backup_pool,
-+ i,
-+ pools->pools[i].backup_pool);
-+ cmd_params->buffer_size[i] =
-+ cpu_to_le16(pools->pools[i].buffer_size);
-+ cmd_params->dpbp_id[i] =
-+ cpu_to_le32(pools->pools[i].dpbp_id);
-+ }
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_ctrl_if_enable() - Enable control interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_ctrl_if_disable() - Function disables control interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
+ * dpsw_get_api_version() - Get Data Path Switch API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -13995,13 +14228,13 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 *major_ver,
+ u16 *minor_ver)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_rsp_get_api_version *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_API_VERSION,
-+ cmd_flags,
-+ 0);
++ cmd_flags,
++ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
@@ -14015,38 +14248,14 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+}
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
-@@ -0,0 +1,1269 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
+@@ -0,0 +1,592 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017-2018 NXP
+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
+ */
++
+#ifndef __FSL_DPSW_H
+#define __FSL_DPSW_H
+
@@ -14151,17 +14360,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ } adv;
+};
+
-+int dpsw_create(struct fsl_mc_io *mc_io,
-+ u16 dprc_token,
-+ u32 cmd_flags,
-+ const struct dpsw_cfg *cfg,
-+ u32 *obj_id);
-+
-+int dpsw_destroy(struct fsl_mc_io *mc_io,
-+ u16 dprc_token,
-+ u32 cmd_flags,
-+ u32 object_id);
-+
+int dpsw_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
@@ -14170,11 +14368,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token);
+
-+int dpsw_is_enabled(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en);
-+
+int dpsw_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
@@ -14203,43 +14396,18 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ int irq_num;
+};
+
-+int dpsw_set_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ struct dpsw_irq_cfg *irq_cfg);
-+
-+int dpsw_get_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ int *type,
-+ struct dpsw_irq_cfg *irq_cfg);
-+
+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
-+int dpsw_get_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 *en);
-+
+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
-+int dpsw_get_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *mask);
-+
+int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
@@ -14292,11 +14460,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ struct dpsw_attr *attr);
+
-+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id);
-+
+/**
+ * enum dpsw_action - Action selection for special/control frames
+ * @DPSW_ACTION_DROP: Drop frame
@@ -14348,7 +14511,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+struct dpsw_link_state {
+ u32 rate;
+ u64 options;
-+ int up;
++ u8 up;
+};
+
+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
@@ -14361,22 +14524,16 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
-+ int en);
++ u8 en);
+
+int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
-+ int en);
-+
-+int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ int en);
++ u8 en);
+
+/**
-+ * struct dpsw_tci_cfg - Tag Contorl Information (TCI) configuration
++ * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
+ * @pcp: Priority Code Point (PCP): a 3-bit field which refers
+ * to the IEEE 802.1p priority
+ * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
@@ -14415,10 +14572,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ *
+ */
+enum dpsw_stp_state {
-+ DPSW_STP_STATE_BLOCKING = 0,
++ DPSW_STP_STATE_DISABLED = 0,
+ DPSW_STP_STATE_LISTENING = 1,
+ DPSW_STP_STATE_LEARNING = 2,
-+ DPSW_STP_STATE_FORWARDING = 3
++ DPSW_STP_STATE_FORWARDING = 3,
++ DPSW_STP_STATE_BLOCKING = 0
+};
+
+/**
@@ -14451,29 +14609,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+};
+
+/**
-+ * struct dpsw_accepted_frames_cfg - Types of frames to accept configuration
-+ * @type: Defines ingress accepted frames
-+ * @unaccept_act: When a frame is not accepted, it may be discarded or
-+ * redirected to control interface depending on this mode
-+ */
-+struct dpsw_accepted_frames_cfg {
-+ enum dpsw_accepted_frames type;
-+ enum dpsw_action unaccept_act;
-+};
-+
-+int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_accepted_frames_cfg *cfg);
-+
-+int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ int accept_all);
-+
-+/**
+ * enum dpsw_counter - Counters types
+ * @DPSW_CNT_ING_FRAME: Counts ingress frames
+ * @DPSW_CNT_ING_BYTE: Counts ingress bytes
@@ -14510,244 +14645,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ enum dpsw_counter type,
+ u64 *counter);
+
-+int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ enum dpsw_counter type,
-+ u64 counter);
-+
-+/**
-+ * Maximum number of TC
-+ */
-+#define DPSW_MAX_TC 8
-+
-+/**
-+ * enum dpsw_priority_selector - User priority
-+ * @DPSW_UP_PCP: Priority Code Point (PCP): a 3-bit field which
-+ * refers to the IEEE 802.1p priority.
-+ * @DPSW_UP_DSCP: Differentiated services Code Point (DSCP): 6 bit
-+ * field from IP header
-+ *
-+ */
-+enum dpsw_priority_selector {
-+ DPSW_UP_PCP = 0,
-+ DPSW_UP_DSCP = 1
-+};
-+
-+/**
-+ * enum dpsw_schedule_mode - Traffic classes scheduling
-+ * @DPSW_SCHED_STRICT_PRIORITY: schedule strict priority
-+ * @DPSW_SCHED_WEIGHTED: schedule based on token bucket created algorithm
-+ */
-+enum dpsw_schedule_mode {
-+ DPSW_SCHED_STRICT_PRIORITY,
-+ DPSW_SCHED_WEIGHTED
-+};
-+
-+/**
-+ * struct dpsw_tx_schedule_cfg - traffic class configuration
-+ * @mode: Strict or weight-based scheduling
-+ * @delta_bandwidth: weighted Bandwidth in range from 100 to 10000
-+ */
-+struct dpsw_tx_schedule_cfg {
-+ enum dpsw_schedule_mode mode;
-+ u16 delta_bandwidth;
-+};
-+
-+/**
-+ * struct dpsw_tx_selection_cfg - Mapping user priority into traffic
-+ * class configuration
-+ * @priority_selector: Source for user priority regeneration
-+ * @tc_id: The Regenerated User priority that the incoming
-+ * User Priority is mapped to for this interface
-+ * @tc_sched: Traffic classes configuration
-+ */
-+struct dpsw_tx_selection_cfg {
-+ enum dpsw_priority_selector priority_selector;
-+ u8 tc_id[DPSW_MAX_PRIORITIES];
-+ struct dpsw_tx_schedule_cfg tc_sched[DPSW_MAX_TC];
-+};
-+
-+int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_tx_selection_cfg *cfg);
-+
-+/**
-+ * enum dpsw_reflection_filter - Filter type for frames to reflect
-+ * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames
-+ * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames belong to
-+ * particular VLAN defined by vid parameter
-+ *
-+ */
-+enum dpsw_reflection_filter {
-+ DPSW_REFLECTION_FILTER_INGRESS_ALL = 0,
-+ DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1
-+};
-+
-+/**
-+ * struct dpsw_reflection_cfg - Structure representing reflection information
-+ * @filter: Filter type for frames to reflect
-+ * @vlan_id: Vlan Id to reflect; valid only when filter type is
-+ * DPSW_INGRESS_VLAN
-+ */
-+struct dpsw_reflection_cfg {
-+ enum dpsw_reflection_filter filter;
-+ u16 vlan_id;
-+};
-+
-+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_reflection_cfg *cfg);
-+
-+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_reflection_cfg *cfg);
-+
-+/**
-+ * enum dpsw_metering_mode - Metering modes
-+ * @DPSW_METERING_MODE_NONE: metering disabled
-+ * @DPSW_METERING_MODE_RFC2698: RFC 2698
-+ * @DPSW_METERING_MODE_RFC4115: RFC 4115
-+ */
-+enum dpsw_metering_mode {
-+ DPSW_METERING_MODE_NONE = 0,
-+ DPSW_METERING_MODE_RFC2698,
-+ DPSW_METERING_MODE_RFC4115
-+};
-+
-+/**
-+ * enum dpsw_metering_unit - Metering count
-+ * @DPSW_METERING_UNIT_BYTES: count bytes
-+ * @DPSW_METERING_UNIT_FRAMES: count frames
-+ */
-+enum dpsw_metering_unit {
-+ DPSW_METERING_UNIT_BYTES = 0,
-+ DPSW_METERING_UNIT_FRAMES
-+};
-+
-+/**
-+ * struct dpsw_metering_cfg - Metering configuration
-+ * @mode: metering modes
-+ * @units: Bytes or frame units
-+ * @cir: Committed information rate (CIR) in Kbits/s
-+ * @eir: Peak information rate (PIR) Kbit/s rfc2698
-+ * Excess information rate (EIR) Kbit/s rfc4115
-+ * @cbs: Committed burst size (CBS) in bytes
-+ * @ebs: Peak burst size (PBS) in bytes for rfc2698
-+ * Excess bust size (EBS) in bytes rfc4115
-+ *
-+ */
-+struct dpsw_metering_cfg {
-+ enum dpsw_metering_mode mode;
-+ enum dpsw_metering_unit units;
-+ u32 cir;
-+ u32 eir;
-+ u32 cbs;
-+ u32 ebs;
-+};
-+
-+int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_metering_cfg *cfg);
-+
-+int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u8 tc_id,
-+ const struct dpsw_metering_cfg *cfg);
-+
-+/**
-+ * enum dpsw_early_drop_unit - DPSW early drop unit
-+ * @DPSW_EARLY_DROP_UNIT_BYTE: count bytes
-+ * @DPSW_EARLY_DROP_UNIT_FRAMES: count frames
-+ */
-+enum dpsw_early_drop_unit {
-+ DPSW_EARLY_DROP_UNIT_BYTE = 0,
-+ DPSW_EARLY_DROP_UNIT_FRAMES
-+};
-+
-+/**
-+ * enum dpsw_early_drop_mode - DPSW early drop mode
-+ * @DPSW_EARLY_DROP_MODE_NONE: early drop is disabled
-+ * @DPSW_EARLY_DROP_MODE_TAIL: early drop in taildrop mode
-+ * @DPSW_EARLY_DROP_MODE_WRED: early drop in WRED mode
-+ */
-+enum dpsw_early_drop_mode {
-+ DPSW_EARLY_DROP_MODE_NONE = 0,
-+ DPSW_EARLY_DROP_MODE_TAIL,
-+ DPSW_EARLY_DROP_MODE_WRED
-+};
-+
-+/**
-+ * struct dpsw_wred_cfg - WRED configuration
-+ * @max_threshold: maximum threshold that packets may be discarded. Above this
-+ * threshold all packets are discarded; must be less than 2^39;
-+ * approximated to be expressed as (x+256)*2^(y-1) due to HW
-+ * implementation.
-+ * @min_threshold: minimum threshold that packets may be discarded at
-+ * @drop_probability: probability that a packet will be discarded (1-100,
-+ * associated with the maximum threshold)
-+ */
-+struct dpsw_wred_cfg {
-+ u64 min_threshold;
-+ u64 max_threshold;
-+ u8 drop_probability;
-+};
-+
-+/**
-+ * struct dpsw_early_drop_cfg - early-drop configuration
-+ * @drop_mode: drop mode
-+ * @units: count units
-+ * @yellow: WRED - 'yellow' configuration
-+ * @green: WRED - 'green' configuration
-+ * @tail_drop_threshold: tail drop threshold
-+ */
-+struct dpsw_early_drop_cfg {
-+ enum dpsw_early_drop_mode drop_mode;
-+ enum dpsw_early_drop_unit units;
-+ struct dpsw_wred_cfg yellow;
-+ struct dpsw_wred_cfg green;
-+ u32 tail_drop_threshold;
-+};
-+
-+void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
-+ u8 *early_drop_buf);
-+
-+int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u8 tc_id,
-+ u64 early_drop_iova);
-+
-+/**
-+ * struct dpsw_custom_tpid_cfg - Structure representing tag Protocol identifier
-+ * @tpid: An additional tag protocol identifier
-+ */
-+struct dpsw_custom_tpid_cfg {
-+ u16 tpid;
-+};
-+
-+int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpsw_custom_tpid_cfg *cfg);
-+
-+int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpsw_custom_tpid_cfg *cfg);
-+
+int dpsw_if_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
@@ -14758,49 +14655,12 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ u16 if_id);
+
-+/**
-+ * struct dpsw_if_attr - Structure representing DPSW interface attributes
-+ * @num_tcs: Number of traffic classes
-+ * @rate: Transmit rate in bits per second
-+ * @options: Interface configuration options (bitmap)
-+ * @enabled: Indicates if interface is enabled
-+ * @accept_all_vlan: The device discards/accepts incoming frames
-+ * for VLANs that do not include this interface
-+ * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device
-+ * discards untagged frames or priority-tagged frames received on
-+ * this interface;
-+ * When set to 'DPSW_ADMIT_ALL', untagged frames or priority-
-+ * tagged frames received on this interface are accepted
-+ * @qdid: control frames transmit qdid
-+ */
-+struct dpsw_if_attr {
-+ u8 num_tcs;
-+ u32 rate;
-+ u32 options;
-+ int enabled;
-+ int accept_all_vlan;
-+ enum dpsw_accepted_frames admit_untagged;
-+ u16 qdid;
-+};
-+
-+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_if_attr *attr);
-+
+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ u16 frame_length);
+
-+int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u16 *frame_length);
-+
+/**
+ * struct dpsw_vlan_cfg - VLAN Configuration
+ * @fdb_id: Forwarding Data Base
@@ -14839,12 +14699,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg);
+
-+int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg);
-+
+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
@@ -14857,77 +14711,12 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg);
+
-+int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg);
-+
+int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 vlan_id);
+
+/**
-+ * struct dpsw_vlan_attr - VLAN attributes
-+ * @fdb_id: Associated FDB ID
-+ * @num_ifs: Number of interfaces
-+ * @num_untagged_ifs: Number of untagged interfaces
-+ * @num_flooding_ifs: Number of flooding interfaces
-+ */
-+struct dpsw_vlan_attr {
-+ u16 fdb_id;
-+ u16 num_ifs;
-+ u16 num_untagged_ifs;
-+ u16 num_flooding_ifs;
-+};
-+
-+int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_attr *attr);
-+
-+int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg);
-+
-+int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg);
-+
-+int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * struct dpsw_fdb_cfg - FDB Configuration
-+ * @num_fdb_entries: Number of FDB entries
-+ * @fdb_aging_time: Aging time in seconds
-+ */
-+struct dpsw_fdb_cfg {
-+ u16 num_fdb_entries;
-+ u16 fdb_aging_time;
-+};
-+
-+int dpsw_fdb_add(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *fdb_id,
-+ const struct dpsw_fdb_cfg *cfg);
-+
-+int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id);
-+
-+/**
+ * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
+ * @DPSW_FDB_ENTRY_STATIC: Static entry
+ * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
@@ -14955,12 +14744,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 fdb_id,
+ const struct dpsw_fdb_unicast_cfg *cfg);
+
-+int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_unicast_cfg *cfg);
-+
+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
@@ -14987,12 +14770,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 fdb_id,
+ const struct dpsw_fdb_multicast_cfg *cfg);
+
-+int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_multicast_cfg *cfg);
-+
+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
@@ -15058,227 +14835,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 max_fdb_mc_groups;
+};
+
-+int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_attr *attr);
-+
-+/**
-+ * struct dpsw_acl_cfg - ACL Configuration
-+ * @max_entries: Number of FDB entries
-+ */
-+struct dpsw_acl_cfg {
-+ u16 max_entries;
-+};
-+
-+/**
-+ * struct dpsw_acl_fields - ACL fields.
-+ * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast,
-+ * slow protocols, MVRP, STP
-+ * @l2_source_mac: Source MAC address
-+ * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following
-+ * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae,
-+ * Q-in-Q, IPv4, IPv6, PPPoE
-+ * @l2_pcp_dei: indicate which protocol is encapsulated in the payload
-+ * @l2_vlan_id: layer 2 VLAN ID
-+ * @l2_ether_type: layer 2 Ethernet type
-+ * @l3_dscp: Layer 3 differentiated services code point
-+ * @l3_protocol: Tells the Network layer at the destination host, to which
-+ * Protocol this packet belongs to. The following protocol are
-+ * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6
-+ * (encapsulation), GRE, PTP
-+ * @l3_source_ip: Source IPv4 IP
-+ * @l3_dest_ip: Destination IPv4 IP
-+ * @l4_source_port: Source TCP/UDP Port
-+ * @l4_dest_port: Destination TCP/UDP Port
-+ */
-+struct dpsw_acl_fields {
-+ u8 l2_dest_mac[6];
-+ u8 l2_source_mac[6];
-+ u16 l2_tpid;
-+ u8 l2_pcp_dei;
-+ u16 l2_vlan_id;
-+ u16 l2_ether_type;
-+ u8 l3_dscp;
-+ u8 l3_protocol;
-+ u32 l3_source_ip;
-+ u32 l3_dest_ip;
-+ u16 l4_source_port;
-+ u16 l4_dest_port;
-+};
-+
-+/**
-+ * struct dpsw_acl_key - ACL key
-+ * @match: Match fields
-+ * @mask: Mask: b'1 - valid, b'0 don't care
-+ */
-+struct dpsw_acl_key {
-+ struct dpsw_acl_fields match;
-+ struct dpsw_acl_fields mask;
-+};
-+
-+/**
-+ * enum dpsw_acl_action
-+ * @DPSW_ACL_ACTION_DROP: Drop frame
-+ * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port
-+ * @DPSW_ACL_ACTION_ACCEPT: Accept frame
-+ * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface
-+ */
-+enum dpsw_acl_action {
-+ DPSW_ACL_ACTION_DROP,
-+ DPSW_ACL_ACTION_REDIRECT,
-+ DPSW_ACL_ACTION_ACCEPT,
-+ DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF
-+};
-+
-+/**
-+ * struct dpsw_acl_result - ACL action
-+ * @action: Action should be taken when ACL entry hit
-+ * @if_id: Interface IDs to redirect frame. Valid only if redirect selected for
-+ * action
-+ */
-+struct dpsw_acl_result {
-+ enum dpsw_acl_action action;
-+ u16 if_id;
-+};
-+
-+/**
-+ * struct dpsw_acl_entry_cfg - ACL entry
-+ * @key_iova: I/O virtual address of DMA-able memory filled with key after call
-+ * to dpsw_acl_prepare_entry_cfg()
-+ * @result: Required action when entry hit occurs
-+ * @precedence: Precedence inside ACL 0 is lowest; This priority can not change
-+ * during the lifetime of a Policy. It is user responsibility to
-+ * space the priorities according to consequent rule additions.
-+ */
-+struct dpsw_acl_entry_cfg {
-+ u64 key_iova;
-+ struct dpsw_acl_result result;
-+ int precedence;
-+};
-+
-+int dpsw_acl_add(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *acl_id,
-+ const struct dpsw_acl_cfg *cfg);
-+
-+int dpsw_acl_remove(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id);
-+
-+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
-+ uint8_t *entry_cfg_buf);
-+
-+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_entry_cfg *cfg);
-+
-+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_entry_cfg *cfg);
-+
-+/**
-+ * struct dpsw_acl_if_cfg - List of interfaces to Associate with ACL
-+ * @num_ifs: Number of interfaces
-+ * @if_id: List of interfaces
-+ */
-+struct dpsw_acl_if_cfg {
-+ u16 num_ifs;
-+ u16 if_id[DPSW_MAX_IF];
-+};
-+
-+int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_if_cfg *cfg);
-+
-+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_if_cfg *cfg);
-+
-+/**
-+ * struct dpsw_acl_attr - ACL Attributes
-+ * @max_entries: Max number of ACL entries
-+ * @num_entries: Number of used ACL entries
-+ * @num_ifs: Number of interfaces associated with ACL
-+ */
-+struct dpsw_acl_attr {
-+ u16 max_entries;
-+ u16 num_entries;
-+ u16 num_ifs;
-+};
-+
-+int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ struct dpsw_acl_attr *attr);
-+/**
-+ * struct dpsw_ctrl_if_attr - Control interface attributes
-+ * @rx_fqid: Receive FQID
-+ * @rx_err_fqid: Receive error FQID
-+ * @tx_err_conf_fqid: Transmit error and confirmation FQID
-+ */
-+struct dpsw_ctrl_if_attr {
-+ u32 rx_fqid;
-+ u32 rx_err_fqid;
-+ u32 tx_err_conf_fqid;
-+};
-+
-+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpsw_ctrl_if_attr *attr);
-+
-+/**
-+ * Maximum number of DPBP
-+ */
-+#define DPSW_MAX_DPBP 8
-+
-+/**
-+ * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
-+ * @num_dpbp: Number of DPBPs
-+ * @pools: Array of buffer pools parameters; The number of valid entries
-+ * must match 'num_dpbp' value
-+ */
-+struct dpsw_ctrl_if_pools_cfg {
-+ u8 num_dpbp;
-+ /**
-+ * struct pools - Buffer pools parameters
-+ * @dpbp_id: DPBP object ID
-+ * @buffer_size: Buffer size
-+ * @backup_pool: Backup pool
-+ */
-+ struct {
-+ int dpbp_id;
-+ u16 buffer_size;
-+ int backup_pool;
-+ } pools[DPSW_MAX_DPBP];
-+};
-+
-+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpsw_ctrl_if_pools_cfg *cfg);
-+
-+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
+int dpsw_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
@@ -15286,9 +14842,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+#endif /* __FSL_DPSW_H */
--- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/switch.c
-@@ -0,0 +1,1857 @@
-+/* Copyright 2014-2015 Freescale Semiconductor Inc.
++++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
+@@ -0,0 +1,206 @@
++/* Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
@@ -15297,8 +14854,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
@@ -15307,284 +14864,363 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
-+#include <linux/module.h>
-+#include <linux/msi.h>
++#include "ethsw.h"
+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/rtnetlink.h>
-+#include <linux/if_vlan.h>
-+
-+#include <uapi/linux/if_bridge.h>
-+#include <net/netlink.h>
++static struct {
++ enum dpsw_counter id;
++ char name[ETH_GSTRING_LEN];
++} ethsw_ethtool_counters[] = {
++ {DPSW_CNT_ING_FRAME, "rx frames"},
++ {DPSW_CNT_ING_BYTE, "rx bytes"},
++ {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
++ {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
++ {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
++ {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
++ {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
++ {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
++ {DPSW_CNT_EGR_FRAME, "tx frames"},
++ {DPSW_CNT_EGR_BYTE, "tx bytes"},
++ {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
+
-+#include "../../fsl-mc/include/mc.h"
-+#include "dpsw.h"
-+#include "dpsw-cmd.h"
++};
+
-+static const char ethsw_drv_version[] = "0.1";
++#define ETHSW_NUM_COUNTERS ARRAY_SIZE(ethsw_ethtool_counters)
+
-+/* Minimal supported DPSE version */
-+#define DPSW_MIN_VER_MAJOR 8
-+#define DPSW_MIN_VER_MINOR 0
++static void ethsw_get_drvinfo(struct net_device *netdev,
++ struct ethtool_drvinfo *drvinfo)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ u16 version_major, version_minor;
++ int err;
+
-+/* IRQ index */
-+#define DPSW_MAX_IRQ_NUM 2
++ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+
-+#define ETHSW_VLAN_MEMBER 1
-+#define ETHSW_VLAN_UNTAGGED 2
-+#define ETHSW_VLAN_PVID 4
-+#define ETHSW_VLAN_GLOBAL 8
++ err = dpsw_get_api_version(port_priv->ethsw_data->mc_io, 0,
++ &version_major,
++ &version_minor);
++ if (err)
++ strlcpy(drvinfo->fw_version, "N/A",
++ sizeof(drvinfo->fw_version));
++ else
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%u.%u", version_major, version_minor);
+
-+/* Maximum Frame Length supported by HW (currently 10k) */
-+#define DPAA2_MFL (10 * 1024)
-+#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
-+#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
++ strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
++ sizeof(drvinfo->bus_info));
++}
+
-+struct ethsw_port_priv {
-+ struct net_device *netdev;
-+ struct list_head list;
-+ u16 port_index;
-+ struct ethsw_dev_priv *ethsw_priv;
-+ u8 stp_state;
++static int
++ethsw_get_link_ksettings(struct net_device *netdev,
++ struct ethtool_link_ksettings *link_ksettings)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_link_state state = {0};
++ int err = 0;
+
-+ char vlans[VLAN_VID_MASK + 1];
++ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ &state);
++ if (err) {
++ netdev_err(netdev, "ERROR %d getting link state", err);
++ goto out;
++ }
+
-+};
++ /* At the moment, we have no way of interrogating the DPMAC
++ * from the DPSW side or there may not exist a DPMAC at all.
++ * Report only autoneg state, duplexity and speed.
++ */
++ if (state.options & DPSW_LINK_OPT_AUTONEG)
++ link_ksettings->base.autoneg = AUTONEG_ENABLE;
++ if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX))
++ link_ksettings->base.duplex = DUPLEX_FULL;
++ link_ksettings->base.speed = state.rate;
+
-+struct ethsw_dev_priv {
-+ struct net_device *netdev;
-+ struct fsl_mc_io *mc_io;
-+ u16 dpsw_handle;
-+ struct dpsw_attr sw_attr;
-+ int dev_id;
-+ /*TODO: redundant, we can use the slave dev list */
-+ struct list_head port_list;
++out:
++ return err;
++}
+
-+ bool flood;
-+ bool learning;
++static int
++ethsw_set_link_ksettings(struct net_device *netdev,
++ const struct ethtool_link_ksettings *link_ksettings)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_link_cfg cfg = {0};
++ int err = 0;
+
-+ char vlans[VLAN_VID_MASK + 1];
-+};
++ netdev_dbg(netdev, "Setting link parameters...");
+
-+static int ethsw_port_stop(struct net_device *netdev);
-+static int ethsw_port_open(struct net_device *netdev);
++ /* Due to a temporary MC limitation, the DPSW port must be down
++ * in order to be able to change link settings. Taking steps to let
++ * the user know that.
++ */
++ if (netif_running(netdev)) {
++ netdev_info(netdev, "Sorry, interface must be brought down first.\n");
++ return -EACCES;
++ }
+
-+static inline void __get_priv(struct net_device *netdev,
-+ struct ethsw_dev_priv **priv,
-+ struct ethsw_port_priv **port_priv)
-+{
-+ struct ethsw_dev_priv *_priv = NULL;
-+ struct ethsw_port_priv *_port_priv = NULL;
++ cfg.rate = link_ksettings->base.speed;
++ if (link_ksettings->base.autoneg == AUTONEG_ENABLE)
++ cfg.options |= DPSW_LINK_OPT_AUTONEG;
++ else
++ cfg.options &= ~DPSW_LINK_OPT_AUTONEG;
++ if (link_ksettings->base.duplex == DUPLEX_HALF)
++ cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX;
++ else
++ cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX;
+
-+ if (netdev->flags & IFF_MASTER) {
-+ _priv = netdev_priv(netdev);
-+ } else {
-+ _port_priv = netdev_priv(netdev);
-+ _priv = _port_priv->ethsw_priv;
-+ }
++ err = dpsw_if_set_link_cfg(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ &cfg);
++ if (err)
++ /* ethtool will be loud enough if we return an error; no point
++ * in putting our own error message on the console by default
++ */
++ netdev_dbg(netdev, "ERROR %d setting link cfg", err);
+
-+ if (priv)
-+ *priv = _priv;
-+ if (port_priv)
-+ *port_priv = _port_priv;
++ return err;
+}
+
-+/* -------------------------------------------------------------------------- */
-+/* ethsw netdevice ops */
-+
-+static netdev_tx_t ethsw_dropframe(struct sk_buff *skb, struct net_device *dev)
++static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
-+ /* we don't support I/O for now, drop the frame */
-+ dev_kfree_skb_any(skb);
-+ return NETDEV_TX_OK;
++ switch (sset) {
++ case ETH_SS_STATS:
++ return ETHSW_NUM_COUNTERS;
++ default:
++ return -EOPNOTSUPP;
++ }
+}
+
-+static int ethsw_open(struct net_device *netdev)
++static void ethsw_ethtool_get_strings(struct net_device *netdev,
++ u32 stringset, u8 *data)
+{
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+ struct list_head *pos;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ int err;
-+
-+ err = dpsw_enable(priv->mc_io, 0, priv->dpsw_handle);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_enable err %d\n", err);
-+ return err;
-+ }
++ int i;
+
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos, struct ethsw_port_priv, list);
-+ err = dev_open(port_priv->netdev);
-+ if (err)
-+ netdev_err(port_priv->netdev, "dev_open err %d\n", err);
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < ETHSW_NUM_COUNTERS; i++)
++ memcpy(data + i * ETH_GSTRING_LEN,
++ ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN);
++ break;
+ }
-+
-+ return 0;
+}
+
-+static int ethsw_stop(struct net_device *netdev)
++static void ethsw_ethtool_get_stats(struct net_device *netdev,
++ struct ethtool_stats *stats,
++ u64 *data)
+{
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+ struct list_head *pos;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ int err;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int i, err;
+
-+ err = dpsw_disable(priv->mc_io, 0, priv->dpsw_handle);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_disable err %d\n", err);
-+ return err;
-+ }
++ memset(data, 0,
++ sizeof(u64) * ETHSW_NUM_COUNTERS);
+
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos, struct ethsw_port_priv, list);
-+ err = dev_close(port_priv->netdev);
++ for (i = 0; i < ETHSW_NUM_COUNTERS; i++) {
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ ethsw_ethtool_counters[i].id,
++ &data[i]);
+ if (err)
-+ netdev_err(port_priv->netdev,
-+ "dev_close err %d\n", err);
++ netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
++ ethsw_ethtool_counters[i].name, err);
+ }
-+
-+ return 0;
+}
+
-+static int ethsw_add_vlan(struct net_device *netdev, u16 vid)
++const struct ethtool_ops ethsw_port_ethtool_ops = {
++ .get_drvinfo = ethsw_get_drvinfo,
++ .get_link = ethtool_op_get_link,
++ .get_link_ksettings = ethsw_get_link_ksettings,
++ .set_link_ksettings = ethsw_set_link_ksettings,
++ .get_strings = ethsw_ethtool_get_strings,
++ .get_ethtool_stats = ethsw_ethtool_get_stats,
++ .get_sset_count = ethsw_ethtool_get_sset_count,
++};
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+@@ -0,0 +1,1438 @@
++/* Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/module.h>
++
++#include <linux/interrupt.h>
++#include <linux/msi.h>
++#include <linux/kthread.h>
++#include <linux/workqueue.h>
++
++#include <linux/fsl/mc.h>
++
++#include "ethsw.h"
++
++static struct workqueue_struct *ethsw_owq;
++
++/* Minimal supported DPSW version */
++#define DPSW_MIN_VER_MAJOR 8
++#define DPSW_MIN_VER_MINOR 0
++
++#define DEFAULT_VLAN_ID 1
++
++static int ethsw_add_vlan(struct ethsw_core *ethsw, u16 vid)
+{
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+ int err;
++ int err;
+
+ struct dpsw_vlan_cfg vcfg = {
-+ /* TODO: add support for VLAN private FDBs */
+ .fdb_id = 0,
+ };
-+ if (priv->vlans[vid]) {
-+ netdev_err(netdev, "VLAN already configured\n");
++
++ if (ethsw->vlans[vid]) {
++ dev_err(ethsw->dev, "VLAN already configured\n");
+ return -EEXIST;
+ }
+
-+ err = dpsw_vlan_add(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
++ err = dpsw_vlan_add(ethsw->mc_io, 0,
++ ethsw->dpsw_handle, vid, &vcfg);
+ if (err) {
-+ netdev_err(netdev, "dpsw_vlan_add err %d\n", err);
++ dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
+ return err;
+ }
-+ priv->vlans[vid] = ETHSW_VLAN_MEMBER;
++ ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
+
+ return 0;
+}
+
-+static int ethsw_port_add_vlan(struct net_device *netdev, u16 vid, u16 flags)
++static int ethsw_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
-+ int err;
-+
-+ struct dpsw_vlan_if_cfg vcfg = {
-+ .num_ifs = 1,
-+ .if_id[0] = port_priv->port_index,
-+ };
-+
-+ if (port_priv->vlans[vid]) {
-+ netdev_err(netdev, "VLAN already configured\n");
-+ return -EEXIST;
-+ }
++ struct ethsw_core *ethsw = port_priv->ethsw_data;
++ struct net_device *netdev = port_priv->netdev;
++ struct dpsw_tci_cfg tci_cfg = { 0 };
++ bool is_oper;
++ int err, ret;
+
-+ if (flags & BRIDGE_VLAN_INFO_PVID && netif_oper_up(netdev)) {
-+ netdev_err(netdev, "interface must be down to change PVID!\n");
-+ return -EBUSY;
-+ }
-+
-+ err = dpsw_vlan_add_if(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
++ err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ port_priv->idx, &tci_cfg);
+ if (err) {
-+ netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
++ netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
+ return err;
+ }
-+ port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
+
-+ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
-+ err = dpsw_vlan_add_if_untagged(priv->mc_io, 0,
-+ priv->dpsw_handle, vid, &vcfg);
++ tci_cfg.vlan_id = pvid;
++
++ /* Interface needs to be down to change PVID */
++ is_oper = netif_oper_up(netdev);
++ if (is_oper) {
++ err = dpsw_if_disable(ethsw->mc_io, 0,
++ ethsw->dpsw_handle,
++ port_priv->idx);
+ if (err) {
-+ netdev_err(netdev, "dpsw_vlan_add_if_untagged err %d\n",
-+ err);
++ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ }
-+ port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
+ }
+
-+ if (flags & BRIDGE_VLAN_INFO_PVID) {
-+ struct dpsw_tci_cfg tci_cfg = {
-+ /* TODO: at least add better defaults if these cannot
-+ * be configured
-+ */
-+ .pcp = 0,
-+ .dei = 0,
-+ .vlan_id = vid,
-+ };
-+
-+ err = dpsw_if_set_tci(priv->mc_io, 0, priv->dpsw_handle,
-+ port_priv->port_index, &tci_cfg);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
-+ return err;
++ err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ port_priv->idx, &tci_cfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
++ goto set_tci_error;
++ }
++
++ /* Delete previous PVID info and mark the new one */
++ port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
++ port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
++ port_priv->pvid = pvid;
++
++set_tci_error:
++ if (is_oper) {
++ ret = dpsw_if_enable(ethsw->mc_io, 0,
++ ethsw->dpsw_handle,
++ port_priv->idx);
++ if (ret) {
++ netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
++ return ret;
+ }
-+ port_priv->vlans[vid] |= ETHSW_VLAN_PVID;
+ }
+
-+ return 0;
++ return err;
+}
+
-+static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
-+ [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
-+ [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
-+ [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
-+ .len = sizeof(struct bridge_vlan_info), },
-+};
-+
-+static int ethsw_setlink_af_spec(struct net_device *netdev,
-+ struct nlattr **tb)
++static int ethsw_port_add_vlan(struct ethsw_port_priv *port_priv,
++ u16 vid, u16 flags)
+{
-+ struct bridge_vlan_info *vinfo;
-+ struct ethsw_dev_priv *priv = NULL;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ int err = 0;
++ struct ethsw_core *ethsw = port_priv->ethsw_data;
++ struct net_device *netdev = port_priv->netdev;
++ struct dpsw_vlan_if_cfg vcfg;
++ int err;
+
-+ if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
-+ netdev_err(netdev, "no VLAN INFO in nlmsg\n");
-+ return -EOPNOTSUPP;
++ if (port_priv->vlans[vid]) {
++ netdev_warn(netdev, "VLAN %d already configured\n", vid);
++ return -EEXIST;
+ }
+
-+ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
-+
-+ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
-+ return -EINVAL;
++ vcfg.num_ifs = 1;
++ vcfg.if_id[0] = port_priv->idx;
++ err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
++ return err;
++ }
+
-+ __get_priv(netdev, &priv, &port_priv);
++ port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
+
-+ if (!port_priv || !priv->vlans[vinfo->vid]) {
-+ /* command targets switch device or this is a new VLAN */
-+ err = ethsw_add_vlan(priv->netdev, vinfo->vid);
-+ if (err)
++ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
++ err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
++ ethsw->dpsw_handle,
++ vid, &vcfg);
++ if (err) {
++ netdev_err(netdev,
++ "dpsw_vlan_add_if_untagged err %d\n", err);
+ return err;
-+
-+ /* command targets switch device; mark it*/
-+ if (!port_priv)
-+ priv->vlans[vinfo->vid] |= ETHSW_VLAN_GLOBAL;
++ }
++ port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
+ }
+
-+ if (port_priv) {
-+ /* command targets switch port */
-+ err = ethsw_port_add_vlan(netdev, vinfo->vid, vinfo->flags);
++ if (flags & BRIDGE_VLAN_INFO_PVID) {
++ err = ethsw_port_set_pvid(port_priv, vid);
+ if (err)
+ return err;
+ }
@@ -15592,1263 +15228,941 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ return 0;
+}
+
-+static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = {
-+ [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
-+ [IFLA_BRPORT_COST] = { .type = NLA_U32 },
-+ [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
-+ [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
-+ [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
-+ [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
-+ [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
-+ [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
-+};
-+
-+static int ethsw_set_learning(struct net_device *netdev, u8 flag)
++static int ethsw_set_learning(struct ethsw_core *ethsw, u8 flag)
+{
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+ enum dpsw_fdb_learning_mode learn_mode;
-+ int err;
++ enum dpsw_fdb_learning_mode learn_mode;
++ int err;
+
+ if (flag)
+ learn_mode = DPSW_FDB_LEARNING_MODE_HW;
+ else
+ learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
+
-+ err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle,
-+ 0, learn_mode);
++ err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
++ learn_mode);
+ if (err) {
-+ netdev_err(netdev, "dpsw_fdb_set_learning_mode err %d\n", err);
++ dev_err(ethsw->dev, "dpsw_fdb_set_learning_mode err %d\n", err);
+ return err;
+ }
-+ priv->learning = !!flag;
++ ethsw->learning = !!flag;
+
+ return 0;
+}
+
-+static int ethsw_port_set_flood(struct net_device *netdev, u8 flag)
++static int ethsw_port_set_flood(struct ethsw_port_priv *port_priv, u8 flag)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
-+ int err;
++ int err;
+
-+ err = dpsw_if_set_flooding(priv->mc_io, 0, priv->dpsw_handle,
-+ port_priv->port_index, (int)flag);
++ err = dpsw_if_set_flooding(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx, flag);
+ if (err) {
-+ netdev_err(netdev, "dpsw_fdb_set_learning_mode err %d\n", err);
++ netdev_err(port_priv->netdev,
++ "dpsw_fdb_set_learning_mode err %d\n", err);
+ return err;
+ }
-+ priv->flood = !!flag;
++ port_priv->flood = !!flag;
+
+ return 0;
+}
+
-+static int ethsw_port_set_state(struct net_device *netdev, u8 state)
++static int ethsw_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
-+ u8 old_state = port_priv->stp_state;
-+ int err;
-+
+ struct dpsw_stp_cfg stp_cfg = {
-+ .vlan_id = 1,
++ .vlan_id = DEFAULT_VLAN_ID,
+ .state = state,
+ };
-+ /* TODO: check port state, interface may be down */
-+
-+ if (state > BR_STATE_BLOCKING)
-+ return -EINVAL;
-+
-+ if (state == port_priv->stp_state)
-+ return 0;
-+
-+ if (state == BR_STATE_DISABLED) {
-+ port_priv->stp_state = state;
-+
-+ err = ethsw_port_stop(netdev);
-+ if (err)
-+ goto error;
-+ } else {
-+ err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle,
-+ port_priv->port_index, &stp_cfg);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_if_set_stp err %d\n", err);
-+ return err;
-+ }
++ int err;
+
-+ port_priv->stp_state = state;
++ if (!netif_oper_up(port_priv->netdev) || state == port_priv->stp_state)
++ return 0; /* Nothing to do */
+
-+ if (old_state == BR_STATE_DISABLED) {
-+ err = ethsw_port_open(netdev);
-+ if (err)
-+ goto error;
-+ }
++ err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx, &stp_cfg);
++ if (err) {
++ netdev_err(port_priv->netdev,
++ "dpsw_if_set_stp err %d\n", err);
++ return err;
+ }
+
++ port_priv->stp_state = state;
++
+ return 0;
-+error:
-+ port_priv->stp_state = old_state;
-+ return err;
+}
+
-+static int ethsw_setlink_protinfo(struct net_device *netdev,
-+ struct nlattr **tb)
++static int ethsw_dellink_switch(struct ethsw_core *ethsw, u16 vid)
+{
-+ struct ethsw_dev_priv *priv;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ int err = 0;
-+
-+ __get_priv(netdev, &priv, &port_priv);
-+
-+ if (tb[IFLA_BRPORT_LEARNING]) {
-+ u8 flag = nla_get_u8(tb[IFLA_BRPORT_LEARNING]);
-+
-+ if (port_priv)
-+ netdev_warn(netdev,
-+ "learning set on whole switch dev\n");
-+
-+ err = ethsw_set_learning(priv->netdev, flag);
-+ if (err)
-+ return err;
-+
-+ } else if (tb[IFLA_BRPORT_UNICAST_FLOOD] && port_priv) {
-+ u8 flag = nla_get_u8(tb[IFLA_BRPORT_UNICAST_FLOOD]);
-+
-+ err = ethsw_port_set_flood(port_priv->netdev, flag);
-+ if (err)
-+ return err;
++ struct ethsw_port_priv *ppriv_local = NULL;
++ int i, err;
+
-+ } else if (tb[IFLA_BRPORT_STATE] && port_priv) {
-+ u8 state = nla_get_u8(tb[IFLA_BRPORT_STATE]);
++ if (!ethsw->vlans[vid])
++ return -ENOENT;
+
-+ err = ethsw_port_set_state(port_priv->netdev, state);
-+ if (err)
-+ return err;
++ err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
++ if (err) {
++ dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
++ return err;
++ }
++ ethsw->vlans[vid] = 0;
+
-+ } else {
-+ return -EOPNOTSUPP;
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ ppriv_local = ethsw->ports[i];
++ ppriv_local->vlans[vid] = 0;
+ }
+
+ return 0;
+}
+
-+static int ethsw_setlink(struct net_device *netdev,
-+ struct nlmsghdr *nlh,
-+ u16 flags)
++static int ethsw_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
++ const unsigned char *addr)
+{
-+ struct nlattr *attr;
-+ struct nlattr *tb[(IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
-+ IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX + 1];
-+ int err = 0;
++ struct dpsw_fdb_unicast_cfg entry = {0};
++ int err;
+
-+ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
-+ if (attr) {
-+ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
-+ ifla_br_policy);
-+ if (err) {
-+ netdev_err(netdev,
-+ "nla_parse_nested for br_policy err %d\n",
-+ err);
-+ return err;
-+ }
++ entry.if_egress = port_priv->idx;
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ ether_addr_copy(entry.mac_addr, addr);
+
-+ err = ethsw_setlink_af_spec(netdev, tb);
-+ return err;
-+ }
++ err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ 0, &entry);
++ if (err)
++ netdev_err(port_priv->netdev,
++ "dpsw_fdb_add_unicast err %d\n", err);
++ return err;
++}
+
-+ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
-+ if (attr) {
-+ err = nla_parse_nested(tb, IFLA_BRPORT_MAX, attr,
-+ ifla_brport_policy);
-+ if (err) {
-+ netdev_err(netdev,
-+ "nla_parse_nested for brport_policy err %d\n",
-+ err);
-+ return err;
-+ }
++static int ethsw_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
++ const unsigned char *addr)
++{
++ struct dpsw_fdb_unicast_cfg entry = {0};
++ int err;
+
-+ err = ethsw_setlink_protinfo(netdev, tb);
-+ return err;
-+ }
++ entry.if_egress = port_priv->idx;
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ ether_addr_copy(entry.mac_addr, addr);
+
-+ netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC/PROTINFO\n");
-+ return -EOPNOTSUPP;
++ err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ 0, &entry);
++ /* Silently discard calling multiple times the del command */
++ if (err && err != -ENXIO)
++ netdev_err(port_priv->netdev,
++ "dpsw_fdb_remove_unicast err %d\n", err);
++ return err;
+}
+
-+static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev,
-+ struct ethsw_dev_priv *priv)
++static int ethsw_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
++ const unsigned char *addr)
+{
-+ u8 operstate = netif_running(netdev) ? netdev->operstate : IF_OPER_DOWN;
-+ int iflink;
++ struct dpsw_fdb_multicast_cfg entry = {0};
+ int err;
+
-+ err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u32(skb, IFLA_MASTER, priv->netdev->ifindex);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
-+ if (err)
-+ goto nla_put_err;
-+ if (netdev->addr_len) {
-+ err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
-+ netdev->dev_addr);
-+ if (err)
-+ goto nla_put_err;
-+ }
-+
-+ iflink = dev_get_iflink(netdev);
-+ if (netdev->ifindex != iflink) {
-+ err = nla_put_u32(skb, IFLA_LINK, iflink);
-+ if (err)
-+ goto nla_put_err;
-+ }
-+
-+ return 0;
++ ether_addr_copy(entry.mac_addr, addr);
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ entry.num_ifs = 1;
++ entry.if_id[0] = port_priv->idx;
+
-+nla_put_err:
-+ netdev_err(netdev, "nla_put_ err %d\n", err);
++ err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ 0, &entry);
++ /* Silently discard calling multiple times the add command */
++ if (err && err != -ENXIO)
++ netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
++ err);
+ return err;
+}
+
-+static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev,
-+ struct ethsw_port_priv *port_priv)
++static int ethsw_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
++ const unsigned char *addr)
+{
-+ struct nlattr *nest;
++ struct dpsw_fdb_multicast_cfg entry = {0};
+ int err;
+
-+ u8 stp_state = port_priv->stp_state;
++ ether_addr_copy(entry.mac_addr, addr);
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ entry.num_ifs = 1;
++ entry.if_id[0] = port_priv->idx;
+
-+ if (port_priv->stp_state == DPSW_STP_STATE_BLOCKING)
-+ stp_state = BR_STATE_BLOCKING;
++ err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ 0, &entry);
++ /* Silently discard calling multiple times the del command */
++ if (err && err != -ENAVAIL)
++ netdev_err(port_priv->netdev,
++ "dpsw_fdb_remove_multicast err %d\n", err);
++ return err;
++}
+
-+ nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
-+ if (!nest) {
-+ netdev_err(netdev, "nla_nest_start failed\n");
-+ return -ENOMEM;
-+ }
++static void port_get_stats(struct net_device *netdev,
++ struct rtnl_link_stats64 *stats)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ u64 tmp;
++ int err;
+
-+ err = nla_put_u8(skb, IFLA_BRPORT_STATE, stp_state);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_ING_FRAME, &stats->rx_packets);
+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
++ goto error;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_EGR_FRAME, &stats->tx_packets);
+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
++ goto error;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_ING_BYTE, &stats->rx_bytes);
+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
++ goto error;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
++ goto error;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_ING_FRAME_DISCARD,
++ &stats->rx_dropped);
+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_LEARNING,
-+ port_priv->ethsw_priv->learning);
++ goto error;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_ING_FLTR_FRAME,
++ &tmp);
+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
-+ port_priv->ethsw_priv->flood);
++ goto error;
++ stats->rx_dropped += tmp;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_EGR_FRAME_DISCARD,
++ &stats->tx_dropped);
+ if (err)
-+ goto nla_put_err;
-+ nla_nest_end(skb, nest);
++ goto error;
+
-+ return 0;
++ return;
+
-+nla_put_err:
-+ netdev_err(netdev, "nla_put_ err %d\n", err);
-+ nla_nest_cancel(skb, nest);
-+ return err;
++error:
++ netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
+}
+
-+static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev,
-+ struct ethsw_dev_priv *priv,
-+ struct ethsw_port_priv *port_priv)
++static bool port_has_offload_stats(const struct net_device *netdev,
++ int attr_id)
+{
-+ struct nlattr *nest;
-+ struct bridge_vlan_info vinfo;
-+ const char *vlans;
-+ u16 i;
-+ int err;
++ return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
++}
+
-+ nest = nla_nest_start(skb, IFLA_AF_SPEC);
-+ if (!nest) {
-+ netdev_err(netdev, "nla_nest_start failed");
-+ return -ENOMEM;
++static int port_get_offload_stats(int attr_id,
++ const struct net_device *netdev,
++ void *sp)
++{
++ switch (attr_id) {
++ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
++ port_get_stats((struct net_device *)netdev, sp);
++ return 0;
+ }
+
-+ if (port_priv)
-+ vlans = port_priv->vlans;
-+ else
-+ vlans = priv->vlans;
-+
-+ for (i = 0; i < VLAN_VID_MASK + 1; i++) {
-+ vinfo.flags = 0;
-+ vinfo.vid = i;
-+
-+ if (vlans[i] & ETHSW_VLAN_UNTAGGED)
-+ vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
++ return -EINVAL;
++}
+
-+ if (vlans[i] & ETHSW_VLAN_PVID)
-+ vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
++static int port_change_mtu(struct net_device *netdev, int mtu)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int err;
+
-+ if (vlans[i] & ETHSW_VLAN_MEMBER) {
-+ err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
-+ sizeof(vinfo), &vinfo);
-+ if (err)
-+ goto nla_put_err;
-+ }
++ err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
++ 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ (u16)ETHSW_L2_MAX_FRM(mtu));
++ if (err) {
++ netdev_err(netdev,
++ "dpsw_if_set_max_frame_length() err %d\n", err);
++ return err;
+ }
+
-+ nla_nest_end(skb, nest);
-+
++ netdev->mtu = mtu;
+ return 0;
-+nla_put_err:
-+ netdev_err(netdev, "nla_put_ err %d\n", err);
-+ nla_nest_cancel(skb, nest);
-+ return err;
+}
+
-+static int ethsw_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-+ struct net_device *netdev, u32 filter_mask,
-+ int nlflags)
++static int port_carrier_state_sync(struct net_device *netdev)
+{
-+ struct ethsw_dev_priv *priv;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ struct ifinfomsg *hdr;
-+ struct nlmsghdr *nlh;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_link_state state;
+ int err;
+
-+ __get_priv(netdev, &priv, &port_priv);
-+
-+ nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
-+ if (!nlh)
-+ return -EMSGSIZE;
-+
-+ hdr = nlmsg_data(nlh);
-+ memset(hdr, 0, sizeof(*hdr));
-+ hdr->ifi_family = AF_BRIDGE;
-+ hdr->ifi_type = netdev->type;
-+ hdr->ifi_index = netdev->ifindex;
-+ hdr->ifi_flags = dev_get_flags(netdev);
-+
-+ err = __nla_put_netdev(skb, netdev, priv);
-+ if (err)
-+ goto nla_put_err;
-+
-+ if (port_priv) {
-+ err = __nla_put_port(skb, netdev, port_priv);
-+ if (err)
-+ goto nla_put_err;
-+ }
-+
-+ /* Check if the VID information is requested */
-+ if (filter_mask & RTEXT_FILTER_BRVLAN) {
-+ err = __nla_put_vlan(skb, netdev, priv, port_priv);
-+ if (err)
-+ goto nla_put_err;
++ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx, &state);
++ if (err) {
++ netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
++ return err;
+ }
+
-+ nlmsg_end(skb, nlh);
-+ return skb->len;
++ WARN_ONCE(state.up > 1, "Garbage read into link_state");
+
-+nla_put_err:
-+ nlmsg_cancel(skb, nlh);
-+ return -EMSGSIZE;
++ if (state.up != port_priv->link_state) {
++ if (state.up)
++ netif_carrier_on(netdev);
++ else
++ netif_carrier_off(netdev);
++ port_priv->link_state = state.up;
++ }
++ return 0;
+}
+
-+static int ethsw_dellink_switch(struct ethsw_dev_priv *priv, u16 vid)
++static int port_open(struct net_device *netdev)
+{
-+ struct list_head *pos;
-+ struct ethsw_port_priv *ppriv_local = NULL;
-+ int err = 0;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int err;
+
-+ if (!priv->vlans[vid])
-+ return -ENOENT;
++ /* No need to allow Tx as control interface is disabled */
++ netif_tx_stop_all_queues(netdev);
+
-+ err = dpsw_vlan_remove(priv->mc_io, 0, priv->dpsw_handle, vid);
++ err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx);
+ if (err) {
-+ netdev_err(priv->netdev, "dpsw_vlan_remove err %d\n", err);
++ netdev_err(netdev, "dpsw_if_enable err %d\n", err);
+ return err;
+ }
-+ priv->vlans[vid] = 0;
+
-+ list_for_each(pos, &priv->port_list) {
-+ ppriv_local = list_entry(pos, struct ethsw_port_priv,
-+ list);
-+ ppriv_local->vlans[vid] = 0;
++ /* sync carrier state */
++ err = port_carrier_state_sync(netdev);
++ if (err) {
++ netdev_err(netdev,
++ "port_carrier_state_sync err %d\n", err);
++ goto err_carrier_sync;
+ }
+
+ return 0;
++
++err_carrier_sync:
++ dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx);
++ return err;
+}
+
-+static int ethsw_dellink_port(struct ethsw_dev_priv *priv,
-+ struct ethsw_port_priv *port_priv,
-+ u16 vid)
++static int port_stop(struct net_device *netdev)
+{
-+ struct list_head *pos;
-+ struct ethsw_port_priv *ppriv_local = NULL;
-+ struct dpsw_vlan_if_cfg vcfg = {
-+ .num_ifs = 1,
-+ .if_id[0] = port_priv->port_index,
-+ };
-+ unsigned int count = 0;
-+ int err = 0;
-+
-+ if (!port_priv->vlans[vid])
-+ return -ENOENT;
-+
-+ /* VLAN will be deleted from switch if global flag is not set
-+ * and is configured on only one port
-+ */
-+ if (!(priv->vlans[vid] & ETHSW_VLAN_GLOBAL)) {
-+ list_for_each(pos, &priv->port_list) {
-+ ppriv_local = list_entry(pos, struct ethsw_port_priv,
-+ list);
-+ if (ppriv_local->vlans[vid] & ETHSW_VLAN_MEMBER)
-+ count++;
-+ }
-+
-+ if (count == 1)
-+ return ethsw_dellink_switch(priv, vid);
-+ }
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int err;
+
-+ err = dpsw_vlan_remove_if(priv->mc_io, 0, priv->dpsw_handle,
-+ vid, &vcfg);
++ err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx);
+ if (err) {
-+ netdev_err(priv->netdev, "dpsw_vlan_remove_if err %d\n", err);
++ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ }
-+ port_priv->vlans[vid] = 0;
++
+ return 0;
+}
+
-+static int ethsw_dellink(struct net_device *netdev,
-+ struct nlmsghdr *nlh,
-+ u16 flags)
++static netdev_tx_t port_dropframe(struct sk_buff *skb,
++ struct net_device *netdev)
+{
-+ struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
-+ struct nlattr *spec;
-+ struct bridge_vlan_info *vinfo;
-+ struct ethsw_dev_priv *priv;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ int err = 0;
-+
-+ spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
-+ if (!spec)
-+ return 0;
++ /* we don't support I/O for now, drop the frame */
++ dev_kfree_skb_any(skb);
+
-+ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
-+ if (err)
-+ return err;
++ return NETDEV_TX_OK;
++}
+
-+ if (!tb[IFLA_BRIDGE_VLAN_INFO])
-+ return -EOPNOTSUPP;
++static const struct net_device_ops ethsw_port_ops = {
++ .ndo_open = port_open,
++ .ndo_stop = port_stop,
+
-+ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
++ .ndo_set_mac_address = eth_mac_addr,
++ .ndo_change_mtu = port_change_mtu,
++ .ndo_has_offload_stats = port_has_offload_stats,
++ .ndo_get_offload_stats = port_get_offload_stats,
+
-+ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
-+ return -EINVAL;
++ .ndo_start_xmit = port_dropframe,
++};
+
-+ __get_priv(netdev, &priv, &port_priv);
++static void ethsw_links_state_update(struct ethsw_core *ethsw)
++{
++ int i;
+
-+ /* decide if command targets switch device or port */
-+ if (!port_priv)
-+ err = ethsw_dellink_switch(priv, vinfo->vid);
-+ else
-+ err = ethsw_dellink_port(priv, port_priv, vinfo->vid);
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
++ port_carrier_state_sync(ethsw->ports[i]->netdev);
++}
+
-+ return err;
++static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
++{
++ return IRQ_WAKE_THREAD;
+}
+
-+static const struct net_device_ops ethsw_ops = {
-+ .ndo_open = &ethsw_open,
-+ .ndo_stop = &ethsw_stop,
++static irqreturn_t ethsw_irq0_handler_thread(int irq_num, void *arg)
++{
++ struct device *dev = (struct device *)arg;
++ struct ethsw_core *ethsw = dev_get_drvdata(dev);
++
++ /* Mask the events and the if_id reserved bits to be cleared on read */
++ u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
++ int err;
+
-+ .ndo_bridge_setlink = &ethsw_setlink,
-+ .ndo_bridge_getlink = &ethsw_getlink,
-+ .ndo_bridge_dellink = &ethsw_dellink,
++ err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, &status);
++ if (err) {
++ dev_err(dev, "Can't get irq status (err %d)", err);
+
-+ .ndo_start_xmit = &ethsw_dropframe,
-+};
++ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
++ if (err)
++ dev_err(dev, "Can't clear irq status (err %d)", err);
++ goto out;
++ }
+
-+/*--------------------------------------------------------------------------- */
-+/* switch port netdevice ops */
++ if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
++ ethsw_links_state_update(ethsw);
+
-+static int _ethsw_port_carrier_state_sync(struct net_device *netdev)
++out:
++ return IRQ_HANDLED;
++}
++
++static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_link_state state;
-+ int err;
++ struct device *dev = &sw_dev->dev;
++ struct ethsw_core *ethsw = dev_get_drvdata(dev);
++ u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
++ struct fsl_mc_device_irq *irq;
++ int err;
+
-+ err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index, &state);
-+ if (unlikely(err)) {
-+ netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
++ err = fsl_mc_allocate_irqs(sw_dev);
++ if (err) {
++ dev_err(dev, "MC irqs allocation failed\n");
+ return err;
+ }
+
-+ WARN_ONCE(state.up > 1, "Garbage read into link_state");
++ if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
++ err = -EINVAL;
++ goto free_irq;
++ }
+
-+ if (state.up)
-+ netif_carrier_on(port_priv->netdev);
-+ else
-+ netif_carrier_off(port_priv->netdev);
++ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, 0);
++ if (err) {
++ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
++ goto free_irq;
++ }
+
-+ return 0;
-+}
++ irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
+
-+static int ethsw_port_open(struct net_device *netdev)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ int err;
++ err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
++ ethsw_irq0_handler,
++ ethsw_irq0_handler_thread,
++ IRQF_NO_SUSPEND | IRQF_ONESHOT,
++ dev_name(dev), dev);
++ if (err) {
++ dev_err(dev, "devm_request_threaded_irq(): %d", err);
++ goto free_irq;
++ }
+
-+ err = dpsw_if_enable(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index);
++ err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, mask);
+ if (err) {
-+ netdev_err(netdev, "dpsw_if_enable err %d\n", err);
-+ return err;
++ dev_err(dev, "dpsw_set_irq_mask(): %d", err);
++ goto free_devm_irq;
+ }
+
-+ /* sync carrier state */
-+ err = _ethsw_port_carrier_state_sync(netdev);
++ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, 1);
+ if (err) {
-+ netdev_err(netdev, "_ethsw_port_carrier_state_sync err %d\n",
-+ err);
-+ goto err_carrier_sync;
++ dev_err(dev, "dpsw_set_irq_enable(): %d", err);
++ goto free_devm_irq;
+ }
+
+ return 0;
+
-+err_carrier_sync:
-+ dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index);
++free_devm_irq:
++ devm_free_irq(dev, irq->msi_desc->irq, dev);
++free_irq:
++ fsl_mc_free_irqs(sw_dev);
+ return err;
+}
+
-+static int ethsw_port_stop(struct net_device *netdev)
++static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ int err;
++ struct device *dev = &sw_dev->dev;
++ struct ethsw_core *ethsw = dev_get_drvdata(dev);
++ struct fsl_mc_device_irq *irq;
++ int err;
+
-+ err = dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
-+ return err;
-+ }
++ irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
++ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, 0);
++ if (err)
++ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
+
-+ return 0;
++ fsl_mc_free_irqs(sw_dev);
+}
+
-+static int ethsw_port_fdb_add_uc(struct net_device *netdev,
-+ const unsigned char *addr)
++static int swdev_port_attr_get(struct net_device *netdev,
++ struct switchdev_attr *attr)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_fdb_unicast_cfg entry = {0};
-+ int err;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+
-+ entry.if_egress = port_priv->port_index;
-+ entry.type = DPSW_FDB_ENTRY_STATIC;
-+ ether_addr_copy(entry.mac_addr, addr);
++ switch (attr->id) {
++ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
++ attr->u.ppid.id_len = 1;
++ attr->u.ppid.id[0] = port_priv->ethsw_data->dev_id;
++ break;
++ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
++ attr->u.brport_flags =
++ (port_priv->ethsw_data->learning ? BR_LEARNING : 0) |
++ (port_priv->flood ? BR_FLOOD : 0);
++ break;
++ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
++ attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD;
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
+
-+ err = dpsw_fdb_add_unicast(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ 0, &entry);
-+ if (err)
-+ netdev_err(netdev, "dpsw_fdb_add_unicast err %d\n", err);
-+ return err;
++ return 0;
+}
+
-+static int ethsw_port_fdb_del_uc(struct net_device *netdev,
-+ const unsigned char *addr)
++static int port_attr_stp_state_set(struct net_device *netdev,
++ struct switchdev_trans *trans,
++ u8 state)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_fdb_unicast_cfg entry = {0};
-+ int err;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+
-+ entry.if_egress = port_priv->port_index;
-+ entry.type = DPSW_FDB_ENTRY_STATIC;
-+ ether_addr_copy(entry.mac_addr, addr);
++ if (switchdev_trans_ph_prepare(trans))
++ return 0;
+
-+ err = dpsw_fdb_remove_unicast(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ 0, &entry);
-+ if (err)
-+ netdev_err(netdev, "dpsw_fdb_remove_unicast err %d\n", err);
-+ return err;
++ return ethsw_port_set_stp_state(port_priv, state);
+}
+
-+static int ethsw_port_fdb_add_mc(struct net_device *netdev,
-+ const unsigned char *addr)
++static int port_attr_br_flags_set(struct net_device *netdev,
++ struct switchdev_trans *trans,
++ unsigned long flags)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_fdb_multicast_cfg entry = {0};
-+ int err;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int err = 0;
+
-+ ether_addr_copy(entry.mac_addr, addr);
-+ entry.type = DPSW_FDB_ENTRY_STATIC;
-+ entry.num_ifs = 1;
-+ entry.if_id[0] = port_priv->port_index;
++ if (switchdev_trans_ph_prepare(trans))
++ return 0;
+
-+ err = dpsw_fdb_add_multicast(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ 0, &entry);
++ /* Learning is enabled per switch */
++ err = ethsw_set_learning(port_priv->ethsw_data, !!(flags & BR_LEARNING));
+ if (err)
-+ netdev_err(netdev, "dpsw_fdb_add_multicast err %d\n", err);
++ goto exit;
++
++ err = ethsw_port_set_flood(port_priv, !!(flags & BR_FLOOD));
++
++exit:
+ return err;
+}
+
-+static int ethsw_port_fdb_del_mc(struct net_device *netdev,
-+ const unsigned char *addr)
++static int swdev_port_attr_set(struct net_device *netdev,
++ const struct switchdev_attr *attr,
++ struct switchdev_trans *trans)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_fdb_multicast_cfg entry = {0};
-+ int err;
++ int err = 0;
+
-+ ether_addr_copy(entry.mac_addr, addr);
-+ entry.type = DPSW_FDB_ENTRY_STATIC;
-+ entry.num_ifs = 1;
-+ entry.if_id[0] = port_priv->port_index;
++ switch (attr->id) {
++ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
++ err = port_attr_stp_state_set(netdev, trans,
++ attr->u.stp_state);
++ break;
++ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
++ err = port_attr_br_flags_set(netdev, trans,
++ attr->u.brport_flags);
++ break;
++ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
++ /* VLANs are supported by default */
++ break;
++ default:
++ err = -EOPNOTSUPP;
++ break;
++ }
+
-+ err = dpsw_fdb_remove_multicast(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ 0, &entry);
-+ if (err)
-+ netdev_err(netdev, "dpsw_fdb_remove_multicast err %d\n", err);
+ return err;
+}
+
-+static int _lookup_address(struct net_device *netdev, int is_uc,
-+ const unsigned char *addr)
++static int port_vlans_add(struct net_device *netdev,
++ const struct switchdev_obj_port_vlan *vlan,
++ struct switchdev_trans *trans)
+{
-+ struct netdev_hw_addr *ha;
-+ struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int vid, err;
+
-+ netif_addr_lock_bh(netdev);
-+ list_for_each_entry(ha, &list->list, list) {
-+ if (ether_addr_equal(ha->addr, addr)) {
-+ netif_addr_unlock_bh(netdev);
-+ return 1;
++ if (switchdev_trans_ph_prepare(trans))
++ return 0;
++
++ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
++ if (!port_priv->ethsw_data->vlans[vid]) {
++ /* this is a new VLAN */
++ err = ethsw_add_vlan(port_priv->ethsw_data, vid);
++ if (err)
++ return err;
++
++ port_priv->ethsw_data->vlans[vid] |= ETHSW_VLAN_GLOBAL;
+ }
++ err = ethsw_port_add_vlan(port_priv, vid, vlan->flags);
++ if (err)
++ break;
+ }
-+ netif_addr_unlock_bh(netdev);
-+ return 0;
++
++ return err;
+}
+
-+static int ethsw_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
-+ struct net_device *netdev,
-+ const unsigned char *addr, u16 vid,
-+ u16 flags)
++static int swdev_port_obj_add(struct net_device *netdev,
++ const struct switchdev_obj *obj,
++ struct switchdev_trans *trans)
+{
-+ struct list_head *pos;
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
+ int err;
+
-+ /* TODO: add replace support when added to iproute bridge */
-+ if (!(flags & NLM_F_REQUEST)) {
-+ netdev_err(netdev,
-+ "ethsw_port_fdb_add unexpected flags value %08x\n",
-+ flags);
-+ return -EINVAL;
++ switch (obj->id) {
++ case SWITCHDEV_OBJ_ID_PORT_VLAN:
++ err = port_vlans_add(netdev,
++ SWITCHDEV_OBJ_PORT_VLAN(obj),
++ trans);
++ break;
++ default:
++ err = -EOPNOTSUPP;
++ break;
+ }
+
-+ if (is_unicast_ether_addr(addr)) {
-+ /* if entry cannot be replaced, return error if exists */
-+ if (flags & NLM_F_EXCL || flags & NLM_F_APPEND) {
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos,
-+ struct ethsw_port_priv,
-+ list);
-+ if (_lookup_address(port_priv->netdev,
-+ 1, addr))
-+ return -EEXIST;
-+ }
-+ }
-+
-+ err = ethsw_port_fdb_add_uc(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "ethsw_port_fdb_add_uc err %d\n",
-+ err);
-+ return err;
-+ }
-+
-+ /* we might have replaced an existing entry for a different
-+ * switch port, make sure the address doesn't linger in any
-+ * port address list
-+ */
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos, struct ethsw_port_priv,
-+ list);
-+ dev_uc_del(port_priv->netdev, addr);
-+ }
++ return err;
++}
+
-+ err = dev_uc_add(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "dev_uc_add err %d\n", err);
-+ return err;
-+ }
-+ } else {
-+ struct dpsw_fdb_multicast_cfg entry = {
-+ .type = DPSW_FDB_ENTRY_STATIC,
-+ .num_ifs = 0,
-+ };
++static int ethsw_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
++{
++ struct ethsw_core *ethsw = port_priv->ethsw_data;
++ struct net_device *netdev = port_priv->netdev;
++ struct dpsw_vlan_if_cfg vcfg;
++ int i, err;
+
-+ /* check if address is already set on this port */
-+ if (_lookup_address(netdev, 0, addr))
-+ return -EEXIST;
++ if (!port_priv->vlans[vid])
++ return -ENOENT;
+
-+ /* check if the address exists on other port */
-+ ether_addr_copy(entry.mac_addr, addr);
-+ err = dpsw_fdb_get_multicast(priv->mc_io, 0, priv->dpsw_handle,
-+ 0, &entry);
-+ if (!err) {
-+ /* entry exists, can we replace it? */
-+ if (flags & NLM_F_EXCL)
-+ return -EEXIST;
-+ } else if (err != -ENAVAIL) {
-+ netdev_err(netdev, "dpsw_fdb_get_unicast err %d\n",
-+ err);
++ if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
++ err = ethsw_port_set_pvid(port_priv, 0);
++ if (err)
+ return err;
-+ }
++ }
+
-+ err = ethsw_port_fdb_add_mc(netdev, addr);
++ vcfg.num_ifs = 1;
++ vcfg.if_id[0] = port_priv->idx;
++ if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
++ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
++ ethsw->dpsw_handle,
++ vid, &vcfg);
+ if (err) {
-+ netdev_err(netdev, "ethsw_port_fdb_add_mc err %d\n",
++ netdev_err(netdev,
++ "dpsw_vlan_remove_if_untagged err %d\n",
+ err);
-+ return err;
-+ }
-+
-+ err = dev_mc_add(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "dev_mc_add err %d\n", err);
-+ return err;
+ }
++ port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
+ }
+
-+ return 0;
-+}
-+
-+static int ethsw_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
-+ struct net_device *netdev,
-+ const unsigned char *addr, u16 vid)
-+{
-+ int err;
-+
-+ if (is_unicast_ether_addr(addr)) {
-+ err = ethsw_port_fdb_del_uc(netdev, addr);
++ if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
++ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ vid, &vcfg);
+ if (err) {
-+ netdev_err(netdev, "ethsw_port_fdb_del_uc err %d\n",
-+ err);
++ netdev_err(netdev,
++ "dpsw_vlan_remove_if err %d\n", err);
+ return err;
+ }
++ port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
+
-+ /* also delete if configured on port */
-+ err = dev_uc_del(netdev, addr);
-+ if (err && err != -ENOENT) {
-+ netdev_err(netdev, "dev_uc_del err %d\n", err);
-+ return err;
-+ }
-+ } else {
-+ if (!_lookup_address(netdev, 0, addr))
-+ return -ENOENT;
++ /* Delete VLAN from switch if it is no longer configured on
++ * any port
++ */
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
++ if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
++ return 0; /* Found a port member in VID */
+
-+ err = dev_mc_del(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "dev_mc_del err %d\n", err);
-+ return err;
-+ }
++ ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
+
-+ err = ethsw_port_fdb_del_mc(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "ethsw_port_fdb_del_mc err %d\n",
-+ err);
++ err = ethsw_dellink_switch(ethsw, vid);
++ if (err)
+ return err;
-+ }
+ }
+
+ return 0;
+}
+
-+struct rtnl_link_stats64 *ethsw_port_get_stats(struct net_device *netdev,
-+ struct rtnl_link_stats64 *storage)
++static int port_vlans_del(struct net_device *netdev,
++ const struct switchdev_obj_port_vlan *vlan)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ u64 tmp;
-+ int err;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_ING_FRAME, &storage->rx_packets);
-+ if (err)
-+ goto error;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_EGR_FRAME, &storage->tx_packets);
-+ if (err)
-+ goto error;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_ING_BYTE, &storage->rx_bytes);
-+ if (err)
-+ goto error;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_EGR_BYTE, &storage->tx_bytes);
-+ if (err)
-+ goto error;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_ING_FRAME_DISCARD,
-+ &storage->rx_dropped);
-+ if (err)
-+ goto error;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_ING_FLTR_FRAME,
-+ &tmp);
-+ if (err)
-+ goto error;
-+ storage->rx_dropped += tmp;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_EGR_FRAME_DISCARD,
-+ &storage->tx_dropped);
-+ if (err)
-+ goto error;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int vid, err;
+
-+ return storage;
++ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
++ err = ethsw_port_del_vlan(port_priv, vid);
++ if (err)
++ break;
++ }
+
-+error:
-+ netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
++ return err;
+}
+
-+static int ethsw_port_change_mtu(struct net_device *netdev, int mtu)
++static int swdev_port_obj_del(struct net_device *netdev,
++ const struct switchdev_obj *obj)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ int err;
-+
-+ if (mtu < ETH_ZLEN || mtu > ETHSW_MAX_FRAME_LENGTH) {
-+ netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
-+ mtu, ETH_ZLEN, ETHSW_MAX_FRAME_LENGTH);
-+ return -EINVAL;
-+ }
++ int err;
+
-+ err = dpsw_if_set_max_frame_length(port_priv->ethsw_priv->mc_io,
-+ 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ (u16)ETHSW_L2_MAX_FRM(mtu));
-+ if (err) {
-+ netdev_err(netdev,
-+ "dpsw_if_set_max_frame_length() err %d\n", err);
-+ return err;
++ switch (obj->id) {
++ case SWITCHDEV_OBJ_ID_PORT_VLAN:
++ err = port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
++ break;
++ default:
++ err = -EOPNOTSUPP;
++ break;
+ }
-+
-+ netdev->mtu = mtu;
-+ return 0;
++ return err;
+}
+
-+static const struct net_device_ops ethsw_port_ops = {
-+ .ndo_open = &ethsw_port_open,
-+ .ndo_stop = &ethsw_port_stop,
-+
-+ .ndo_fdb_add = &ethsw_port_fdb_add,
-+ .ndo_fdb_del = &ethsw_port_fdb_del,
-+ .ndo_fdb_dump = &ndo_dflt_fdb_dump,
-+
-+ .ndo_get_stats64 = &ethsw_port_get_stats,
-+ .ndo_change_mtu = &ethsw_port_change_mtu,
-+
-+ .ndo_start_xmit = &ethsw_dropframe,
++static const struct switchdev_ops ethsw_port_switchdev_ops = {
++ .switchdev_port_attr_get = swdev_port_attr_get,
++ .switchdev_port_attr_set = swdev_port_attr_set,
++ .switchdev_port_obj_add = swdev_port_obj_add,
++ .switchdev_port_obj_del = swdev_port_obj_del,
+};
+
-+static void ethsw_get_drvinfo(struct net_device *netdev,
-+ struct ethtool_drvinfo *drvinfo)
++/* For the moment, only flood setting needs to be updated */
++static int port_bridge_join(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ u16 version_major, version_minor;
-+ int err;
+
-+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
-+ strlcpy(drvinfo->version, ethsw_drv_version, sizeof(drvinfo->version));
-+
-+ err = dpsw_get_api_version(port_priv->ethsw_priv->mc_io, 0,
-+ &version_major,
-+ &version_minor);
-+ if (err)
-+ strlcpy(drvinfo->fw_version, "N/A",
-+ sizeof(drvinfo->fw_version));
-+ else
-+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-+ "%u.%u", version_major, version_minor);
-+
-+ strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
-+ sizeof(drvinfo->bus_info));
++ /* Enable flooding */
++ return ethsw_port_set_flood(port_priv, 1);
+}
+
-+static int ethsw_get_settings(struct net_device *netdev,
-+ struct ethtool_cmd *cmd)
++static int port_bridge_leave(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_link_state state = {0};
-+ int err = 0;
-+
-+ err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ &state);
-+ if (err) {
-+ netdev_err(netdev, "ERROR %d getting link state", err);
-+ goto out;
-+ }
-+
-+ /* At the moment, we have no way of interrogating the DPMAC
-+ * from the DPSW side or there may not exist a DPMAC at all.
-+ * Report only autoneg state, duplexity and speed.
-+ */
-+ if (state.options & DPSW_LINK_OPT_AUTONEG)
-+ cmd->autoneg = AUTONEG_ENABLE;
-+ if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX))
-+ cmd->autoneg = DUPLEX_FULL;
-+ ethtool_cmd_speed_set(cmd, state.rate);
+
-+out:
-+ return err;
++ /* Disable flooding */
++ return ethsw_port_set_flood(port_priv, 0);
+}
+
-+static int ethsw_set_settings(struct net_device *netdev,
-+ struct ethtool_cmd *cmd)
++static int port_netdevice_event(struct notifier_block *unused,
++ unsigned long event, void *ptr)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_link_state state = {0};
-+ struct dpsw_link_cfg cfg = {0};
++ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
++ struct netdev_notifier_changeupper_info *info = ptr;
++ struct net_device *upper_dev;
+ int err = 0;
+
-+ netdev_dbg(netdev, "Setting link parameters...");
-+
-+ err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ &state);
-+ if (err) {
-+ netdev_err(netdev, "ERROR %d getting link state", err);
-+ goto out;
-+ }
-+
-+ /* Due to a temporary MC limitation, the DPSW port must be down
-+ * in order to be able to change link settings. Taking steps to let
-+ * the user know that.
-+ */
-+ if (netif_running(netdev)) {
-+ netdev_info(netdev,
-+ "Sorry, interface must be brought down first.\n");
-+ return -EACCES;
++ if (netdev->netdev_ops != &ethsw_port_ops)
++ return NOTIFY_DONE;
++
++ /* Handle just upper dev link/unlink for the moment */
++ if (event == NETDEV_CHANGEUPPER) {
++ upper_dev = info->upper_dev;
++ if (netif_is_bridge_master(upper_dev)) {
++ if (info->linking)
++ err = port_bridge_join(netdev);
++ else
++ err = port_bridge_leave(netdev);
++ }
+ }
+
-+ cfg.options = state.options;
-+ cfg.rate = ethtool_cmd_speed(cmd);
-+ if (cmd->autoneg == AUTONEG_ENABLE)
-+ cfg.options |= DPSW_LINK_OPT_AUTONEG;
-+ else
-+ cfg.options &= ~DPSW_LINK_OPT_AUTONEG;
-+ if (cmd->duplex == DUPLEX_HALF)
-+ cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX;
-+ else
-+ cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX;
-+
-+ err = dpsw_if_set_link_cfg(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ &cfg);
-+ if (err)
-+ /* ethtool will be loud enough if we return an error; no point
-+ * in putting our own error message on the console by default
-+ */
-+ netdev_dbg(netdev, "ERROR %d setting link cfg", err);
-+
-+out:
-+ return err;
++ return notifier_from_errno(err);
+}
+
-+static struct {
-+ enum dpsw_counter id;
-+ char name[ETH_GSTRING_LEN];
-+} ethsw_ethtool_counters[] = {
-+ {DPSW_CNT_ING_FRAME, "rx frames"},
-+ {DPSW_CNT_ING_BYTE, "rx bytes"},
-+ {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
-+ {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
-+ {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
-+ {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
-+ {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
-+ {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
-+ {DPSW_CNT_EGR_FRAME, "tx frames"},
-+ {DPSW_CNT_EGR_BYTE, "tx bytes"},
-+ {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
++static struct notifier_block port_nb __read_mostly = {
++ .notifier_call = port_netdevice_event,
++};
+
++struct ethsw_switchdev_event_work {
++ struct work_struct work;
++ struct switchdev_notifier_fdb_info fdb_info;
++ struct net_device *dev;
++ unsigned long event;
+};
+
-+static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset)
++static void ethsw_switchdev_event_work(struct work_struct *work)
+{
-+ switch (sset) {
-+ case ETH_SS_STATS:
-+ return ARRAY_SIZE(ethsw_ethtool_counters);
-+ default:
-+ return -EOPNOTSUPP;
-+ }
-+}
++ struct ethsw_switchdev_event_work *switchdev_work =
++ container_of(work, struct ethsw_switchdev_event_work, work);
++ struct net_device *dev = switchdev_work->dev;
++ struct switchdev_notifier_fdb_info *fdb_info;
++ struct ethsw_port_priv *port_priv;
+
-+static void ethsw_ethtool_get_strings(struct net_device *netdev,
-+ u32 stringset, u8 *data)
-+{
-+ u32 i;
++ rtnl_lock();
++ port_priv = netdev_priv(dev);
++ fdb_info = &switchdev_work->fdb_info;
+
-+ switch (stringset) {
-+ case ETH_SS_STATS:
-+ for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++)
-+ memcpy(data + i * ETH_GSTRING_LEN,
-+ ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN);
++ switch (switchdev_work->event) {
++ case SWITCHDEV_FDB_ADD_TO_DEVICE:
++ if (is_unicast_ether_addr(fdb_info->addr))
++ ethsw_port_fdb_add_uc(netdev_priv(dev), fdb_info->addr);
++ else
++ ethsw_port_fdb_add_mc(netdev_priv(dev), fdb_info->addr);
++ break;
++ case SWITCHDEV_FDB_DEL_TO_DEVICE:
++ if (is_unicast_ether_addr(fdb_info->addr))
++ ethsw_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
++ else
++ ethsw_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
+ break;
+ }
++
++ rtnl_unlock();
++ kfree(switchdev_work->fdb_info.addr);
++ kfree(switchdev_work);
++ dev_put(dev);
+}
+
-+static void ethsw_ethtool_get_stats(struct net_device *netdev,
-+ struct ethtool_stats *stats,
-+ u64 *data)
++/* Called under rcu_read_lock() */
++static int port_switchdev_event(struct notifier_block *unused,
++ unsigned long event, void *ptr)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ u32 i;
-+ int err;
++ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
++ struct ethsw_switchdev_event_work *switchdev_work;
++ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+
-+ for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++) {
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ ethsw_ethtool_counters[i].id,
-+ &data[i]);
-+ if (err)
-+ netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
-+ ethsw_ethtool_counters[i].name, err);
-+ }
-+}
-+
-+static const struct ethtool_ops ethsw_port_ethtool_ops = {
-+ .get_drvinfo = &ethsw_get_drvinfo,
-+ .get_link = &ethtool_op_get_link,
-+ .get_settings = &ethsw_get_settings,
-+ .set_settings = &ethsw_set_settings,
-+ .get_strings = &ethsw_ethtool_get_strings,
-+ .get_ethtool_stats = &ethsw_ethtool_get_stats,
-+ .get_sset_count = &ethsw_ethtool_get_sset_count,
-+};
++ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
++ if (!switchdev_work)
++ return NOTIFY_BAD;
+
-+/* -------------------------------------------------------------------------- */
-+/* ethsw driver functions */
++ INIT_WORK(&switchdev_work->work, ethsw_switchdev_event_work);
++ switchdev_work->dev = dev;
++ switchdev_work->event = event;
+
-+static int ethsw_links_state_update(struct ethsw_dev_priv *priv)
-+{
-+ struct list_head *pos;
-+ struct ethsw_port_priv *port_priv;
-+ int err;
++ switch (event) {
++ case SWITCHDEV_FDB_ADD_TO_DEVICE:
++ case SWITCHDEV_FDB_DEL_TO_DEVICE:
++ memcpy(&switchdev_work->fdb_info, ptr,
++ sizeof(switchdev_work->fdb_info));
++ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
++ if (!switchdev_work->fdb_info.addr)
++ goto err_addr_alloc;
+
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos, struct ethsw_port_priv,
-+ list);
++ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
++ fdb_info->addr);
+
-+ err = _ethsw_port_carrier_state_sync(port_priv->netdev);
-+ if (err)
-+ netdev_err(port_priv->netdev,
-+ "_ethsw_port_carrier_state_sync err %d\n",
-+ err);
++ /* Take a reference on the device to avoid being freed. */
++ dev_hold(dev);
++ break;
++ default:
++ return NOTIFY_DONE;
+ }
+
-+ return 0;
-+}
++ queue_work(ethsw_owq, &switchdev_work->work);
+
-+static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
-+{
-+ return IRQ_WAKE_THREAD;
-+}
++ return NOTIFY_DONE;
+
-+static irqreturn_t _ethsw_irq0_handler_thread(int irq_num, void *arg)
-+{
-+ struct device *dev = (struct device *)arg;
-+ struct net_device *netdev = dev_get_drvdata(dev);
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++err_addr_alloc:
++ kfree(switchdev_work);
++ return NOTIFY_BAD;
++}
+
-+ struct fsl_mc_io *io = priv->mc_io;
-+ u16 token = priv->dpsw_handle;
-+ int irq_index = DPSW_IRQ_INDEX_IF;
++static struct notifier_block port_switchdev_nb = {
++ .notifier_call = port_switchdev_event,
++};
+
-+ /* Mask the events and the if_id reserved bits to be cleared on read */
-+ u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
++static int ethsw_register_notifier(struct device *dev)
++{
+ int err;
+
-+ err = dpsw_get_irq_status(io, 0, token, irq_index, &status);
-+ if (unlikely(err)) {
-+ netdev_err(netdev, "Can't get irq status (err %d)", err);
-+
-+ err = dpsw_clear_irq_status(io, 0, token, irq_index,
-+ 0xFFFFFFFF);
-+ if (unlikely(err))
-+ netdev_err(netdev, "Can't clear irq status (err %d)",
-+ err);
-+ goto out;
++ err = register_netdevice_notifier(&port_nb);
++ if (err) {
++ dev_err(dev, "Failed to register netdev notifier\n");
++ return err;
+ }
+
-+ if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
-+ err = ethsw_links_state_update(priv);
-+ if (unlikely(err))
-+ goto out;
++ err = register_switchdev_notifier(&port_switchdev_nb);
++ if (err) {
++ dev_err(dev, "Failed to register switchdev notifier\n");
++ goto err_switchdev_nb;
+ }
+
-+out:
-+ return IRQ_HANDLED;
++ return 0;
++
++err_switchdev_nb:
++ unregister_netdevice_notifier(&port_nb);
++ return err;
+}
+
-+static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
++static int ethsw_open(struct ethsw_core *ethsw)
+{
-+ struct device *dev = &sw_dev->dev;
-+ struct net_device *netdev = dev_get_drvdata(dev);
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+ int err = 0;
-+ struct fsl_mc_device_irq *irq;
-+ const int irq_index = DPSW_IRQ_INDEX_IF;
-+ u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
++ struct ethsw_port_priv *port_priv = NULL;
++ int i, err;
+
-+ err = fsl_mc_allocate_irqs(sw_dev);
-+ if (unlikely(err)) {
-+ dev_err(dev, "MC irqs allocation failed\n");
++ err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
++ if (err) {
++ dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
+ return err;
+ }
+
-+ if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_MAX_IRQ_NUM)) {
-+ err = -EINVAL;
-+ goto free_irq;
-+ }
-+
-+ err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
-+ irq_index, 0);
-+ if (unlikely(err)) {
-+ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
-+ goto free_irq;
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ port_priv = ethsw->ports[i];
++ err = dev_open(port_priv->netdev);
++ if (err) {
++ netdev_err(port_priv->netdev, "dev_open err %d\n", err);
++ return err;
++ }
+ }
+
-+ irq = sw_dev->irqs[irq_index];
++ return 0;
++}
+
-+ err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
-+ ethsw_irq0_handler,
-+ _ethsw_irq0_handler_thread,
-+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
-+ dev_name(dev), dev);
-+ if (unlikely(err)) {
-+ dev_err(dev, "devm_request_threaded_irq(): %d", err);
-+ goto free_irq;
-+ }
++static int ethsw_stop(struct ethsw_core *ethsw)
++{
++ struct ethsw_port_priv *port_priv = NULL;
++ int i, err;
+
-+ err = dpsw_set_irq_mask(priv->mc_io, 0, priv->dpsw_handle,
-+ irq_index, mask);
-+ if (unlikely(err)) {
-+ dev_err(dev, "dpsw_set_irq_mask(): %d", err);
-+ goto free_devm_irq;
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ port_priv = ethsw->ports[i];
++ dev_close(port_priv->netdev);
+ }
+
-+ err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
-+ irq_index, 1);
-+ if (unlikely(err)) {
-+ dev_err(dev, "dpsw_set_irq_enable(): %d", err);
-+ goto free_devm_irq;
++ err = dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
++ if (err) {
++ dev_err(ethsw->dev, "dpsw_disable err %d\n", err);
++ return err;
+ }
+
+ return 0;
-+
-+free_devm_irq:
-+ devm_free_irq(dev, irq->msi_desc->irq, dev);
-+free_irq:
-+ fsl_mc_free_irqs(sw_dev);
-+ return err;
-+}
-+
-+static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
-+{
-+ struct device *dev = &sw_dev->dev;
-+ struct net_device *netdev = dev_get_drvdata(dev);
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+
-+ dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
-+ DPSW_IRQ_INDEX_IF, 0);
-+ devm_free_irq(dev,
-+ sw_dev->irqs[DPSW_IRQ_INDEX_IF]->msi_desc->irq,
-+ dev);
-+ fsl_mc_free_irqs(sw_dev);
+}
+
-+static int __cold
-+ethsw_init(struct fsl_mc_device *sw_dev)
++static int ethsw_init(struct fsl_mc_device *sw_dev)
+{
-+ struct device *dev = &sw_dev->dev;
-+ struct ethsw_dev_priv *priv;
-+ struct net_device *netdev;
-+ int err = 0;
-+ u16 i;
-+ u16 version_major, version_minor;
-+ const struct dpsw_stp_cfg stp_cfg = {
-+ .vlan_id = 1,
-+ .state = DPSW_STP_STATE_FORWARDING,
-+ };
-+
-+ netdev = dev_get_drvdata(dev);
-+ priv = netdev_priv(netdev);
++ struct device *dev = &sw_dev->dev;
++ struct ethsw_core *ethsw = dev_get_drvdata(dev);
++ u16 version_major, version_minor, i;
++ struct dpsw_stp_cfg stp_cfg;
++ int err;
+
-+ priv->dev_id = sw_dev->obj_desc.id;
++ ethsw->dev_id = sw_dev->obj_desc.id;
+
-+ err = dpsw_open(priv->mc_io, 0, priv->dev_id, &priv->dpsw_handle);
++ err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
+ if (err) {
+ dev_err(dev, "dpsw_open err %d\n", err);
-+ goto err_exit;
-+ }
-+ if (!priv->dpsw_handle) {
-+ dev_err(dev, "dpsw_open returned null handle but no error\n");
-+ err = -EFAULT;
-+ goto err_exit;
++ return err;
+ }
+
-+ err = dpsw_get_attributes(priv->mc_io, 0, priv->dpsw_handle,
-+ &priv->sw_attr);
++ err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ &ethsw->sw_attr);
+ if (err) {
+ dev_err(dev, "dpsw_get_attributes err %d\n", err);
+ goto err_close;
+ }
+
-+ err = dpsw_get_api_version(priv->mc_io, 0,
++ err = dpsw_get_api_version(ethsw->mc_io, 0,
+ &version_major,
+ &version_minor);
+ if (err) {
@@ -16868,21 +16182,24 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ goto err_close;
+ }
+
-+ err = dpsw_reset(priv->mc_io, 0, priv->dpsw_handle);
++ err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err) {
+ dev_err(dev, "dpsw_reset err %d\n", err);
+ goto err_close;
+ }
+
-+ err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle, 0,
++ err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
+ DPSW_FDB_LEARNING_MODE_HW);
+ if (err) {
+ dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
+ goto err_close;
+ }
+
-+ for (i = 0; i < priv->sw_attr.num_ifs; i++) {
-+ err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle, i,
++ stp_cfg.vlan_id = DEFAULT_VLAN_ID;
++ stp_cfg.state = DPSW_STP_STATE_FORWARDING;
++
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
+ &stp_cfg);
+ if (err) {
+ dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
@@ -16890,8 +16207,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ goto err_close;
+ }
+
-+ err = dpsw_if_set_broadcast(priv->mc_io, 0,
-+ priv->dpsw_handle, i, 1);
++ err = dpsw_if_set_broadcast(ethsw->mc_io, 0,
++ ethsw->dpsw_handle, i, 1);
+ if (err) {
+ dev_err(dev,
+ "dpsw_if_set_broadcast err %d for port %d\n",
@@ -16900,225 +16217,253 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ }
+ }
+
++ ethsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
++ "ethsw");
++ if (!ethsw_owq) {
++ err = -ENOMEM;
++ goto err_close;
++ }
++
++ err = ethsw_register_notifier(dev);
++ if (err)
++ goto err_destroy_ordered_workqueue;
++
+ return 0;
+
++err_destroy_ordered_workqueue:
++ destroy_workqueue(ethsw_owq);
++
+err_close:
-+ dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
-+err_exit:
++ dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ return err;
+}
+
-+static int __cold
-+ethsw_takedown(struct fsl_mc_device *sw_dev)
++static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
+{
-+ struct device *dev = &sw_dev->dev;
-+ struct net_device *netdev;
-+ struct ethsw_dev_priv *priv;
-+ int err;
++ const char def_mcast[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
++ struct net_device *netdev = port_priv->netdev;
++ struct ethsw_core *ethsw = port_priv->ethsw_data;
++ struct dpsw_vlan_if_cfg vcfg;
++ int err;
+
-+ netdev = dev_get_drvdata(dev);
-+ priv = netdev_priv(netdev);
++ /* Switch starts with all ports configured to VLAN 1. Need to
++ * remove this setting to allow configuration at bridge join
++ */
++ vcfg.num_ifs = 1;
++ vcfg.if_id[0] = port_priv->idx;
++
++ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DEFAULT_VLAN_ID, &vcfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_vlan_remove_if_untagged err %d\n",
++ err);
++ return err;
++ }
+
-+ err = dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
++ err = ethsw_port_set_pvid(port_priv, 0);
+ if (err)
-+ dev_warn(dev, "dpsw_close err %d\n", err);
++ return err;
+
-+ return 0;
++ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DEFAULT_VLAN_ID, &vcfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_vlan_remove_if err %d\n", err);
++ return err;
++ }
++
++ err = ethsw_port_fdb_add_mc(port_priv, def_mcast);
++
++ return err;
+}
+
-+static int __cold
-+ethsw_remove(struct fsl_mc_device *sw_dev)
++static void ethsw_unregister_notifier(struct device *dev)
+{
-+ struct device *dev;
-+ struct net_device *netdev;
-+ struct ethsw_dev_priv *priv;
-+ struct ethsw_port_priv *port_priv;
-+ struct list_head *pos;
++ int err;
++
++ err = unregister_switchdev_notifier(&port_switchdev_nb);
++ if (err)
++ dev_err(dev,
++ "Failed to unregister switchdev notifier (%d)\n", err);
++
++ err = unregister_netdevice_notifier(&port_nb);
++ if (err)
++ dev_err(dev,
++ "Failed to unregister netdev notifier (%d)\n", err);
++}
++
++static void ethsw_takedown(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev = &sw_dev->dev;
++ struct ethsw_core *ethsw = dev_get_drvdata(dev);
++ int err;
++
++ ethsw_unregister_notifier(dev);
++
++ err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
++ if (err)
++ dev_warn(dev, "dpsw_close err %d\n", err);
++}
++
++static int ethsw_remove(struct fsl_mc_device *sw_dev)
++{
++ struct ethsw_port_priv *port_priv;
++ struct ethsw_core *ethsw;
++ struct device *dev;
++ int i;
+
+ dev = &sw_dev->dev;
-+ netdev = dev_get_drvdata(dev);
-+ priv = netdev_priv(netdev);
++ ethsw = dev_get_drvdata(dev);
+
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos, struct ethsw_port_priv, list);
++ ethsw_teardown_irqs(sw_dev);
+
-+ rtnl_lock();
-+ netdev_upper_dev_unlink(port_priv->netdev, netdev);
-+ rtnl_unlock();
++ destroy_workqueue(ethsw_owq);
+
++ rtnl_lock();
++ ethsw_stop(ethsw);
++ rtnl_unlock();
++
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ port_priv = ethsw->ports[i];
+ unregister_netdev(port_priv->netdev);
+ free_netdev(port_priv->netdev);
+ }
-+
-+ ethsw_teardown_irqs(sw_dev);
-+
-+ unregister_netdev(netdev);
++ kfree(ethsw->ports);
+
+ ethsw_takedown(sw_dev);
-+ fsl_mc_portal_free(priv->mc_io);
++ fsl_mc_portal_free(ethsw->mc_io);
++
++ kfree(ethsw);
+
+ dev_set_drvdata(dev, NULL);
-+ free_netdev(netdev);
+
+ return 0;
+}
+
-+static int __cold
-+ethsw_probe(struct fsl_mc_device *sw_dev)
++static int ethsw_probe_port(struct ethsw_core *ethsw, u16 port_idx)
+{
-+ struct device *dev;
-+ struct net_device *netdev = NULL;
-+ struct ethsw_dev_priv *priv = NULL;
-+ int err = 0;
-+ u16 i;
-+ const char def_mcast[ETH_ALEN] = {
-+ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,
-+ };
-+ char port_name[IFNAMSIZ];
-+
-+ dev = &sw_dev->dev;
++ struct ethsw_port_priv *port_priv;
++ struct device *dev = ethsw->dev;
++ struct net_device *port_netdev;
++ int err;
+
-+ /* register switch device, it's for management only - no I/O */
-+ netdev = alloc_etherdev(sizeof(*priv));
-+ if (!netdev) {
++ port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
++ if (!port_netdev) {
+ dev_err(dev, "alloc_etherdev error\n");
+ return -ENOMEM;
+ }
-+ netdev->netdev_ops = &ethsw_ops;
+
-+ SET_NETDEV_DEV(netdev, dev);
-+ dev_set_drvdata(dev, netdev);
++ port_priv = netdev_priv(port_netdev);
++ port_priv->netdev = port_netdev;
++ port_priv->ethsw_data = ethsw;
+
-+ priv = netdev_priv(netdev);
-+ priv->netdev = netdev;
++ port_priv->idx = port_idx;
++ port_priv->stp_state = BR_STATE_FORWARDING;
+
-+ err = fsl_mc_portal_allocate(sw_dev, 0, &priv->mc_io);
-+ if (err) {
-+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
-+ goto err_free_netdev;
-+ }
-+ if (!priv->mc_io) {
-+ dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
-+ err = -EFAULT;
-+ goto err_free_netdev;
-+ }
++ /* Flooding is implicitly enabled */
++ port_priv->flood = true;
+
-+ err = ethsw_init(sw_dev);
-+ if (err) {
-+ dev_err(dev, "switch init err %d\n", err);
-+ goto err_free_cmdport;
-+ }
++ SET_NETDEV_DEV(port_netdev, dev);
++ port_netdev->netdev_ops = &ethsw_port_ops;
++ port_netdev->ethtool_ops = &ethsw_port_ethtool_ops;
++ port_netdev->switchdev_ops = &ethsw_port_switchdev_ops;
+
-+ netdev->flags = netdev->flags | IFF_PROMISC | IFF_MASTER;
++ /* Set MTU limits */
++ port_netdev->min_mtu = ETH_MIN_MTU;
++ port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
+
-+ /* TODO: should we hold rtnl_lock here? We can't register_netdev under
-+ * lock
-+ */
-+ dev_alloc_name(netdev, "sw%d");
-+ err = register_netdev(netdev);
++ err = register_netdev(port_netdev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev error %d\n", err);
-+ goto err_takedown;
-+ }
-+ if (err)
-+ dev_info(dev, "register_netdev res %d\n", err);
-+
-+ /* VLAN 1 is implicitly configured on the switch */
-+ priv->vlans[1] = ETHSW_VLAN_MEMBER;
-+ /* Flooding, learning are implicitly enabled */
-+ priv->learning = true;
-+ priv->flood = true;
-+
-+ /* register switch ports */
-+ snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
-+
-+ INIT_LIST_HEAD(&priv->port_list);
-+ for (i = 0; i < priv->sw_attr.num_ifs; i++) {
-+ struct net_device *port_netdev;
-+ struct ethsw_port_priv *port_priv;
-+
-+ port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
-+ if (!port_netdev) {
-+ dev_err(dev, "alloc_etherdev error\n");
-+ goto err_takedown;
++ free_netdev(port_netdev);
++ return err;
+ }
+
-+ port_priv = netdev_priv(port_netdev);
-+ port_priv->netdev = port_netdev;
-+ port_priv->ethsw_priv = priv;
++ ethsw->ports[port_idx] = port_priv;
+
-+ port_priv->port_index = i;
-+ port_priv->stp_state = BR_STATE_FORWARDING;
-+ /* VLAN 1 is configured by default on all switch ports */
-+ port_priv->vlans[1] = ETHSW_VLAN_MEMBER | ETHSW_VLAN_UNTAGGED |
-+ ETHSW_VLAN_PVID;
++ return ethsw_port_init(port_priv, port_idx);
++}
+
-+ SET_NETDEV_DEV(port_netdev, dev);
-+ port_netdev->netdev_ops = &ethsw_port_ops;
-+ port_netdev->ethtool_ops = &ethsw_port_ethtool_ops;
++static int ethsw_probe(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev = &sw_dev->dev;
++ struct ethsw_core *ethsw;
++ int i, err;
+
-+ port_netdev->flags = port_netdev->flags |
-+ IFF_PROMISC | IFF_SLAVE;
++ /* Allocate switch core*/
++ ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
+
-+ dev_alloc_name(port_netdev, port_name);
-+ err = register_netdev(port_netdev);
-+ if (err < 0) {
-+ dev_err(dev, "register_netdev error %d\n", err);
-+ free_netdev(port_netdev);
-+ goto err_takedown;
-+ }
++ if (!ethsw)
++ return -ENOMEM;
+
-+ rtnl_lock();
++ ethsw->dev = dev;
++ dev_set_drvdata(dev, ethsw);
+
-+ err = netdev_master_upper_dev_link(port_netdev, netdev,
-+ NULL, NULL);
-+ if (err) {
-+ dev_err(dev, "netdev_master_upper_dev_link error %d\n",
-+ err);
-+ unregister_netdev(port_netdev);
-+ free_netdev(port_netdev);
-+ rtnl_unlock();
-+ goto err_takedown;
-+ }
++ err = fsl_mc_portal_allocate(sw_dev, 0, &ethsw->mc_io);
++ if (err) {
++ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
++ goto err_free_drvdata;
++ }
+
-+ rtmsg_ifinfo(RTM_NEWLINK, port_netdev, IFF_SLAVE, GFP_KERNEL);
++ err = ethsw_init(sw_dev);
++ if (err)
++ goto err_free_cmdport;
+
-+ rtnl_unlock();
++ /* DEFAULT_VLAN_ID is implicitly configured on the switch */
++ ethsw->vlans[DEFAULT_VLAN_ID] = ETHSW_VLAN_MEMBER;
+
-+ list_add(&port_priv->list, &priv->port_list);
++ /* Learning is implicitly enabled */
++ ethsw->learning = true;
+
-+ /* TODO: implmenet set_rm_mode instead of this */
-+ err = ethsw_port_fdb_add_mc(port_netdev, def_mcast);
++ ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
++ GFP_KERNEL);
++ if (!(ethsw->ports)) {
++ err = -ENOMEM;
++ goto err_takedown;
++ }
++
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ err = ethsw_probe_port(ethsw, i);
+ if (err)
-+ dev_warn(&netdev->dev,
-+ "ethsw_port_fdb_add_mc err %d\n", err);
++ goto err_free_ports;
+ }
+
-+ /* the switch starts up enabled */
++ /* Switch starts up enabled */
+ rtnl_lock();
-+ err = dev_open(netdev);
++ err = ethsw_open(ethsw);
+ rtnl_unlock();
+ if (err)
-+ dev_warn(dev, "dev_open err %d\n", err);
++ goto err_free_ports;
+
-+ /* setup irqs */
++ /* Setup IRQs */
+ err = ethsw_setup_irqs(sw_dev);
-+ if (unlikely(err)) {
-+ dev_warn(dev, "ethsw_setup_irqs err %d\n", err);
-+ goto err_takedown;
-+ }
++ if (err)
++ goto err_stop;
+
-+ dev_info(&netdev->dev,
-+ "probed %d port switch\n", priv->sw_attr.num_ifs);
++ dev_info(dev, "probed %d port switch\n", ethsw->sw_attr.num_ifs);
+ return 0;
+
++err_stop:
++ rtnl_lock();
++ ethsw_stop(ethsw);
++ rtnl_unlock();
++
++err_free_ports:
++ /* Cleanup registered ports only */
++ for (i--; i >= 0; i--) {
++ unregister_netdev(ethsw->ports[i]->netdev);
++ free_netdev(ethsw->ports[i]->netdev);
++ }
++ kfree(ethsw->ports);
++
+err_takedown:
-+ ethsw_remove(sw_dev);
++ ethsw_takedown(sw_dev);
++
+err_free_cmdport:
-+ fsl_mc_portal_free(priv->mc_io);
-+err_free_netdev:
++ fsl_mc_portal_free(ethsw->mc_io);
++
++err_free_drvdata:
++ kfree(ethsw);
+ dev_set_drvdata(dev, NULL);
-+ free_netdev(netdev);
+
+ return err;
+}
@@ -17128,23 +16473,117 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpsw",
+ },
-+ {}
++ { .vendor = 0x0 }
+};
++MODULE_DEVICE_TABLE(fslmc, ethsw_match_id_table);
+
+static struct fsl_mc_driver eth_sw_drv = {
+ .driver = {
-+ .name = KBUILD_MODNAME,
-+ .owner = THIS_MODULE,
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
+ },
-+ .probe = ethsw_probe,
-+ .remove = ethsw_remove,
-+ .match_id_table = ethsw_match_id_table,
++ .probe = ethsw_probe,
++ .remove = ethsw_remove,
++ .match_id_table = ethsw_match_id_table
+};
+
+module_fsl_mc_driver(eth_sw_drv);
+
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver (prototype)");
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
+@@ -0,0 +1,90 @@
++/* Copyright 2014-2017 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __ETHSW_H
++#define __ETHSW_H
++
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/rtnetlink.h>
++#include <linux/if_vlan.h>
++#include <uapi/linux/if_bridge.h>
++#include <net/switchdev.h>
++#include <linux/if_bridge.h>
++
++#include "dpsw.h"
++
++/* Number of IRQs supported */
++#define DPSW_IRQ_NUM 2
++
++#define ETHSW_VLAN_MEMBER 1
++#define ETHSW_VLAN_UNTAGGED 2
++#define ETHSW_VLAN_PVID 4
++#define ETHSW_VLAN_GLOBAL 8
++
++/* Maximum Frame Length supported by HW (currently 10k) */
++#define DPAA2_MFL (10 * 1024)
++#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
++#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
++
++extern const struct ethtool_ops ethsw_port_ethtool_ops;
++
++struct ethsw_core;
++
++/* Per port private data */
++struct ethsw_port_priv {
++ struct net_device *netdev;
++ u16 idx;
++ struct ethsw_core *ethsw_data;
++ u8 link_state;
++ u8 stp_state;
++ bool flood;
++
++ u8 vlans[VLAN_VID_MASK + 1];
++ u16 pvid;
++};
++
++/* Switch data */
++struct ethsw_core {
++ struct device *dev;
++ struct fsl_mc_io *mc_io;
++ u16 dpsw_handle;
++ struct dpsw_attr sw_attr;
++ int dev_id;
++ struct ethsw_port_priv **ports;
++
++ u8 vlans[VLAN_VID_MASK + 1];
++ bool learning;
++};
++
++#endif /* __ETHSW_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/evb/Kconfig
@@ -0,0 +1,7 @@
@@ -17452,7 +16891,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#endif /* _FSL_DPDMUX_CMD_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.c
-@@ -0,0 +1,1112 @@
+@@ -0,0 +1,1111 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
@@ -17484,8 +16923,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#include "../../fsl-mc/include/mc-sys.h"
-+#include "../../fsl-mc/include/mc-cmd.h"
++#include <linux/fsl/mc.h>
+#include "dpdmux.h"
+#include "dpdmux-cmd.h"
+
@@ -17511,7 +16949,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ int dpdmux_id,
+ u16 *token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_open *cmd_params;
+ int err;
+
@@ -17548,7 +16986,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE,
@@ -17587,7 +17025,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ const struct dpdmux_cfg *cfg,
+ u32 *obj_id)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_create *cmd_params;
+ int err;
+
@@ -17636,7 +17074,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u32 object_id)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_destroy *cmd_params;
+
+ /* prepare command */
@@ -17662,7 +17100,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE,
@@ -17685,7 +17123,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE,
@@ -17710,7 +17148,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ int *en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_rsp_is_enabled *rsp_params;
+ int err;
+
@@ -17743,7 +17181,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET,
@@ -17775,7 +17213,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 irq_index,
+ u8 en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
@@ -17806,7 +17244,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 irq_index,
+ u8 *en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_get_irq_enable *cmd_params;
+ struct dpdmux_rsp_get_irq_enable *rsp_params;
+ int err;
@@ -17852,7 +17290,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 irq_index,
+ u32 mask)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
@@ -17886,7 +17324,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 irq_index,
+ u32 *mask)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_get_irq_mask *cmd_params;
+ struct dpdmux_rsp_get_irq_mask *rsp_params;
+ int err;
@@ -17928,7 +17366,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 irq_index,
+ u32 *status)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_get_irq_status *cmd_params;
+ struct dpdmux_rsp_get_irq_status *rsp_params;
+ int err;
@@ -17971,7 +17409,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 irq_index,
+ u32 status)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
@@ -18000,7 +17438,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ struct dpdmux_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_rsp_get_attr *rsp_params;
+ int err;
+
@@ -18041,7 +17479,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 if_id)
+{
+ struct dpdmux_cmd_if *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ENABLE,
@@ -18069,7 +17507,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 if_id)
+{
+ struct dpdmux_cmd_if *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_DISABLE,
@@ -18101,7 +17539,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 token,
+ u16 max_frame_length)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_set_max_frame_length *cmd_params;
+
+ /* prepare command */
@@ -18127,7 +17565,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS,
@@ -18161,7 +17599,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 if_id,
+ const struct dpdmux_accepted_frames *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_set_accepted_frames *cmd_params;
+
+ /* prepare command */
@@ -18195,7 +17633,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 if_id,
+ struct dpdmux_if_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if *cmd_params;
+ struct dpdmux_rsp_if_get_attr *rsp_params;
+ int err;
@@ -18242,7 +17680,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 if_id,
+ const struct dpdmux_l2_rule *rule)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_l2_rule *cmd_params;
+
+ /* prepare command */
@@ -18282,7 +17720,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 if_id,
+ const struct dpdmux_l2_rule *rule)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_l2_rule *cmd_params;
+
+ /* prepare command */
@@ -18321,7 +17759,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ enum dpdmux_counter_type counter_type,
+ u64 *counter)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_get_counter *cmd_params;
+ struct dpdmux_rsp_if_get_counter *rsp_params;
+ int err;
@@ -18362,7 +17800,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 if_id,
+ struct dpdmux_link_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_set_link_cfg *cmd_params;
+
+ /* prepare command */
@@ -18394,7 +17832,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 if_id,
+ struct dpdmux_link_state *state)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_get_link_state *cmd_params;
+ struct dpdmux_rsp_if_get_link_state *rsp_params;
+ int err;
@@ -18445,7 +17883,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u64 key_cfg_iova)
+{
+ struct dpdmux_set_custom_key *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_CUSTOM_KEY,
@@ -18481,7 +17919,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct dpdmux_cls_action *action)
+{
+ struct dpdmux_cmd_add_custom_cls_entry *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY,
@@ -18518,7 +17956,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct dpdmux_rule_cfg *rule)
+{
+ struct dpdmux_cmd_remove_custom_cls_entry *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY,
@@ -18547,7 +17985,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 *major_ver,
+ u16 *minor_ver)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_rsp_get_api_version *rsp_params;
+ int err;
+
@@ -19023,7 +18461,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#endif /* __FSL_DPDMUX_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/evb/evb.c
-@@ -0,0 +1,1350 @@
+@@ -0,0 +1,1354 @@
+/* Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
@@ -19064,7 +18502,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#include <uapi/linux/if_bridge.h>
+#include <net/netlink.h>
+
-+#include "../../fsl-mc/include/mc.h"
++#include <linux/fsl/mc.h>
+
+#include "dpdmux.h"
+#include "dpdmux-cmd.h"
@@ -19079,9 +18517,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#define DPDMUX_MAX_IRQ_NUM 2
+
+/* MAX FRAME LENGTH (currently 10k) */
-+#define EVB_MAX_FRAME_LENGTH (10 * 1024)
-+/* MIN FRAME LENGTH (64 bytes + 4 bytes CRC) */
-+#define EVB_MIN_FRAME_LENGTH 68
++#define EVB_MAX_FRAME_LENGTH (10 * 1024)
++#define EVB_MAX_MTU (EVB_MAX_FRAME_LENGTH - VLAN_ETH_HLEN)
++#define EVB_MIN_MTU 68
+
+struct evb_port_priv {
+ struct net_device *netdev;
@@ -19482,16 +18920,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ if (port_priv->port_index > 0)
+ return -EPERM;
+
-+ if (mtu < EVB_MIN_FRAME_LENGTH || mtu > EVB_MAX_FRAME_LENGTH) {
-+ netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
-+ mtu, EVB_MIN_FRAME_LENGTH, EVB_MAX_FRAME_LENGTH);
-+ return -EINVAL;
-+ }
-+
+ err = dpdmux_set_max_frame_length(evb_priv->mc_io,
+ 0,
+ evb_priv->mux_handle,
-+ (uint16_t)mtu);
++ (uint16_t)(mtu + VLAN_ETH_HLEN));
+
+ if (unlikely(err)) {
+ netdev_err(netdev, "dpdmux_ul_set_max_frame_length err %d\n",
@@ -19872,6 +19304,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+error:
+ netdev_err(netdev, "dpdmux_if_get_counter err %d\n", err);
++ return storage;
+}
+
+static const struct net_device_ops evb_port_ops = {
@@ -20234,11 +19667,16 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ priv = netdev_priv(netdev);
+
-+ err = fsl_mc_portal_allocate(evb_dev, 0, &priv->mc_io);
-+ if (unlikely(err)) {
-+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
++ err = fsl_mc_portal_allocate(evb_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
++ &priv->mc_io);
++ if (err) {
++ if (err == -ENXIO)
++ err = -EPROBE_DEFER;
++ else
++ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ goto err_free_netdev;
+ }
++
+ if (!priv->mc_io) {
+ dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
+ err = -EFAULT;
@@ -20315,6 +19753,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ list_add(&port_priv->list, &priv->port_list);
+ } else {
++ /* Set MTU limits only on uplink */
++ port_netdev->min_mtu = EVB_MIN_MTU;
++ port_netdev->max_mtu = EVB_MAX_MTU;
++
+ err = register_netdev(netdev);
+
+ if (err < 0) {
@@ -20590,7 +20032,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#endif /* _FSL_DPMAC_CMD_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c
-@@ -0,0 +1,620 @@
+@@ -0,0 +1,619 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
@@ -20622,8 +20064,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#include "../../fsl-mc/include/mc-sys.h"
-+#include "../../fsl-mc/include/mc-cmd.h"
++#include <linux/fsl/mc.h>
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
@@ -20650,7 +20091,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 *token)
+{
+ struct dpmac_cmd_open *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
@@ -20686,7 +20127,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
@@ -20722,7 +20163,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 *obj_id)
+{
+ struct dpmac_cmd_create *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
@@ -20764,7 +20205,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 object_id)
+{
+ struct dpmac_cmd_destroy *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY,
@@ -20799,7 +20240,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u8 en)
+{
+ struct dpmac_cmd_set_irq_enable *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE,
@@ -20831,7 +20272,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+{
+ struct dpmac_cmd_get_irq_enable *cmd_params;
+ struct dpmac_rsp_get_irq_enable *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
@@ -20876,7 +20317,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 mask)
+{
+ struct dpmac_cmd_set_irq_mask *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK,
@@ -20911,7 +20352,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+{
+ struct dpmac_cmd_get_irq_mask *cmd_params;
+ struct dpmac_rsp_get_irq_mask *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
@@ -20954,7 +20395,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+{
+ struct dpmac_cmd_get_irq_status *cmd_params;
+ struct dpmac_rsp_get_irq_status *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
@@ -20997,7 +20438,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u32 status)
+{
+ struct dpmac_cmd_clear_irq_status *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS,
@@ -21027,7 +20468,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct dpmac_attr *attr)
+{
+ struct dpmac_rsp_get_attributes *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
@@ -21065,7 +20506,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct dpmac_link_cfg *cfg)
+{
+ struct dpmac_rsp_get_link_cfg *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err = 0;
+
+ /* prepare command */
@@ -21100,7 +20541,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct dpmac_link_state *link_state)
+{
+ struct dpmac_cmd_set_link_state *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
@@ -21133,7 +20574,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+{
+ struct dpmac_cmd_get_counter *dpmac_cmd;
+ struct dpmac_rsp_get_counter *dpmac_rsp;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err = 0;
+
+ /* prepare command */
@@ -21161,7 +20602,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ const u8 addr[6])
+{
+ struct dpmac_cmd_set_port_mac_addr *dpmac_cmd;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PORT_MAC_ADDR,
@@ -21194,7 +20635,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ u16 *minor_ver)
+{
+ struct dpmac_rsp_get_api_version *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
@@ -21558,7 +20999,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#endif /* __FSL_DPMAC_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/mac.c
-@@ -0,0 +1,670 @@
+@@ -0,0 +1,673 @@
+/* Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
@@ -21607,8 +21048,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+
-+#include "../../fsl-mc/include/mc.h"
-+#include "../../fsl-mc/include/mc-sys.h"
++#include <linux/fsl/mc.h>
+
+#include "dpmac.h"
+#include "dpmac-cmd.h"
@@ -21713,16 +21153,18 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ return NETDEV_TX_OK;
+}
+
-+static int dpaa2_mac_get_settings(struct net_device *netdev,
-+ struct ethtool_cmd *cmd)
++static int dpaa2_mac_get_link_ksettings(struct net_device *netdev,
++ struct ethtool_link_ksettings *ks)
+{
-+ return phy_ethtool_gset(netdev->phydev, cmd);
++ phy_ethtool_ksettings_get(netdev->phydev, ks);
++
++ return 0;
+}
+
-+static int dpaa2_mac_set_settings(struct net_device *netdev,
-+ struct ethtool_cmd *cmd)
++static int dpaa2_mac_set_link_ksettings(struct net_device *netdev,
++ const struct ethtool_link_ksettings *ks)
+{
-+ return phy_ethtool_sset(netdev->phydev, cmd);
++ return phy_ethtool_ksettings_set(netdev->phydev, ks);
+}
+
+static struct rtnl_link_stats64 *dpaa2_mac_get_stats(struct net_device *netdev,
@@ -21881,8 +21323,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+};
+
+static const struct ethtool_ops dpaa2_mac_ethtool_ops = {
-+ .get_settings = &dpaa2_mac_get_settings,
-+ .set_settings = &dpaa2_mac_set_settings,
++ .get_link_ksettings = &dpaa2_mac_get_link_ksettings,
++ .set_link_ksettings = &dpaa2_mac_set_link_ksettings,
+ .get_strings = &dpaa2_mac_get_strings,
+ .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats,
+ .get_sset_count = &dpaa2_mac_get_sset_count,
@@ -22051,10 +21493,12 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+ dev_set_drvdata(dev, priv);
+
-+ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
++ /* We may need to issue MC commands while in atomic context */
++ err = fsl_mc_portal_allocate(mc_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
++ &mc_dev->mc_io);
+ if (err || !mc_dev->mc_io) {
-+ dev_err(dev, "fsl_mc_portal_allocate error: %d\n", err);
-+ err = -ENODEV;
++ dev_dbg(dev, "fsl_mc_portal_allocate error: %d\n", err);
++ err = -EPROBE_DEFER;
+ goto err_free_netdev;
+ }
+
@@ -22439,8 +21883,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#include "../../fsl-mc/include/mc-sys.h"
-+#include "../../fsl-mc/include/mc-cmd.h"
++#include <linux/fsl/mc.h>
++
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
@@ -22467,7 +21911,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ uint16_t *token)
+{
+ struct dprtc_cmd_open *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
@@ -22503,7 +21947,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ uint32_t cmd_flags,
+ uint16_t token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
@@ -22538,7 +21982,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ const struct dprtc_cfg *cfg,
+ uint32_t *obj_id)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ (void)(cfg); /* unused */
@@ -22580,7 +22024,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ uint32_t object_id)
+{
+ struct dprtc_cmd_destroy *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
@@ -22597,7 +22041,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ uint32_t cmd_flags,
+ uint16_t token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
@@ -22611,7 +22055,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ uint32_t cmd_flags,
+ uint16_t token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
@@ -22628,7 +22072,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ int *en)
+{
+ struct dprtc_rsp_is_enabled *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
@@ -22651,7 +22095,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ uint32_t cmd_flags,
+ uint16_t token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
@@ -22684,7 +22128,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ uint8_t en)
+{
+ struct dprtc_cmd_set_irq_enable *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
@@ -22716,7 +22160,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+{
+ struct dprtc_rsp_get_irq_enable *rsp_params;
+ struct dprtc_cmd_get_irq *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
@@ -22761,7 +22205,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ uint32_t mask)
+{
+ struct dprtc_cmd_set_irq_mask *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
@@ -22796,7 +22240,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+{
+ struct dprtc_rsp_get_irq_mask *rsp_params;
+ struct dprtc_cmd_get_irq *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
@@ -22839,7 +22283,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+{
+ struct dprtc_cmd_get_irq_status *cmd_params;
+ struct dprtc_rsp_get_irq_status *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
@@ -22882,7 +22326,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ uint32_t status)
+{
+ struct dprtc_cmd_clear_irq_status *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
@@ -22912,7 +22356,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ struct dprtc_attr *attr)
+{
+ struct dprtc_rsp_get_attributes *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
@@ -22949,7 +22393,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ int64_t offset)
+{
+ struct dprtc_cmd_set_clock_offset *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
@@ -22978,7 +22422,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ uint32_t freq_compensation)
+{
+ struct dprtc_get_freq_compensation *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
@@ -23007,7 +22451,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ uint32_t *freq_compensation)
+{
+ struct dprtc_get_freq_compensation *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
@@ -23043,7 +22487,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ uint64_t *time)
+{
+ struct dprtc_time *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
@@ -23079,7 +22523,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ uint64_t time)
+{
+ struct dprtc_time *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
@@ -23109,7 +22553,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ uint16_t token, uint64_t time)
+{
+ struct dprtc_time *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM,
@@ -23137,7 +22581,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ uint16_t *minor_ver)
+{
+ struct dprtc_rsp_get_api_version *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION,
@@ -23331,7 +22775,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#endif /* __FSL_DPRTC_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c
-@@ -0,0 +1,243 @@
+@@ -0,0 +1,242 @@
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
@@ -23367,8 +22811,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+
-+#include "../../fsl-mc/include/mc.h"
-+#include "../../fsl-mc/include/mc-sys.h"
++#include <linux/fsl/mc.h>
+
+#include "dprtc.h"
+#include "dprtc-cmd.h"
@@ -23575,3 +23018,21 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DPAA2 RTC (PTP 1588 clock) driver (prototype)");
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -429,12 +429,15 @@ struct sk_filter {
+
+ struct bpf_skb_data_end {
+ struct qdisc_skb_cb qdisc_cb;
++ void *data_meta;
+ void *data_end;
+ };
+
+ struct xdp_buff {
+ void *data;
+ void *data_end;
++ void *data_meta;
++ void *data_hard_start;
+ };
+
+ /* compute the linear packet data range [data, data_end) which